| repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
zarboz/XBMC-PVR-mac | lib/ffmpeg/libavfilter/vf_showinfo.c | 31 | 3936 | /*
* Copyright (c) 2011 Stefano Sabatini
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* filter for showing textual video frame information
*/
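/*
 * Illustrative usage (assumes an ffmpeg build with this filter enabled):
 *   ffmpeg -i input.mp4 -vf showinfo -f null -
 * One log line is printed per frame, in the format produced by
 * end_frame() below.
 */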
#include "libavutil/adler32.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
typedef struct {
unsigned int frame;
} ShowInfoContext;
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
ShowInfoContext *showinfo = ctx->priv;
showinfo->frame = 0;
return 0;
}
static void end_frame(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ShowInfoContext *showinfo = ctx->priv;
AVFilterBufferRef *picref = inlink->cur_buf;
uint32_t plane_checksum[4] = {0}, checksum = 0;
int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
uint8_t *data = picref->data[plane];
int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;
for (i = 0; i < h; i++) {
plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
checksum = av_adler32_update(checksum, data, linesize);
data += picref->linesize[plane];
}
}
av_log(ctx, AV_LOG_INFO,
"n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
"checksum:%08X plane_checksum:[%08X %08X %08X %08X]\n",
showinfo->frame,
picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
av_pix_fmt_descriptors[picref->format].name,
picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
picref->video->w, picref->video->h,
!picref->video->interlaced ? 'P' : /* Progressive */
picref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
picref->video->key_frame,
av_get_picture_type_char(picref->video->pict_type),
checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);
showinfo->frame++;
avfilter_end_frame(inlink->dst->outputs[0]);
}
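/*
 * Example of a line emitted by end_frame() (all values illustrative):
 *   n:16 pts:16016 pts_time:0.533867 pos:123456 fmt:yuv420p sar:1/1
 *   s:640x480 i:P iskey:0 type:P checksum:1234ABCD
 *   plane_checksum:[0011AABB 0022BBCC 0033CCDD 00000000]
 */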
AVFilter avfilter_vf_showinfo = {
.name = "showinfo",
.description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."),
.priv_size = sizeof(ShowInfoContext),
.init = init,
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = avfilter_null_get_video_buffer,
.start_frame = avfilter_null_start_frame,
.end_frame = end_frame,
.min_perms = AV_PERM_READ, },
{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO },
{ .name = NULL}},
};
| gpl-2.0 |
SunguckLee/TokuDB-7.5.3 | storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 31 | 43102 | /* Copyright (c) 2003-2008 MySQL AB
Use is subject to license terms
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#define DBTUP_C
#define DBTUP_TRIGGER_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#include <AttributeDescriptor.hpp>
#include "AttributeOffset.hpp"
#include <AttributeHeader.hpp>
#include <signaldata/FireTrigOrd.hpp>
#include <signaldata/CreateTrig.hpp>
#include <signaldata/TuxMaint.hpp>
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ----------------------- TRIGGER HANDLING ----------------------- */
/* ---------------------------------------------------------------- */
/* **************************************************************** */
DLList<Dbtup::TupTriggerData>*
Dbtup::findTriggerList(Tablerec* table,
TriggerType::Value ttype,
TriggerActionTime::Value ttime,
TriggerEvent::Value tevent)
{
DLList<TupTriggerData>* tlist = NULL;
switch (ttype) {
case TriggerType::SUBSCRIPTION:
case TriggerType::SUBSCRIPTION_BEFORE:
switch (tevent) {
case TriggerEvent::TE_INSERT:
jam();
if (ttime == TriggerActionTime::TA_DETACHED)
tlist = &table->subscriptionInsertTriggers;
break;
case TriggerEvent::TE_UPDATE:
jam();
if (ttime == TriggerActionTime::TA_DETACHED)
tlist = &table->subscriptionUpdateTriggers;
break;
case TriggerEvent::TE_DELETE:
jam();
if (ttime == TriggerActionTime::TA_DETACHED)
tlist = &table->subscriptionDeleteTriggers;
break;
default:
break;
}
break;
case TriggerType::SECONDARY_INDEX:
switch (tevent) {
case TriggerEvent::TE_INSERT:
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->afterInsertTriggers;
break;
case TriggerEvent::TE_UPDATE:
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->afterUpdateTriggers;
break;
case TriggerEvent::TE_DELETE:
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->afterDeleteTriggers;
break;
default:
break;
}
break;
case TriggerType::ORDERED_INDEX:
switch (tevent) {
case TriggerEvent::TE_CUSTOM:
jam();
if (ttime == TriggerActionTime::TA_CUSTOM)
tlist = &table->tuxCustomTriggers;
break;
default:
break;
}
break;
case TriggerType::READ_ONLY_CONSTRAINT:
switch (tevent) {
case TriggerEvent::TE_UPDATE:
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->constraintUpdateTriggers;
break;
default:
break;
}
break;
default:
break;
}
return tlist;
}
// Trigger signals
void
Dbtup::execCREATE_TRIG_REQ(Signal* signal)
{
jamEntry();
BlockReference senderRef = signal->getSendersBlockRef();
const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
const CreateTrigReq* const req = &reqCopy;
CreateTrigRef::ErrorCode error= CreateTrigRef::NoError;
// Find table
TablerecPtr tabPtr;
tabPtr.i = req->getTableId();
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
if (tabPtr.p->tableStatus != DEFINED)
{
jam();
error= CreateTrigRef::InvalidTable;
}
// Create trigger and associate it with the table
else if (createTrigger(tabPtr.p, req))
{
jam();
// Send conf
CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
conf->setUserRef(reference());
conf->setConnectionPtr(req->getConnectionPtr());
conf->setRequestType(req->getRequestType());
conf->setTableId(req->getTableId());
conf->setIndexId(req->getIndexId());
conf->setTriggerId(req->getTriggerId());
conf->setTriggerInfo(req->getTriggerInfo());
sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
signal, CreateTrigConf::SignalLength, JBB);
return;
}
else
{
jam();
error= CreateTrigRef::TooManyTriggers;
}
ndbassert(error != CreateTrigRef::NoError);
// Send ref
CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
ref->setUserRef(reference());
ref->setConnectionPtr(req->getConnectionPtr());
ref->setRequestType(req->getRequestType());
ref->setTableId(req->getTableId());
ref->setIndexId(req->getIndexId());
ref->setTriggerId(req->getTriggerId());
ref->setTriggerInfo(req->getTriggerInfo());
ref->setErrorCode(error);
sendSignal(senderRef, GSN_CREATE_TRIG_REF,
signal, CreateTrigRef::SignalLength, JBB);
}//Dbtup::execCREATE_TRIG_REQ()
void
Dbtup::execDROP_TRIG_REQ(Signal* signal)
{
jamEntry();
BlockReference senderRef = signal->getSendersBlockRef();
const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr();
const DropTrigReq* const req = &reqCopy;
// Find table
TablerecPtr tabPtr;
tabPtr.i = req->getTableId();
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
// Drop trigger
Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef));
if (r == 0){
// Send conf
DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
conf->setUserRef(senderRef);
conf->setConnectionPtr(req->getConnectionPtr());
conf->setRequestType(req->getRequestType());
conf->setTableId(req->getTableId());
conf->setIndexId(req->getIndexId());
conf->setTriggerId(req->getTriggerId());
sendSignal(senderRef, GSN_DROP_TRIG_CONF,
signal, DropTrigConf::SignalLength, JBB);
} else {
// Send ref
DropTrigRef* const ref = (DropTrigRef*)signal->getDataPtrSend();
ref->setUserRef(senderRef);
ref->setConnectionPtr(req->getConnectionPtr());
ref->setRequestType(req->getRequestType());
ref->setTableId(req->getTableId());
ref->setIndexId(req->getIndexId());
ref->setTriggerId(req->getTriggerId());
ref->setErrorCode((DropTrigRef::ErrorCode)r);
ref->setErrorLine(__LINE__);
ref->setErrorNode(refToNode(reference()));
sendSignal(senderRef, GSN_DROP_TRIG_REF,
signal, DropTrigRef::SignalLength, JBB);
}
}//Dbtup::execDROP_TRIG_REQ()
/* ---------------------------------------------------------------- */
/* ------------------------- createTrigger ------------------------ */
/* */
/* Creates a new trigger record by fetching one from the trigger */
/* pool and associates it with the given table. */
/* Trigger type can be one of secondary_index, subscription, */
/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
/* api_trigger(NYI) or sql_trigger(NYI). */
/* Note that this method only checks for total number of allowed */
/* triggers. Checking the number of allowed triggers per table is */
/* done by TRIX. */
/* */
/* ---------------------------------------------------------------- */
bool
Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
{
if (ERROR_INSERTED(4003)) {
CLEAR_ERROR_INSERT_VALUE;
return false;
}
TriggerType::Value ttype = req->getTriggerType();
TriggerActionTime::Value ttime = req->getTriggerActionTime();
TriggerEvent::Value tevent = req->getTriggerEvent();
DLList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
ndbrequire(tlist != NULL);
TriggerPtr tptr;
if (!tlist->seize(tptr))
return false;
// Set trigger id
tptr.p->triggerId = req->getTriggerId();
// ndbout_c("Create TupTrigger %u = %u %u %u %u", tptr.p->triggerId, table, ttype, ttime, tevent);
// Set index id
tptr.p->indexId = req->getIndexId();
// Set trigger type etc
tptr.p->triggerType = ttype;
tptr.p->triggerActionTime = ttime;
tptr.p->triggerEvent = tevent;
tptr.p->sendBeforeValues = true;
if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) &&
((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) ||
(tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) {
jam();
tptr.p->sendBeforeValues = false;
}
/*
tptr.p->sendOnlyChangedAttributes = false;
if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) ||
(tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) &&
(tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) {
jam();
tptr.p->sendOnlyChangedAttributes = true;
}
*/
tptr.p->sendOnlyChangedAttributes = !req->getReportAllMonitoredAttributes();
// Set monitor all
tptr.p->monitorAllAttributes = req->getMonitorAllAttributes();
tptr.p->monitorReplicas = req->getMonitorReplicas();
tptr.p->m_receiverBlock = refToBlock(req->getReceiverRef());
tptr.p->attributeMask.clear();
if (tptr.p->monitorAllAttributes) {
jam();
for(Uint32 i = 0; i < table->m_no_of_attributes; i++) {
if (!primaryKey(table, i)) {
jam();
tptr.p->attributeMask.set(i);
}
}
} else {
// Set attribute mask
jam();
tptr.p->attributeMask = req->getAttributeMask();
}
return true;
}//Dbtup::createTrigger()
bool
Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
{
Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
Uint32 attrDescriptor = getTabDescrWord(attrDescriptorStart +
(attrId * ZAD_SIZE));
return (bool)AttributeDescriptor::getPrimaryKey(attrDescriptor);
}//Dbtup::primaryKey()
/* ---------------------------------------------------------------- */
/* -------------------------- dropTrigger ------------------------- */
/* */
/* Deletes a trigger record by disassociating it with the given */
/* table and returning it to the trigger pool. */
/* Trigger type can be one of secondary_index, subscription, */
/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
/* api_trigger(NYI) or sql_trigger(NYI). */
/* */
/* ---------------------------------------------------------------- */
Uint32
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
{
if (ERROR_INSERTED(4004)) {
CLEAR_ERROR_INSERT_VALUE;
return 9999;
}
Uint32 triggerId = req->getTriggerId();
TriggerType::Value ttype = req->getTriggerType();
TriggerActionTime::Value ttime = req->getTriggerActionTime();
TriggerEvent::Value tevent = req->getTriggerEvent();
// ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender);
DLList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
ndbrequire(tlist != NULL);
Ptr<TupTriggerData> ptr;
for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
jam();
if (ptr.p->triggerId == triggerId) {
if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock)
{
/**
* You can only drop your own triggers for subscription triggers.
* Trigger IDs are private for each block.
*
* SUMA encodes information in the triggerId
*
* Backup doesn't really care about the Ids though.
*/
jam();
continue;
}
jam();
tlist->release(ptr.i);
return 0;
}
}
return DropTrigRef::TriggerNotFound;
}//Dbtup::dropTrigger()
/* ---------------------------------------------------------------- */
/* -------------- checkImmediateTriggersAfterOp ------------------ */
/* */
/* Called after an insert, delete, or update operation takes */
/* place. Fetches before tuple for deletes and updates and */
/* after tuple for inserts and updates. */
/* Executes immediate triggers by sending FIRETRIGORD */
/* */
/* ---------------------------------------------------------------- */
void
Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,
Operationrec *regOperPtr,
Tablerec *regTablePtr,
bool disk)
{
if(refToBlock(req_struct->TC_ref) != DBTC) {
return;
}
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->afterInsertTriggers.isEmpty()))) {
jam();
fireImmediateTriggers(req_struct,
regTablePtr->afterInsertTriggers,
regOperPtr,
disk);
}
}
void
Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct,
Operationrec* regOperPtr,
Tablerec* regTablePtr,
bool disk)
{
if(refToBlock(req_struct->TC_ref) != DBTC) {
return;
}
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
jam();
fireImmediateTriggers(req_struct,
regTablePtr->afterUpdateTriggers,
regOperPtr,
disk);
}
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
jam();
fireImmediateTriggers(req_struct,
regTablePtr->constraintUpdateTriggers,
regOperPtr,
disk);
}
}
void
Dbtup::checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct,
Operationrec* regOperPtr,
Tablerec* regTablePtr,
bool disk)
{
if(refToBlock(req_struct->TC_ref) != DBTC) {
return;
}
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->afterDeleteTriggers.isEmpty()))) {
jam();
executeTriggers(req_struct,
regTablePtr->afterDeleteTriggers,
regOperPtr,
disk);
}
}
#if 0
/* ---------------------------------------------------------------- */
/* --------------------- checkDeferredTriggers -------------------- */
/* */
/* Called before commit after an insert, delete, or update */
/* operation. Fetches before tuple for deletes and updates and */
/* after tuple for inserts and updates. */
/* Executes deferred triggers by sending FIRETRIGORD */
/* */
/* ---------------------------------------------------------------- */
void Dbtup::checkDeferredTriggers(Signal* signal,
Operationrec* const regOperPtr,
Tablerec* const regTablePtr)
{
jam();
// NYI
}//Dbtup::checkDeferredTriggers()
#endif
/* ---------------------------------------------------------------- */
/* --------------------- checkDetachedTriggers -------------------- */
/* */
/* Called at commit after an insert, delete, or update operation. */
/* Fetches before tuple for deletes and updates and */
/* after tuple for inserts and updates. */
/* Executes detached triggers by sending FIRETRIGORD */
/* */
/* ---------------------------------------------------------------- */
void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct,
Operationrec* regOperPtr,
Tablerec* regTablePtr,
bool disk)
{
Uint32 save_type = regOperPtr->op_struct.op_type;
Tuple_header *save_ptr = req_struct->m_tuple_ptr;
switch (save_type) {
case ZUPDATE:
case ZINSERT:
req_struct->m_tuple_ptr = (Tuple_header*)
c_undo_buffer.get_ptr(&regOperPtr->m_copy_tuple_location);
break;
}
/**
* Set correct operation type and fix change mask
* Note ALLOC is set in "orig" tuple
*/
if (save_ptr->m_header_bits & Tuple_header::ALLOC) {
if (save_type == ZDELETE) {
// insert + delete = nothing
jam();
goto end;
}
regOperPtr->op_struct.op_type = ZINSERT;
}
else if (save_type == ZINSERT) {
/**
* Tuple was not created but last op is INSERT.
* This is possible only on DELETE + INSERT
*/
regOperPtr->op_struct.op_type = ZUPDATE;
}
switch(regOperPtr->op_struct.op_type) {
case(ZINSERT):
jam();
if (regTablePtr->subscriptionInsertTriggers.isEmpty()) {
// Table has no active triggers monitoring inserts at commit
jam();
goto end;
}
// If any fired immediate insert trigger then fetch after tuple
fireDetachedTriggers(req_struct,
regTablePtr->subscriptionInsertTriggers,
regOperPtr, disk);
break;
case(ZDELETE):
jam();
if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) {
// Table has no active triggers monitoring deletes at commit
jam();
goto end;
}
// Execute any after delete triggers by sending
// FIRETRIGORD with the before tuple
fireDetachedTriggers(req_struct,
regTablePtr->subscriptionDeleteTriggers,
regOperPtr, disk);
break;
case(ZUPDATE):
jam();
if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) {
// Table has no active triggers monitoring updates at commit
jam();
goto end;
}
// If any fired immediate update trigger then fetch after tuple
// and send two FIRETRIGORD one with before tuple and one with after tuple
fireDetachedTriggers(req_struct,
regTablePtr->subscriptionUpdateTriggers,
regOperPtr, disk);
break;
default:
ndbrequire(false);
break;
}
end:
regOperPtr->op_struct.op_type = save_type;
req_struct->m_tuple_ptr = save_ptr;
}
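/*
 * Summary of the op-type rewrite performed above (derived from the code):
 *   tuple ALLOC'ed in this transaction, final op DELETE  -> fire nothing
 *   tuple ALLOC'ed in this transaction, any other op     -> fire as INSERT
 *   pre-existing tuple, final op INSERT (DELETE+INSERT)  -> fire as UPDATE
 *   otherwise the original op type is reported unchanged
 */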
void
Dbtup::fireImmediateTriggers(KeyReqStruct *req_struct,
DLList<TupTriggerData>& triggerList,
Operationrec* const regOperPtr,
bool disk)
{
TriggerPtr trigPtr;
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
jam();
if (trigPtr.p->monitorAllAttributes ||
trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
jam();
executeTrigger(req_struct,
trigPtr.p,
regOperPtr,
disk);
}//if
triggerList.next(trigPtr);
}//while
}//Dbtup::fireImmediateTriggers()
#if 0
void
Dbtup::fireDeferredTriggers(Signal* signal,
KeyReqStruct *req_struct,
DLList<TupTriggerData>& triggerList,
Operationrec* const regOperPtr)
{
TriggerPtr trigPtr;
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
jam();
if (trigPtr.p->monitorAllAttributes ||
trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
jam();
executeTrigger(req_struct,
trigPtr,
regOperPtr);
}//if
triggerList.next(trigPtr);
}//while
}//Dbtup::fireDeferredTriggers()
#endif
void
Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct,
DLList<TupTriggerData>& triggerList,
Operationrec* const regOperPtr,
bool disk)
{
TriggerPtr trigPtr;
/**
* Set disk page
*/
req_struct->m_disk_page_ptr.i = m_pgman.m_ptr.i;
ndbrequire(regOperPtr->is_first_operation());
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
jam();
if ((trigPtr.p->monitorReplicas ||
regOperPtr->op_struct.primary_replica) &&
(trigPtr.p->monitorAllAttributes ||
trigPtr.p->attributeMask.overlaps(req_struct->changeMask))) {
jam();
executeTrigger(req_struct,
trigPtr.p,
regOperPtr,
disk);
}
triggerList.next(trigPtr);
}
}
void Dbtup::executeTriggers(KeyReqStruct *req_struct,
DLList<TupTriggerData>& triggerList,
Operationrec* regOperPtr,
bool disk)
{
TriggerPtr trigPtr;
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
jam();
executeTrigger(req_struct,
trigPtr.p,
regOperPtr,
disk);
triggerList.next(trigPtr);
}
}
void Dbtup::executeTrigger(KeyReqStruct *req_struct,
TupTriggerData* const trigPtr,
Operationrec* const regOperPtr,
bool disk)
{
/**
* The block below does not work together with GREP.
* I have 2 db nodes (2 replicas) -> one node group.
* I want to have FIRETRIG_ORD sent to all SumaParticipants,
* from all nodes in the node group described above. However,
* only one of the nodes in the node group actually sends the
* FIRE_TRIG_ORD, and the other node enters this "hack" below.
* I don't really know what the code snippet below does, but it
* does not work with GREP the way Lars and I want it.
* We need to have triggers fired from both the primary and the
* backup replica, not only the primary as it is now.
*
* Note: In Suma, I have changed triggers to be created with
* setMonitorReplicas(true).
* /Johan
*
* See RT 709
*/
// XXX quick fix to NR, should fix in LQHKEYREQ instead
/*
if (refToBlock(req_struct->TC_ref) == DBLQH) {
jam();
return;
}
*/
Signal* signal= req_struct->signal;
BlockReference ref = trigPtr->m_receiverBlock;
Uint32* const keyBuffer = &cinBuffer[0];
Uint32* const afterBuffer = &coutBuffer[0];
Uint32* const beforeBuffer = &clogMemBuffer[0];
Uint32 noPrimKey, noAfterWords, noBeforeWords;
FragrecordPtr regFragPtr;
regFragPtr.i= regOperPtr->fragmentPtr;
ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
if (ref == BACKUP) {
jam();
/*
In order for the implementation of BACKUP to work even when changing
primaries in the middle of the backup we need to set the trigger on
all replicas. This check checks whether this is the node where this
trigger should be fired. The check should preferably have been put
completely in the BACKUP block but it was about five times simpler
to put it here and also much faster for the backup (small overhead
for everybody else).
*/
signal->theData[0] = trigPtr->triggerId;
signal->theData[1] = regFragPtr.p->fragmentId;
EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
jamEntry();
if (signal->theData[0] == 0) {
jam();
return;
}
}
if (!readTriggerInfo(trigPtr,
regOperPtr,
req_struct,
regFragPtr.p,
keyBuffer,
noPrimKey,
afterBuffer,
noAfterWords,
beforeBuffer,
noBeforeWords,
disk)) {
jam();
return;
}
//--------------------------------------------------------------------
// Now all data for this trigger has been read. It is now time to send
// the trigger information consisting of two or three sets of TRIG_
// ATTRINFO signals and one FIRE_TRIG_ORD signal.
// We start by setting common header info for all TRIG_ATTRINFO signals.
//--------------------------------------------------------------------
bool executeDirect;
TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
trigAttrInfo->setConnectionPtr(req_struct->TC_index);
trigAttrInfo->setTriggerId(trigPtr->triggerId);
switch(trigPtr->triggerType) {
case (TriggerType::SECONDARY_INDEX):
jam();
ref = req_struct->TC_ref;
executeDirect = false;
break;
case (TriggerType::SUBSCRIPTION):
case (TriggerType::SUBSCRIPTION_BEFORE):
jam();
// Since only backup uses subscription triggers we send to backup directly for now
ref = trigPtr->m_receiverBlock;
executeDirect = true;
break;
case (TriggerType::READ_ONLY_CONSTRAINT):
terrorCode = ZREAD_ONLY_CONSTRAINT_VIOLATION;
// XXX should return status and abort the rest
return;
default:
ndbrequire(false);
executeDirect= false; // remove warning
}//switch
req_struct->no_fired_triggers++;
trigAttrInfo->setAttrInfoType(TrigAttrInfo::PRIMARY_KEY);
sendTrigAttrInfo(signal, keyBuffer, noPrimKey, executeDirect, ref);
switch(regOperPtr->op_struct.op_type) {
case(ZINSERT):
jam();
// Send AttrInfo signals with new attribute values
trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
break;
case(ZDELETE):
if (trigPtr->sendBeforeValues) {
jam();
trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref);
}
break;
case(ZUPDATE):
jam();
if (trigPtr->sendBeforeValues) {
jam();
trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref);
}
trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
break;
default:
ndbrequire(false);
}
sendFireTrigOrd(signal,
req_struct,
regOperPtr,
trigPtr,
regFragPtr.p->fragmentId,
noPrimKey,
noBeforeWords,
noAfterWords);
}
Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
Uint32 noOfAttributes,
Uint32* inBuffer)
{
Uint32 bufIndx = 0;
for (Uint32 i = 0; i < noOfAttributes; i++) {
jam();
if (attributeMask.get(i)) {
jam();
AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
}
}
return bufIndx;
}
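/*
 * Worked example for setAttrIds() (illustrative mask): with bits 0, 2 and
 * 5 set in attributeMask and noOfAttributes >= 6, inBuffer receives three
 * AttributeHeaders for attribute ids 0, 2 and 5 (data size 0) and the
 * function returns 3.
 */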
bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
Operationrec* const regOperPtr,
KeyReqStruct *req_struct,
Fragrecord* const regFragPtr,
Uint32* const keyBuffer,
Uint32& noPrimKey,
Uint32* const afterBuffer,
Uint32& noAfterWords,
Uint32* const beforeBuffer,
Uint32& noBeforeWords,
bool disk)
{
noAfterWords = 0;
noBeforeWords = 0;
Uint32 readBuffer[MAX_ATTRIBUTES_IN_TABLE];
//---------------------------------------------------------------------------
// Set-up variables needed by readAttributes operPtr.p, tabptr.p
//---------------------------------------------------------------------------
operPtr.p = regOperPtr;
tabptr.i = regFragPtr->fragTableId;
ptrCheckGuard(tabptr, cnoOfTablerec, tablerec);
Tablerec* const regTabPtr = tabptr.p;
Uint32 num_attr= regTabPtr->m_no_of_attributes;
Uint32 descr_start= regTabPtr->tabDescriptor;
ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
req_struct->check_offset[MM]= regTabPtr->get_check_offset(MM);
req_struct->check_offset[DD]= regTabPtr->get_check_offset(DD);
req_struct->attr_descr= &tableDescriptor[descr_start];
//--------------------------------------------------------------------
// Read Primary Key Values
//--------------------------------------------------------------------
Tuple_header *save0= req_struct->m_tuple_ptr;
if (regOperPtr->op_struct.op_type == ZDELETE &&
!regOperPtr->is_first_operation())
{
jam();
req_struct->m_tuple_ptr= (Tuple_header*)
c_undo_buffer.get_ptr(&req_struct->prevOpPtr.p->m_copy_tuple_location);
}
if (regTabPtr->need_expand(disk))
prepare_read(req_struct, regTabPtr, disk);
int ret = readAttributes(req_struct,
&tableDescriptor[regTabPtr->readKeyArray].tabDescr,
regTabPtr->noOfKeyAttr,
keyBuffer,
ZATTR_BUFFER_SIZE,
false);
ndbrequire(ret != -1);
noPrimKey= ret;
req_struct->m_tuple_ptr = save0;
Uint32 numAttrsToRead;
if ((regOperPtr->op_struct.op_type == ZUPDATE) &&
(trigPtr->sendOnlyChangedAttributes)) {
jam();
//--------------------------------------------------------------------
// Update that sends only changed information
//--------------------------------------------------------------------
Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
attributeMask = trigPtr->attributeMask;
attributeMask.bitAND(req_struct->changeMask);
numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes,
&readBuffer[0]);
} else if ((regOperPtr->op_struct.op_type == ZDELETE) &&
(!trigPtr->sendBeforeValues)) {
jam();
//--------------------------------------------------------------------
// Delete without sending before values only read Primary Key
//--------------------------------------------------------------------
return true;
} else {
jam();
//--------------------------------------------------------------------
// All others send all attributes that are monitored, except:
// Omit unchanged blob inlines on update i.e.
// attributeMask & ~ (blobAttributeMask & ~ changeMask)
//--------------------------------------------------------------------
Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
attributeMask = trigPtr->attributeMask;
if (regOperPtr->op_struct.op_type == ZUPDATE) {
Bitmask<MAXNROFATTRIBUTESINWORDS> tmpMask = regTabPtr->blobAttributeMask;
tmpMask.bitANDC(req_struct->changeMask);
attributeMask.bitANDC(tmpMask);
}
numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes,
&readBuffer[0]);
}
ndbrequire(numAttrsToRead < MAX_ATTRIBUTES_IN_TABLE);
//--------------------------------------------------------------------
// Read Main tuple values
//--------------------------------------------------------------------
if (regOperPtr->op_struct.op_type != ZDELETE)
{
jam();
int ret = readAttributes(req_struct,
&readBuffer[0],
numAttrsToRead,
afterBuffer,
ZATTR_BUFFER_SIZE,
false);
ndbrequire(ret != -1);
noAfterWords= ret;
} else {
jam();
noAfterWords = 0;
}
//--------------------------------------------------------------------
// Read Copy tuple values for UPDATE's
//--------------------------------------------------------------------
// Initialise pagep and tuple offset for read of copy tuple
//--------------------------------------------------------------------
if ((regOperPtr->op_struct.op_type == ZUPDATE ||
regOperPtr->op_struct.op_type == ZDELETE) &&
(trigPtr->sendBeforeValues)) {
jam();
Tuple_header *save= req_struct->m_tuple_ptr;
PagePtr tmp;
if(regOperPtr->is_first_operation())
{
Uint32 *ptr= get_ptr(&tmp, &regOperPtr->m_tuple_location, regTabPtr);
req_struct->m_tuple_ptr= (Tuple_header*)ptr;
}
else
{
Uint32 *ptr=
c_undo_buffer.get_ptr(&req_struct->prevOpPtr.p->m_copy_tuple_location);
req_struct->m_tuple_ptr= (Tuple_header*)ptr;
}
if (regTabPtr->need_expand(disk))
prepare_read(req_struct, regTabPtr, disk);
int ret = readAttributes(req_struct,
&readBuffer[0],
numAttrsToRead,
beforeBuffer,
ZATTR_BUFFER_SIZE,
false);
req_struct->m_tuple_ptr= save;
ndbrequire(ret != -1);
noBeforeWords = ret;
if (trigPtr->m_receiverBlock != SUMA &&
(noAfterWords == noBeforeWords) &&
(memcmp(afterBuffer, beforeBuffer, noAfterWords << 2) == 0)) {
//--------------------------------------------------------------------
// Although a trigger was fired it was not necessary since the old
// value and the new value was exactly the same
//--------------------------------------------------------------------
jam();
//XXX does this work with collations?
return false;
}
}
return true;
}
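/*
 * Worked example of the blob-mask arithmetic above (illustrative 4-bit
 * masks, attribute 0 = least significant bit):
 *   attributeMask     = 1111
 *   blobAttributeMask = 0110  (attributes 1 and 2 are blob inlines)
 *   changeMask        = 0010  (only attribute 1 changed)
 *   tmpMask = blob & ~change  = 0100
 *   result  = attr & ~tmpMask = 1011  -> unchanged blob attribute 2 omitted
 */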
void Dbtup::sendTrigAttrInfo(Signal* signal,
Uint32* data,
Uint32 dataLen,
bool executeDirect,
BlockReference receiverReference)
{
TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
Uint32 sigLen;
Uint32 dataIndex = 0;
do {
sigLen = dataLen - dataIndex;
if (sigLen > TrigAttrInfo::DataLength) {
jam();
sigLen = TrigAttrInfo::DataLength;
}
MEMCOPY_NO_WORDS(trigAttrInfo->getData(),
data + dataIndex,
sigLen);
if (executeDirect) {
jam();
EXECUTE_DIRECT(receiverReference,
GSN_TRIG_ATTRINFO,
signal,
TrigAttrInfo::StaticLength + sigLen);
jamEntry();
} else {
jam();
sendSignal(receiverReference,
GSN_TRIG_ATTRINFO,
signal,
TrigAttrInfo::StaticLength + sigLen,
JBB);
}
dataIndex += sigLen;
} while (dataLen != dataIndex);
}
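/*
 * Segmentation example (illustrative): with dataLen equal to
 * 2 * TrigAttrInfo::DataLength + 5 words, the loop above emits three
 * TRIG_ATTRINFO signals carrying DataLength, DataLength and 5 data words
 * respectively, each sent either via EXECUTE_DIRECT or sendSignal.
 */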
void Dbtup::sendFireTrigOrd(Signal* signal,
KeyReqStruct *req_struct,
Operationrec * const regOperPtr,
TupTriggerData* const trigPtr,
Uint32 fragmentId,
Uint32 noPrimKeyWords,
Uint32 noBeforeValueWords,
Uint32 noAfterValueWords)
{
FireTrigOrd* const fireTrigOrd = (FireTrigOrd *)signal->getDataPtrSend();
fireTrigOrd->setConnectionPtr(req_struct->TC_index);
fireTrigOrd->setTriggerId(trigPtr->triggerId);
fireTrigOrd->fragId= fragmentId;
switch(regOperPtr->op_struct.op_type) {
case(ZINSERT):
jam();
fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT);
break;
case(ZDELETE):
jam();
fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE);
break;
case(ZUPDATE):
jam();
fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE);
break;
default:
ndbrequire(false);
break;
}
fireTrigOrd->setNoOfPrimaryKeyWords(noPrimKeyWords);
fireTrigOrd->setNoOfBeforeValueWords(noBeforeValueWords);
fireTrigOrd->setNoOfAfterValueWords(noAfterValueWords);
switch(trigPtr->triggerType) {
case (TriggerType::SECONDARY_INDEX):
jam();
sendSignal(req_struct->TC_ref, GSN_FIRE_TRIG_ORD,
signal, FireTrigOrd::SignalLength, JBB);
break;
case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
jam();
// Since only backup uses subscription triggers we
// send to backup directly for now
fireTrigOrd->setGCI(req_struct->gci);
fireTrigOrd->setHashValue(req_struct->hash_value);
fireTrigOrd->m_any_value = regOperPtr->m_any_value;
EXECUTE_DIRECT(trigPtr->m_receiverBlock,
GSN_FIRE_TRIG_ORD,
signal,
FireTrigOrd::SignalLengthSuma);
break;
case (TriggerType::SUBSCRIPTION):
jam();
// Since only backup uses subscription triggers we
// send to backup directly for now
fireTrigOrd->setGCI(req_struct->gci);
EXECUTE_DIRECT(trigPtr->m_receiverBlock,
GSN_FIRE_TRIG_ORD,
signal,
FireTrigOrd::SignalWithGCILength);
break;
default:
ndbrequire(false);
break;
}
}
/*
* Ordered index triggers.
*
* Insert: add entry to index
* Update: add entry to index, delay remove until commit
* Delete: do nothing, delay remove until commit
* Commit: remove entry delayed from update and delete
* Abort : remove entry added by insert and update
*
* See Notes.txt for the details.
*/
int
Dbtup::executeTuxInsertTriggers(Signal* signal,
Operationrec* regOperPtr,
Fragrecord* regFragPtr,
Tablerec* regTabPtr)
{
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
// fill in constant part
req->tableId = regFragPtr->fragTableId;
req->fragId = regFragPtr->fragmentId;
req->pageId = regOperPtr->m_tuple_location.m_page_no;
req->pageIndex = regOperPtr->m_tuple_location.m_page_idx;
req->tupVersion = regOperPtr->tupVersion;
req->opInfo = TuxMaintReq::OpAdd;
return addTuxEntries(signal, regOperPtr, regTabPtr);
}
int
Dbtup::executeTuxUpdateTriggers(Signal* signal,
Operationrec* regOperPtr,
Fragrecord* regFragPtr,
Tablerec* regTabPtr)
{
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
// fill in constant part
req->tableId = regFragPtr->fragTableId;
req->fragId = regFragPtr->fragmentId;
req->pageId = regOperPtr->m_tuple_location.m_page_no;
req->pageIndex = regOperPtr->m_tuple_location.m_page_idx;
req->tupVersion = regOperPtr->tupVersion;
req->opInfo = TuxMaintReq::OpAdd;
return addTuxEntries(signal, regOperPtr, regTabPtr);
}
int
Dbtup::addTuxEntries(Signal* signal,
Operationrec* regOperPtr,
Tablerec* regTabPtr)
{
if (ERROR_INSERTED(4022)) {
jam();
CLEAR_ERROR_INSERT_VALUE;
terrorCode = 9999;
return -1;
}
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
const DLList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
TriggerPtr triggerPtr;
Uint32 failPtrI;
triggerList.first(triggerPtr);
while (triggerPtr.i != RNIL) {
jam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL;
if (ERROR_INSERTED(4023) &&
! triggerList.hasNext(triggerPtr)) {
jam();
CLEAR_ERROR_INSERT_VALUE;
terrorCode = 9999;
failPtrI = triggerPtr.i;
goto fail;
}
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
jamEntry();
if (req->errorCode != 0) {
jam();
terrorCode = req->errorCode;
failPtrI = triggerPtr.i;
goto fail;
}
triggerList.next(triggerPtr);
}
return 0;
fail:
req->opInfo = TuxMaintReq::OpRemove;
triggerList.first(triggerPtr);
while (triggerPtr.i != failPtrI) {
jam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL;
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
jamEntry();
ndbrequire(req->errorCode == 0);
triggerList.next(triggerPtr);
}
#ifdef VM_TRACE
ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
#endif
return -1;
}
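/*
 * Failure handling above, illustrated: with three ordered indexes and a
 * TUX_MAINT_REQ failure on the third, the 'fail' path replays the list
 * with OpRemove for the first two indexes only (stopping at failPtrI),
 * undoing the partially applied maintenance before returning -1.
 */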
int
Dbtup::executeTuxDeleteTriggers(Signal* signal,
Operationrec* const regOperPtr,
Fragrecord* const regFragPtr,
Tablerec* const regTabPtr)
{
// do nothing
return 0;
}
void
Dbtup::executeTuxCommitTriggers(Signal* signal,
Operationrec* regOperPtr,
Fragrecord* regFragPtr,
Tablerec* regTabPtr)
{
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
Uint32 tupVersion;
if (regOperPtr->op_struct.op_type == ZINSERT) {
if (! regOperPtr->op_struct.delete_insert_flag)
return;
jam();
tupVersion= decr_tup_version(regOperPtr->tupVersion);
} else if (regOperPtr->op_struct.op_type == ZUPDATE) {
jam();
tupVersion= decr_tup_version(regOperPtr->tupVersion);
} else if (regOperPtr->op_struct.op_type == ZDELETE) {
if (regOperPtr->op_struct.delete_insert_flag)
return;
jam();
tupVersion= regOperPtr->tupVersion;
} else {
ndbrequire(false);
tupVersion= 0; // remove warning
}
// fill in constant part
req->tableId = regFragPtr->fragTableId;
req->fragId = regFragPtr->fragmentId;
req->pageId = regOperPtr->m_tuple_location.m_page_no;
req->pageIndex = regOperPtr->m_tuple_location.m_page_idx;
req->tupVersion = tupVersion;
req->opInfo = TuxMaintReq::OpRemove;
removeTuxEntries(signal, regTabPtr);
}
void
Dbtup::executeTuxAbortTriggers(Signal* signal,
Operationrec* regOperPtr,
Fragrecord* regFragPtr,
Tablerec* regTabPtr)
{
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
// get version
Uint32 tupVersion;
if (regOperPtr->op_struct.op_type == ZINSERT) {
jam();
tupVersion = regOperPtr->tupVersion;
} else if (regOperPtr->op_struct.op_type == ZUPDATE) {
jam();
tupVersion = regOperPtr->tupVersion;
} else if (regOperPtr->op_struct.op_type == ZDELETE) {
jam();
return;
} else {
ndbrequire(false);
tupVersion= 0; // remove warning
}
// fill in constant part
req->tableId = regFragPtr->fragTableId;
req->fragId = regFragPtr->fragmentId;
req->pageId = regOperPtr->m_tuple_location.m_page_no;
req->pageIndex = regOperPtr->m_tuple_location.m_page_idx;
req->tupVersion = tupVersion;
req->opInfo = TuxMaintReq::OpRemove;
removeTuxEntries(signal, regTabPtr);
}
void
Dbtup::removeTuxEntries(Signal* signal,
Tablerec* regTabPtr)
{
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
const DLList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
TriggerPtr triggerPtr;
triggerList.first(triggerPtr);
while (triggerPtr.i != RNIL) {
jam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL;
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
jamEntry();
// must succeed
ndbrequire(req->errorCode == 0);
triggerList.next(triggerPtr);
}
}
| gpl-2.0 |
ssloy/trik-linux | drivers/media/video/gspca/gl860/gl860.c | 287 | 19099 | /* GSPCA subdrivers for Genesys Logic webcams with the GL860 chip
* Subdriver core
*
* 2009/09/24 Olivier Lorin <o.lorin@laposte.net>
* GSPCA by Jean-Francois Moine <http://moinejf.free.fr>
* Thanks BUGabundo and Malmostoso for your amazing help!
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "gspca.h"
#include "gl860.h"
MODULE_AUTHOR("Olivier Lorin <o.lorin@laposte.net>");
MODULE_DESCRIPTION("Genesys Logic USB PC Camera Driver");
MODULE_LICENSE("GPL");
/*======================== static function declarations ====================*/
static void (*dev_init_settings)(struct gspca_dev *gspca_dev);
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id);
static int sd_init(struct gspca_dev *gspca_dev);
static int sd_isoc_init(struct gspca_dev *gspca_dev);
static int sd_start(struct gspca_dev *gspca_dev);
static void sd_stop0(struct gspca_dev *gspca_dev);
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, int len);
static void sd_callback(struct gspca_dev *gspca_dev);
static int gl860_guess_sensor(struct gspca_dev *gspca_dev,
u16 vendor_id, u16 product_id);
/*============================ driver options ==============================*/
static s32 AC50Hz = 0xff;
module_param(AC50Hz, int, 0644);
MODULE_PARM_DESC(AC50Hz, " Is the AC power frequency 50Hz? (0/1)");
static char sensor[7];
module_param_string(sensor, sensor, sizeof(sensor), 0644);
MODULE_PARM_DESC(sensor,
" Driver sensor ('MI1320'/'MI2020'/'OV9655'/'OV2640')");
/*============================ webcam controls =============================*/
/* Functions to get and set a control value */
#define SD_SETGET(thename) \
static int sd_set_##thename(struct gspca_dev *gspca_dev, s32 val)\
{\
struct sd *sd = (struct sd *) gspca_dev;\
\
sd->vcur.thename = val;\
if (gspca_dev->streaming)\
sd->waitSet = 1;\
return 0;\
} \
static int sd_get_##thename(struct gspca_dev *gspca_dev, s32 *val)\
{\
struct sd *sd = (struct sd *) gspca_dev;\
\
*val = sd->vcur.thename;\
return 0;\
}
SD_SETGET(mirror)
SD_SETGET(flip)
SD_SETGET(AC50Hz)
SD_SETGET(backlight)
SD_SETGET(brightness)
SD_SETGET(gamma)
SD_SETGET(hue)
SD_SETGET(saturation)
SD_SETGET(sharpness)
SD_SETGET(whitebal)
SD_SETGET(contrast)
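/*
 * For reference, SD_SETGET(mirror) above expands mechanically to:
 *
 * static int sd_set_mirror(struct gspca_dev *gspca_dev, s32 val)
 * {
 *	struct sd *sd = (struct sd *) gspca_dev;
 *
 *	sd->vcur.mirror = val;
 *	if (gspca_dev->streaming)
 *		sd->waitSet = 1;
 *	return 0;
 * }
 * static int sd_get_mirror(struct gspca_dev *gspca_dev, s32 *val)
 * {
 *	struct sd *sd = (struct sd *) gspca_dev;
 *
 *	*val = sd->vcur.mirror;
 *	return 0;
 * }
 */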
#define GL860_NCTRLS 11
/* control table */
static struct ctrl sd_ctrls_mi1320[GL860_NCTRLS];
static struct ctrl sd_ctrls_mi2020[GL860_NCTRLS];
static struct ctrl sd_ctrls_ov2640[GL860_NCTRLS];
static struct ctrl sd_ctrls_ov9655[GL860_NCTRLS];
#define SET_MY_CTRL(theid, \
thetype, thelabel, thename) \
if (sd->vmax.thename != 0) {\
sd_ctrls[nCtrls].qctrl.id = theid;\
sd_ctrls[nCtrls].qctrl.type = thetype;\
strcpy(sd_ctrls[nCtrls].qctrl.name, thelabel);\
sd_ctrls[nCtrls].qctrl.minimum = 0;\
sd_ctrls[nCtrls].qctrl.maximum = sd->vmax.thename;\
sd_ctrls[nCtrls].qctrl.default_value = sd->vcur.thename;\
sd_ctrls[nCtrls].qctrl.step = \
(sd->vmax.thename < 16) ? 1 : sd->vmax.thename/16;\
sd_ctrls[nCtrls].set = sd_set_##thename;\
sd_ctrls[nCtrls].get = sd_get_##thename;\
nCtrls++;\
}
static int gl860_build_control_table(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
struct ctrl *sd_ctrls;
int nCtrls = 0;
if (_MI1320_)
sd_ctrls = sd_ctrls_mi1320;
else if (_MI2020_)
sd_ctrls = sd_ctrls_mi2020;
else if (_OV2640_)
sd_ctrls = sd_ctrls_ov2640;
else if (_OV9655_)
sd_ctrls = sd_ctrls_ov9655;
else
return 0;
memset(sd_ctrls, 0, GL860_NCTRLS * sizeof(struct ctrl));
SET_MY_CTRL(V4L2_CID_BRIGHTNESS,
V4L2_CTRL_TYPE_INTEGER, "Brightness", brightness)
SET_MY_CTRL(V4L2_CID_SHARPNESS,
V4L2_CTRL_TYPE_INTEGER, "Sharpness", sharpness)
SET_MY_CTRL(V4L2_CID_CONTRAST,
V4L2_CTRL_TYPE_INTEGER, "Contrast", contrast)
SET_MY_CTRL(V4L2_CID_GAMMA,
V4L2_CTRL_TYPE_INTEGER, "Gamma", gamma)
SET_MY_CTRL(V4L2_CID_HUE,
V4L2_CTRL_TYPE_INTEGER, "Palette", hue)
SET_MY_CTRL(V4L2_CID_SATURATION,
V4L2_CTRL_TYPE_INTEGER, "Saturation", saturation)
SET_MY_CTRL(V4L2_CID_WHITE_BALANCE_TEMPERATURE,
V4L2_CTRL_TYPE_INTEGER, "White Bal.", whitebal)
SET_MY_CTRL(V4L2_CID_BACKLIGHT_COMPENSATION,
V4L2_CTRL_TYPE_INTEGER, "Backlight" , backlight)
SET_MY_CTRL(V4L2_CID_HFLIP,
V4L2_CTRL_TYPE_BOOLEAN, "Mirror", mirror)
SET_MY_CTRL(V4L2_CID_VFLIP,
V4L2_CTRL_TYPE_BOOLEAN, "Flip", flip)
SET_MY_CTRL(V4L2_CID_POWER_LINE_FREQUENCY,
V4L2_CTRL_TYPE_BOOLEAN, "AC power 50Hz", AC50Hz)
return nCtrls;
}
/*==================== sub-driver structure initialisation =================*/
static const struct sd_desc sd_desc_mi1320 = {
.name = MODULE_NAME,
.ctrls = sd_ctrls_mi1320,
.nctrls = GL860_NCTRLS,
.config = sd_config,
.init = sd_init,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = sd_callback,
};
static const struct sd_desc sd_desc_mi2020 = {
.name = MODULE_NAME,
.ctrls = sd_ctrls_mi2020,
.nctrls = GL860_NCTRLS,
.config = sd_config,
.init = sd_init,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = sd_callback,
};
static const struct sd_desc sd_desc_ov2640 = {
.name = MODULE_NAME,
.ctrls = sd_ctrls_ov2640,
.nctrls = GL860_NCTRLS,
.config = sd_config,
.init = sd_init,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = sd_callback,
};
static const struct sd_desc sd_desc_ov9655 = {
.name = MODULE_NAME,
.ctrls = sd_ctrls_ov9655,
.nctrls = GL860_NCTRLS,
.config = sd_config,
.init = sd_init,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = sd_callback,
};
/*=========================== sub-driver image sizes =======================*/
static struct v4l2_pix_format mi2020_mode[] = {
{ 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0
},
{ 800, 598, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 800,
.sizeimage = 800 * 598,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1
},
{1280, 1024, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 1280,
.sizeimage = 1280 * 1024,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2
},
{1600, 1198, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 1600,
.sizeimage = 1600 * 1198,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3
},
};
static struct v4l2_pix_format ov2640_mode[] = {
{ 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0
},
{ 800, 600, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 800,
.sizeimage = 800 * 600,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1
},
{1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 1280,
.sizeimage = 1280 * 960,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2
},
{1600, 1200, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 1600,
.sizeimage = 1600 * 1200,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3
},
};
static struct v4l2_pix_format mi1320_mode[] = {
{ 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0
},
{ 800, 600, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 800,
.sizeimage = 800 * 600,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1
},
{1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 1280,
.sizeimage = 1280 * 960,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2
},
};
static struct v4l2_pix_format ov9655_mode[] = {
{ 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0
},
{1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
.bytesperline = 1280,
.sizeimage = 1280 * 960,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1
},
};
/*========================= sub-driver functions ===========================*/
/* This function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
u16 vendor_id, product_id;
/* Get USB VendorID and ProductID */
vendor_id = id->idVendor;
product_id = id->idProduct;
sd->nbRightUp = 1;
sd->nbIm = -1;
sd->sensor = 0xff;
if (strcmp(sensor, "MI1320") == 0)
sd->sensor = ID_MI1320;
else if (strcmp(sensor, "OV2640") == 0)
sd->sensor = ID_OV2640;
else if (strcmp(sensor, "OV9655") == 0)
sd->sensor = ID_OV9655;
else if (strcmp(sensor, "MI2020") == 0)
sd->sensor = ID_MI2020;
/* Get sensor and set the suitable init/start/../stop functions */
if (gl860_guess_sensor(gspca_dev, vendor_id, product_id) == -1)
return -1;
cam = &gspca_dev->cam;
switch (sd->sensor) {
case ID_MI1320:
gspca_dev->sd_desc = &sd_desc_mi1320;
cam->cam_mode = mi1320_mode;
cam->nmodes = ARRAY_SIZE(mi1320_mode);
dev_init_settings = mi1320_init_settings;
break;
case ID_MI2020:
gspca_dev->sd_desc = &sd_desc_mi2020;
cam->cam_mode = mi2020_mode;
cam->nmodes = ARRAY_SIZE(mi2020_mode);
dev_init_settings = mi2020_init_settings;
break;
case ID_OV2640:
gspca_dev->sd_desc = &sd_desc_ov2640;
cam->cam_mode = ov2640_mode;
cam->nmodes = ARRAY_SIZE(ov2640_mode);
dev_init_settings = ov2640_init_settings;
break;
case ID_OV9655:
gspca_dev->sd_desc = &sd_desc_ov9655;
cam->cam_mode = ov9655_mode;
cam->nmodes = ARRAY_SIZE(ov9655_mode);
dev_init_settings = ov9655_init_settings;
break;
}
dev_init_settings(gspca_dev);
if (AC50Hz != 0xff)
((struct sd *) gspca_dev)->vcur.AC50Hz = AC50Hz;
gl860_build_control_table(gspca_dev);
return 0;
}
/* This function is called at probe time after sd_config */
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return sd->dev_init_at_startup(gspca_dev);
}
/* This function is called before to choose the alt setting */
static int sd_isoc_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return sd->dev_configure_alt(gspca_dev);
}
/* This function is called to start the webcam */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return sd->dev_init_pre_alt(gspca_dev);
}
/* This function is called to stop the webcam */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
if (!sd->gspca_dev.present)
return;
sd->dev_post_unset_alt(gspca_dev);
}
/* This function is called when an image is being received */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, int len)
{
struct sd *sd = (struct sd *) gspca_dev;
static s32 nSkipped;
s32 mode = (s32) gspca_dev->curr_mode;
s32 nToSkip =
sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1);
/* Test only against 0202h, so endianness does not matter */
switch (*(s16 *) data) {
case 0x0202: /* End of frame, start a new one */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
nSkipped = 0;
if (sd->nbIm >= 0 && sd->nbIm < 10)
sd->nbIm++;
gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
break;
default:
data += 2;
len -= 2;
if (nSkipped + len <= nToSkip)
nSkipped += len;
else {
if (nSkipped < nToSkip && nSkipped + len > nToSkip) {
data += nToSkip - nSkipped;
len -= nToSkip - nSkipped;
nSkipped = nToSkip + 1;
}
gspca_frame_add(gspca_dev,
INTER_PACKET, data, len);
}
break;
}
}
/* This function is called when an image has been read */
/* This function is used to monitor webcam orientation */
static void sd_callback(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
if (!_OV9655_) {
u8 state;
u8 upsideDown;
/* Probe sensor orientation */
ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, (void *)&state);
/* C8/40 means upside-down (looking backwards) */
/* D8/50 means right-up (looking onwards) */
upsideDown = (state == 0xc8 || state == 0x40);
if (upsideDown && sd->nbRightUp > -4) {
if (sd->nbRightUp > 0)
sd->nbRightUp = 0;
if (sd->nbRightUp == -3) {
sd->mirrorMask = 1;
sd->waitSet = 1;
}
sd->nbRightUp--;
}
if (!upsideDown && sd->nbRightUp < 4) {
if (sd->nbRightUp < 0)
sd->nbRightUp = 0;
if (sd->nbRightUp == 3) {
sd->mirrorMask = 0;
sd->waitSet = 1;
}
sd->nbRightUp++;
}
}
if (sd->waitSet)
sd->dev_camera_settings(gspca_dev);
}
/*=================== USB driver structure initialisation ==================*/
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x05e3, 0x0503)},
{USB_DEVICE(0x05e3, 0xf191)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id,
&sd_desc_mi1320, sizeof(struct sd), THIS_MODULE);
}
static void sd_disconnect(struct usb_interface *intf)
{
gspca_disconnect(intf);
}
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = sd_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
.reset_resume = gspca_resume,
#endif
};
/*====================== Init and Exit module functions ====================*/
module_usb_driver(sd_driver);
/*==========================================================================*/
int gl860_RTx(struct gspca_dev *gspca_dev,
unsigned char pref, u32 req, u16 val, u16 index,
s32 len, void *pdata)
{
struct usb_device *udev = gspca_dev->dev;
s32 r = 0;
if (pref == 0x40) { /* Send */
if (len > 0) {
memcpy(gspca_dev->usb_buf, pdata, len);
r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
req, pref, val, index,
gspca_dev->usb_buf,
len, 400 + 200 * (len > 1));
} else {
r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
req, pref, val, index, NULL, len, 400);
}
} else { /* Receive */
if (len > 0) {
r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
req, pref, val, index,
gspca_dev->usb_buf,
len, 400 + 200 * (len > 1));
memcpy(pdata, gspca_dev->usb_buf, len);
} else {
r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
req, pref, val, index, NULL, len, 400);
}
}
if (r < 0)
pr_err("ctrl transfer failed %4d [p%02x r%d v%04x i%04x len%d]\n",
r, pref, req, val, index, len);
else if (len > 1 && r < len)
PDEBUG(D_ERR, "short ctrl transfer %d/%d", r, len);
msleep(1);
return r;
}
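/*
 * Typical call (mirrors the ctrl_in() use in sd_callback(); pref 0xc0
 * selects the receive path, 0x40 the send path):
 *
 *	u8 state;
 *	gl860_RTx(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, &state);
 */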
int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len)
{
int n;
for (n = 0; n < len; n++) {
if (tbl[n].idx != 0xffff)
ctrl_out(gspca_dev, 0x40, 1, tbl[n].val,
tbl[n].idx, 0, NULL);
else if (tbl[n].val == 0xffff)
break;
else
msleep(tbl[n].val);
}
return n;
}
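/*
 * Entry semantics of the validx tables consumed by fetch_validx() and
 * keep_on_fetching_validx(), as implemented by the loops here:
 *   idx != 0xffff                -> ctrl_out(..., 0x40, 1, val, idx, 0, NULL)
 *   idx == 0xffff, val != 0xffff -> msleep(val)
 *   idx == 0xffff, val == 0xffff -> end of table
 */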
int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl,
int len, int n)
{
while (++n < len) {
if (tbl[n].idx != 0xffff)
ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx,
0, NULL);
else if (tbl[n].val == 0xffff)
break;
else
msleep(tbl[n].val);
}
return n;
}
void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len)
{
int n;
for (n = 0; n < len; n++) {
if (memcmp(tbl[n].data, "\xff\xff\xff", 3) != 0)
ctrl_out(gspca_dev, 0x40, 3, 0x7a00, tbl[n].idx,
3, tbl[n].data);
else
msleep(tbl[n].idx);
}
}
static int gl860_guess_sensor(struct gspca_dev *gspca_dev,
u16 vendor_id, u16 product_id)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 probe, nb26, nb96, nOV, ntry;
if (product_id == 0xf191)
sd->sensor = ID_MI1320;
if (sd->sensor == 0xff) {
ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &probe);
ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &probe);
ctrl_out(gspca_dev, 0x40, 1, 0x0000, 0x0000, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x0010, 0x0010, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x0008, 0x00c0, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x0001, 0x00c1, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x0001, 0x00c2, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x0020, 0x0006, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x006a, 0x000d, 0, NULL);
msleep(56);
PDEBUG(D_PROBE, "probing for sensor MI2020 or OVXXXX");
nOV = 0;
for (ntry = 0; ntry < 4; ntry++) {
ctrl_out(gspca_dev, 0x40, 1, 0x0040, 0x0000, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x0063, 0x0006, 0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x7a00, 0x8030, 0, NULL);
msleep(10);
ctrl_in(gspca_dev, 0xc0, 2, 0x7a00, 0x8030, 1, &probe);
PDEBUG(D_PROBE, "probe=0x%02x", probe);
if (probe == 0xff)
nOV++;
}
if (nOV) {
PDEBUG(D_PROBE, "0xff -> OVXXXX");
PDEBUG(D_PROBE, "probing for sensor OV2640 or OV9655");
nb26 = nb96 = 0;
for (ntry = 0; ntry < 4; ntry++) {
ctrl_out(gspca_dev, 0x40, 1, 0x0040, 0x0000,
0, NULL);
msleep(3);
ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x800a,
0, NULL);
msleep(10);
/* Wait for 26(OV2640) or 96(OV9655) */
ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x800a,
1, &probe);
if (probe == 0x26 || probe == 0x40) {
PDEBUG(D_PROBE,
"probe=0x%02x -> OV2640",
probe);
sd->sensor = ID_OV2640;
nb26 += 4;
break;
}
if (probe == 0x96 || probe == 0x55) {
PDEBUG(D_PROBE,
"probe=0x%02x -> OV9655",
probe);
sd->sensor = ID_OV9655;
nb96 += 4;
break;
}
PDEBUG(D_PROBE, "probe=0x%02x", probe);
if (probe == 0x00)
nb26++;
if (probe == 0xff)
nb96++;
msleep(3);
}
if (nb26 < 4 && nb96 < 4)
return -1;
} else {
PDEBUG(D_PROBE, "Not any 0xff -> MI2020");
sd->sensor = ID_MI2020;
}
}
if (_MI1320_) {
PDEBUG(D_PROBE, "05e3:f191 sensor MI1320 (1.3M)");
} else if (_MI2020_) {
PDEBUG(D_PROBE, "05e3:0503 sensor MI2020 (2.0M)");
} else if (_OV9655_) {
PDEBUG(D_PROBE, "05e3:0503 sensor OV9655 (1.3M)");
} else if (_OV2640_) {
PDEBUG(D_PROBE, "05e3:0503 sensor OV2640 (2.0M)");
} else {
PDEBUG(D_PROBE, "***** Unknown sensor *****");
return -1;
}
return 0;
}
| gpl-2.0 |
sirkay/xkernel-msm7x30 | arch/h8300/kernel/time.c | 799 | 1522 | /*
* linux/arch/h8300/kernel/time.c
*
* Yoshinori Sato <ysato@users.sourceforge.jp>
*
* Copied/hacked from:
*
* linux/arch/m68k/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*
* This file contains the m68k-specific time handling details.
* Most of the stuff is located in the machine specific files.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/timex.h>
#include <linux/profile.h>
#include <asm/io.h>
#include <asm/timer.h>
#define TICK_SIZE (tick_nsec / 1000)
void h8300_timer_tick(void)
{
if (current->pid)
profile_tick(CPU_PROFILING);
write_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
}
void __init time_init(void)
{
unsigned int year, mon, day, hour, min, sec;
/* FIX by dqg : Set to zero for platforms that don't have tod */
/* without this time is undefined and can overflow time_t, causing */
/* very strange errors */
year = 1980;
mon = day = 1;
hour = min = sec = 0;
#ifdef CONFIG_H8300_GETTOD
h8300_gettod (&year, &mon, &day, &hour, &min, &sec);
#endif
if ((year += 1900) < 1970)
year += 100;
xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
xtime.tv_nsec = 0;
h8300_timer_setup();
}
| gpl-2.0 |
mtk00874/kernel-mediatek | arch/arm/lib/uaccess_with_memcpy.c | 1311 | 6213 | /*
* linux/arch/arm/lib/uaccess_with_memcpy.c
*
* Written by: Lennert Buytenhek and Nicolas Pitre
* Copyright (C) 2009 Marvell Semiconductor
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
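/*
 * Walk the current task's page tables for a user address and take the
 * lock that keeps its mapping stable: on success *ptlp holds the lock
 * and *ptep is the mapped pte, or NULL for a huge/THP mapping (which is
 * covered by mm->page_table_lock instead). Returns 0 if the caller must
 * first fault the page in or make it writable.
 */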
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
unsigned long addr = (unsigned long)_addr;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pud_t *pud;
spinlock_t *ptl;
pgd = pgd_offset(current->mm, addr);
if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
return 0;
pud = pud_offset(pgd, addr);
if (unlikely(pud_none(*pud) || pud_bad(*pud)))
return 0;
pmd = pmd_offset(pud, addr);
if (unlikely(pmd_none(*pmd)))
return 0;
/*
* A pmd can be bad if it refers to a HugeTLB or THP page.
*
* Both THP and HugeTLB pages have the same pmd layout
* and should not be manipulated by the pte functions.
*
* Lock the page table for the destination and check
* to see that it's still huge and whether or not we will
* need to fault on write, or if we have a splitting THP.
*/
if (unlikely(pmd_thp_or_huge(*pmd))) {
ptl = &current->mm->page_table_lock;
spin_lock(ptl);
if (unlikely(!pmd_thp_or_huge(*pmd)
|| pmd_hugewillfault(*pmd)
|| pmd_trans_splitting(*pmd))) {
spin_unlock(ptl);
return 0;
}
*ptep = NULL;
*ptlp = ptl;
return 1;
}
if (unlikely(pmd_bad(*pmd)))
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
!pte_write(*pte) || !pte_dirty(*pte))) {
pte_unmap_unlock(pte, ptl);
return 0;
}
*ptep = pte;
*ptlp = ptl;
return 1;
}
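/*
 * memcpy()-based __copy_to_user(): pin each destination page before
 * copying; a page that cannot be pinned yet is faulted in (and dirtied)
 * with __put_user() and then retried.
 */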
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
int atomic;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
memcpy((void *)to, from, n);
return 0;
}
/* the mmap semaphore is taken only if not in an atomic context */
atomic = in_atomic();
if (!atomic)
down_read(&current->mm->mmap_sem);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
up_read(&current->mm->mmap_sem);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
down_read(&current->mm->mmap_sem);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
if (tocopy > n)
tocopy = n;
memcpy((void *)to, from, tocopy);
to += tocopy;
from += tocopy;
n -= tocopy;
if (pte)
pte_unmap_unlock(pte, ptl);
else
spin_unlock(ptl);
}
if (!atomic)
up_read(&current->mm->mmap_sem);
out:
return n;
}
unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
/*
* This test is stubbed out of the main function above to keep
* the overhead for small copies low by avoiding a large
* register dump on the stack just to reload them right away.
* With frame pointer disabled, tail call optimization kicks in
* as well making this test almost invisible.
*/
if (n < 64)
return __copy_to_user_std(to, from, n);
return __copy_to_user_memcpy(to, from, n);
}
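/* memset()-based __clear_user(), using the same page-pinning scheme as above. */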
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
memset((void *)addr, 0, n);
return 0;
}
down_read(&current->mm->mmap_sem);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
up_read(&current->mm->mmap_sem);
if (__put_user(0, (char __user *)addr))
goto out;
down_read(&current->mm->mmap_sem);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
if (tocopy > n)
tocopy = n;
memset((void *)addr, 0, tocopy);
addr += tocopy;
n -= tocopy;
if (pte)
pte_unmap_unlock(pte, ptl);
else
spin_unlock(ptl);
}
up_read(&current->mm->mmap_sem);
out:
return n;
}
unsigned long __clear_user(void __user *addr, unsigned long n)
{
/* See rationale for this in __copy_to_user() above. */
if (n < 64)
return __clear_user_std(addr, n);
return __clear_user_memset(addr, n);
}
#if 0
/*
* This code is disabled by default, but kept around in case the chosen
* thresholds need to be revalidated. Some overhead (small but still)
* would be implied by a runtime determined variable threshold, and
* so far the measurement on concerned targets didn't show a worthwhile
* variation.
*
* Note that a fairly precise sched_clock() implementation is needed
* for results to make some sense.
*/
#include <linux/vmalloc.h>
static int __init test_size_threshold(void)
{
struct page *src_page, *dst_page;
void *user_ptr, *kernel_ptr;
unsigned long long t0, t1, t2;
int size, ret;
ret = -ENOMEM;
src_page = alloc_page(GFP_KERNEL);
if (!src_page)
goto no_src;
dst_page = alloc_page(GFP_KERNEL);
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
if (!user_ptr)
goto no_vmap;
/* warm up the src page dcache */
ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
for (size = PAGE_SIZE; size >= 4; size /= 2) {
t0 = sched_clock();
ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
t1 = sched_clock();
ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
t2 = sched_clock();
printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
}
for (size = PAGE_SIZE; size >= 4; size /= 2) {
t0 = sched_clock();
ret |= __clear_user_memset(user_ptr, size);
t1 = sched_clock();
ret |= __clear_user_std(user_ptr, size);
t2 = sched_clock();
printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
}
if (ret)
ret = -EFAULT;
vunmap(user_ptr);
no_vmap:
put_page(dst_page);
no_dst:
put_page(src_page);
no_src:
return ret;
}
subsys_initcall(test_size_threshold);
#endif
| gpl-2.0 |
spezi77/android_kernel_htc_qsd8k | arch/sparc/kernel/ds.c | 1567 | 25435 | /* ds.c: Domain Services driver for Logical Domains
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>
#define DRV_MODULE_NAME "ds"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Jul 11, 2007"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
struct ds_msg_tag {
__u32 type;
#define DS_INIT_REQ 0x00
#define DS_INIT_ACK 0x01
#define DS_INIT_NACK 0x02
#define DS_REG_REQ 0x03
#define DS_REG_ACK 0x04
#define DS_REG_NACK 0x05
#define DS_UNREG_REQ 0x06
#define DS_UNREG_ACK 0x07
#define DS_UNREG_NACK 0x08
#define DS_DATA 0x09
#define DS_NACK 0x0a
__u32 len;
};
/* Result codes */
#define DS_OK 0x00
#define DS_REG_VER_NACK 0x01
#define DS_REG_DUP 0x02
#define DS_INV_HDL 0x03
#define DS_TYPE_UNKNOWN 0x04
struct ds_version {
__u16 major;
__u16 minor;
};
struct ds_ver_req {
struct ds_msg_tag tag;
struct ds_version ver;
};
struct ds_ver_ack {
struct ds_msg_tag tag;
__u16 minor;
};
struct ds_ver_nack {
struct ds_msg_tag tag;
__u16 major;
};
struct ds_reg_req {
struct ds_msg_tag tag;
__u64 handle;
__u16 major;
__u16 minor;
char svc_id[0];
};
struct ds_reg_ack {
struct ds_msg_tag tag;
__u64 handle;
__u16 minor;
};
struct ds_reg_nack {
struct ds_msg_tag tag;
__u64 handle;
__u16 major;
};
struct ds_unreg_req {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_unreg_ack {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_unreg_nack {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_data {
struct ds_msg_tag tag;
__u64 handle;
};
struct ds_data_nack {
struct ds_msg_tag tag;
__u64 handle;
__u64 result;
};
struct ds_info;
struct ds_cap_state {
__u64 handle;
void (*data)(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
const char *service_id;
u8 state;
#define CAP_STATE_UNKNOWN 0x00
#define CAP_STATE_REG_SENT 0x01
#define CAP_STATE_REGISTERED 0x02
};
static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static void ds_var_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static struct ds_cap_state ds_states_template[] = {
{
.service_id = "md-update",
.data = md_update_data,
},
{
.service_id = "domain-shutdown",
.data = domain_shutdown_data,
},
{
.service_id = "domain-panic",
.data = domain_panic_data,
},
#ifdef CONFIG_HOTPLUG_CPU
{
.service_id = "dr-cpu",
.data = dr_cpu_data,
},
#endif
{
.service_id = "pri",
.data = ds_pri_data,
},
{
.service_id = "var-config",
.data = ds_var_data,
},
{
.service_id = "var-config-backup",
.data = ds_var_data,
},
};
static DEFINE_SPINLOCK(ds_lock);
struct ds_info {
struct ldc_channel *lp;
u8 hs_state;
#define DS_HS_START 0x01
#define DS_HS_DONE 0x02
u64 id;
void *rcv_buf;
int rcv_buf_len;
struct ds_cap_state *ds_states;
int num_ds_states;
struct ds_info *next;
};
static struct ds_info *ds_info_list;
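/* The upper 32 bits of a service handle index into dp->ds_states[]. */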
static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
unsigned int index = handle >> 32;
if (index >= dp->num_ds_states)
return NULL;
return &dp->ds_states[index];
}
static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
const char *name)
{
int i;
for (i = 0; i < dp->num_ds_states; i++) {
if (strcmp(dp->ds_states[i].service_id, name))
continue;
return &dp->ds_states[i];
}
return NULL;
}
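/*
 * Low-level send, intended to be called with ds_lock held: retries
 * ldc_write() for up to ~1000 iterations (1 us apart) while the channel
 * keeps returning -EAGAIN.
 */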
static int __ds_send(struct ldc_channel *lp, void *data, int len)
{
int err, limit = 1000;
err = -EINVAL;
while (limit-- > 0) {
err = ldc_write(lp, data, len);
if (!err || (err != -EAGAIN))
break;
udelay(1);
}
return err;
}
static int ds_send(struct ldc_channel *lp, void *data, int len)
{
unsigned long flags;
int err;
spin_lock_irqsave(&ds_lock, flags);
err = __ds_send(lp, data, len);
spin_unlock_irqrestore(&ds_lock, flags);
return err;
}
struct ds_md_update_req {
__u64 req_num;
};
struct ds_md_update_res {
__u64 req_num;
__u32 result;
};
static void md_update_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ldc_channel *lp = dp->lp;
struct ds_data *dpkt = buf;
struct ds_md_update_req *rp;
struct {
struct ds_data data;
struct ds_md_update_res res;
} pkt;
rp = (struct ds_md_update_req *) (dpkt + 1);
printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);
mdesc_update();
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt.data.handle = cp->handle;
pkt.res.req_num = rp->req_num;
pkt.res.result = DS_OK;
ds_send(lp, &pkt, sizeof(pkt));
}
struct ds_shutdown_req {
__u64 req_num;
__u32 ms_delay;
};
struct ds_shutdown_res {
__u64 req_num;
__u32 result;
char reason[1];
};
static void domain_shutdown_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ldc_channel *lp = dp->lp;
struct ds_data *dpkt = buf;
struct ds_shutdown_req *rp;
struct {
struct ds_data data;
struct ds_shutdown_res res;
} pkt;
rp = (struct ds_shutdown_req *) (dpkt + 1);
printk(KERN_ALERT "ds-%llu: Shutdown request from "
"LDOM manager received.\n", dp->id);
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt.data.handle = cp->handle;
pkt.res.req_num = rp->req_num;
pkt.res.result = DS_OK;
pkt.res.reason[0] = 0;
ds_send(lp, &pkt, sizeof(pkt));
orderly_poweroff(true);
}
struct ds_panic_req {
__u64 req_num;
};
struct ds_panic_res {
__u64 req_num;
__u32 result;
char reason[1];
};
static void domain_panic_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ldc_channel *lp = dp->lp;
struct ds_data *dpkt = buf;
struct ds_panic_req *rp;
struct {
struct ds_data data;
struct ds_panic_res res;
} pkt;
rp = (struct ds_panic_req *) (dpkt + 1);
printk(KERN_ALERT "ds-%llu: Panic request from "
"LDOM manager received.\n", dp->id);
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
pkt.data.handle = cp->handle;
pkt.res.req_num = rp->req_num;
pkt.res.result = DS_OK;
pkt.res.reason[0] = 0;
ds_send(lp, &pkt, sizeof(pkt));
panic("PANIC requested by LDOM manager.");
}
#ifdef CONFIG_HOTPLUG_CPU
struct dr_cpu_tag {
__u64 req_num;
__u32 type;
#define DR_CPU_CONFIGURE 0x43
#define DR_CPU_UNCONFIGURE 0x55
#define DR_CPU_FORCE_UNCONFIGURE 0x46
#define DR_CPU_STATUS 0x53
/* Responses */
#define DR_CPU_OK 0x6f
#define DR_CPU_ERROR 0x65
__u32 num_records;
};
struct dr_cpu_resp_entry {
__u32 cpu;
__u32 result;
#define DR_CPU_RES_OK 0x00
#define DR_CPU_RES_FAILURE 0x01
#define DR_CPU_RES_BLOCKED 0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING 0x03
#define DR_CPU_RES_NOT_IN_MD 0x04
__u32 stat;
#define DR_CPU_STAT_NOT_PRESENT 0x00
#define DR_CPU_STAT_UNCONFIGURED 0x01
#define DR_CPU_STAT_CONFIGURED 0x02
__u32 str_off;
};
static void __dr_cpu_send_error(struct ds_info *dp,
struct ds_cap_state *cp,
struct ds_data *data)
{
struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
struct {
struct ds_data data;
struct dr_cpu_tag tag;
} pkt;
int msg_len;
memset(&pkt, 0, sizeof(pkt));
pkt.data.tag.type = DS_DATA;
pkt.data.handle = cp->handle;
pkt.tag.req_num = tag->req_num;
pkt.tag.type = DR_CPU_ERROR;
pkt.tag.num_records = 0;
msg_len = (sizeof(struct ds_data) +
sizeof(struct dr_cpu_tag));
pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
__ds_send(dp->lp, &pkt, msg_len);
}
static void dr_cpu_send_error(struct ds_info *dp,
struct ds_cap_state *cp,
struct ds_data *data)
{
unsigned long flags;
spin_lock_irqsave(&ds_lock, flags);
__dr_cpu_send_error(dp, cp, data);
spin_unlock_irqrestore(&ds_lock, flags);
}
#define CPU_SENTINEL 0xffffffff
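/* Overwrite duplicate cpu ids with the sentinel so each cpu is handled once. */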
static void purge_dups(u32 *list, u32 num_ents)
{
unsigned int i;
for (i = 0; i < num_ents; i++) {
u32 cpu = list[i];
unsigned int j;
if (cpu == CPU_SENTINEL)
continue;
for (j = i + 1; j < num_ents; j++) {
if (list[j] == cpu)
list[j] = CPU_SENTINEL;
}
}
}
static int dr_cpu_size_response(int ncpus)
{
return (sizeof(struct ds_data) +
sizeof(struct dr_cpu_tag) +
(sizeof(struct dr_cpu_resp_entry) * ncpus));
}
static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
u64 handle, int resp_len, int ncpus,
cpumask_t *mask, u32 default_stat)
{
struct dr_cpu_resp_entry *ent;
struct dr_cpu_tag *tag;
int i, cpu;
tag = (struct dr_cpu_tag *) (resp + 1);
ent = (struct dr_cpu_resp_entry *) (tag + 1);
resp->tag.type = DS_DATA;
resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
resp->handle = handle;
tag->req_num = req_num;
tag->type = DR_CPU_OK;
tag->num_records = ncpus;
i = 0;
for_each_cpu_mask(cpu, *mask) {
ent[i].cpu = cpu;
ent[i].result = DR_CPU_RES_OK;
ent[i].stat = default_stat;
i++;
}
BUG_ON(i != ncpus);
}
static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
u32 res, u32 stat)
{
struct dr_cpu_resp_entry *ent;
struct dr_cpu_tag *tag;
int i;
tag = (struct dr_cpu_tag *) (resp + 1);
ent = (struct dr_cpu_resp_entry *) (tag + 1);
for (i = 0; i < ncpus; i++) {
if (ent[i].cpu != cpu)
continue;
ent[i].result = res;
ent[i].stat = stat;
break;
}
}
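/*
 * Refresh cpu data from the machine description, online every cpu in
 * *mask with cpu_up() while recording per-cpu failures in the prebuilt
 * response, then send the response and rebalance IRQs.
 */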
static int __cpuinit dr_cpu_configure(struct ds_info *dp,
struct ds_cap_state *cp,
u64 req_num,
cpumask_t *mask)
{
struct ds_data *resp;
int resp_len, ncpus, cpu;
unsigned long flags;
ncpus = cpus_weight(*mask);
resp_len = dr_cpu_size_response(ncpus);
resp = kzalloc(resp_len, GFP_KERNEL);
if (!resp)
return -ENOMEM;
dr_cpu_init_response(resp, req_num, cp->handle,
resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
mdesc_populate_present_mask(mask);
mdesc_fill_in_cpu_data(mask);
for_each_cpu_mask(cpu, *mask) {
int err;
printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
dp->id, cpu);
err = cpu_up(cpu);
if (err) {
__u32 res = DR_CPU_RES_FAILURE;
__u32 stat = DR_CPU_STAT_UNCONFIGURED;
if (!cpu_present(cpu)) {
/* CPU not present in MD */
res = DR_CPU_RES_NOT_IN_MD;
stat = DR_CPU_STAT_NOT_PRESENT;
} else if (err == -ENODEV) {
/* CPU did not call in successfully */
res = DR_CPU_RES_CPU_NOT_RESPONDING;
}
printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
dp->id, err);
dr_cpu_mark(resp, cpu, ncpus, res, stat);
}
}
spin_lock_irqsave(&ds_lock, flags);
__ds_send(dp->lp, resp, resp_len);
spin_unlock_irqrestore(&ds_lock, flags);
kfree(resp);
/* Redistribute IRQs, taking into account the new cpus. */
fixup_irqs();
return 0;
}
static int dr_cpu_unconfigure(struct ds_info *dp,
struct ds_cap_state *cp,
u64 req_num,
cpumask_t *mask)
{
struct ds_data *resp;
int resp_len, ncpus, cpu;
unsigned long flags;
ncpus = cpus_weight(*mask);
resp_len = dr_cpu_size_response(ncpus);
resp = kzalloc(resp_len, GFP_KERNEL);
if (!resp)
return -ENOMEM;
dr_cpu_init_response(resp, req_num, cp->handle,
resp_len, ncpus, mask,
DR_CPU_STAT_UNCONFIGURED);
for_each_cpu_mask(cpu, *mask) {
int err;
printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
dp->id, cpu);
err = cpu_down(cpu);
if (err)
dr_cpu_mark(resp, cpu, ncpus,
DR_CPU_RES_FAILURE,
DR_CPU_STAT_CONFIGURED);
}
spin_lock_irqsave(&ds_lock, flags);
__ds_send(dp->lp, resp, resp_len);
spin_unlock_irqrestore(&ds_lock, flags);
kfree(resp);
return 0;
}
static void __cpuinit dr_cpu_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ds_data *data = buf;
struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
u32 *cpu_list = (u32 *) (tag + 1);
u64 req_num = tag->req_num;
cpumask_t mask;
unsigned int i;
int err;
switch (tag->type) {
case DR_CPU_CONFIGURE:
case DR_CPU_UNCONFIGURE:
case DR_CPU_FORCE_UNCONFIGURE:
break;
default:
dr_cpu_send_error(dp, cp, data);
return;
}
purge_dups(cpu_list, tag->num_records);
cpus_clear(mask);
for (i = 0; i < tag->num_records; i++) {
if (cpu_list[i] == CPU_SENTINEL)
continue;
if (cpu_list[i] < nr_cpu_ids)
cpu_set(cpu_list[i], mask);
}
if (tag->type == DR_CPU_CONFIGURE)
err = dr_cpu_configure(dp, cp, req_num, &mask);
else
err = dr_cpu_unconfigure(dp, cp, req_num, &mask);
if (err)
dr_cpu_send_error(dp, cp, data);
}
#endif /* CONFIG_HOTPLUG_CPU */
struct ds_pri_msg {
__u64 req_num;
__u64 type;
#define DS_PRI_REQUEST 0x00
#define DS_PRI_DATA 0x01
#define DS_PRI_UPDATE 0x02
};
static void ds_pri_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ds_data *dpkt = buf;
struct ds_pri_msg *rp;
rp = (struct ds_pri_msg *) (dpkt + 1);
printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
dp->id, rp->req_num, rp->type, len);
}
struct ds_var_hdr {
__u32 type;
#define DS_VAR_SET_REQ 0x00
#define DS_VAR_DELETE_REQ 0x01
#define DS_VAR_SET_RESP 0x02
#define DS_VAR_DELETE_RESP 0x03
};
struct ds_var_set_msg {
struct ds_var_hdr hdr;
char name_and_value[0];
};
struct ds_var_delete_msg {
struct ds_var_hdr hdr;
char name[0];
};
struct ds_var_resp {
struct ds_var_hdr hdr;
__u32 result;
#define DS_VAR_SUCCESS 0x00
#define DS_VAR_NO_SPACE 0x01
#define DS_VAR_INVALID_VAR 0x02
#define DS_VAR_INVALID_VAL 0x03
#define DS_VAR_NOT_PRESENT 0x04
};
static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;
static void ds_var_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len)
{
struct ds_data *dpkt = buf;
struct ds_var_resp *rp;
rp = (struct ds_var_resp *) (dpkt + 1);
if (rp->hdr.type != DS_VAR_SET_RESP &&
rp->hdr.type != DS_VAR_DELETE_RESP)
return;
ds_var_response = rp->result;
wmb();
ds_var_doorbell = 1;
}
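/*
 * Send a variable-set request over the first registered "var-config"
 * (falling back to "var-config-backup") service, then poll for up to
 * ~100 ms for the doorbell that ds_var_data() rings on the response.
 */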
void ldom_set_var(const char *var, const char *value)
{
struct ds_cap_state *cp;
struct ds_info *dp;
unsigned long flags;
spin_lock_irqsave(&ds_lock, flags);
cp = NULL;
for (dp = ds_info_list; dp; dp = dp->next) {
struct ds_cap_state *tmp;
tmp = find_cap_by_string(dp, "var-config");
if (tmp && tmp->state == CAP_STATE_REGISTERED) {
cp = tmp;
break;
}
}
if (!cp) {
for (dp = ds_info_list; dp; dp = dp->next) {
struct ds_cap_state *tmp;
tmp = find_cap_by_string(dp, "var-config-backup");
if (tmp && tmp->state == CAP_STATE_REGISTERED) {
cp = tmp;
break;
}
}
}
spin_unlock_irqrestore(&ds_lock, flags);
if (cp) {
union {
struct {
struct ds_data data;
struct ds_var_set_msg msg;
} header;
char all[512];
} pkt;
char *base, *p;
int msg_len, loops;
memset(&pkt, 0, sizeof(pkt));
pkt.header.data.tag.type = DS_DATA;
pkt.header.data.handle = cp->handle;
pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
base = p = &pkt.header.msg.name_and_value[0];
strcpy(p, var);
p += strlen(var) + 1;
strcpy(p, value);
p += strlen(value) + 1;
msg_len = (sizeof(struct ds_data) +
sizeof(struct ds_var_set_msg) +
(p - base));
msg_len = (msg_len + 3) & ~3;
pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
mutex_lock(&ds_var_mutex);
spin_lock_irqsave(&ds_lock, flags);
ds_var_doorbell = 0;
ds_var_response = -1;
__ds_send(dp->lp, &pkt, msg_len);
spin_unlock_irqrestore(&ds_lock, flags);
loops = 1000;
while (ds_var_doorbell == 0) {
if (loops-- < 0)
break;
barrier();
udelay(100);
}
mutex_unlock(&ds_var_mutex);
if (ds_var_doorbell == 0 ||
ds_var_response != DS_VAR_SUCCESS)
printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
"failed, response(%d).\n",
dp->id, var, value,
ds_var_response);
} else {
printk(KERN_ERR PFX "var-config not registered so "
"could not set (%s) variable to (%s).\n",
var, value);
}
}
void ldom_reboot(const char *boot_command)
{
/* Don't bother with any of this if the boot_command
* is empty.
*/
if (boot_command && strlen(boot_command)) {
char full_boot_str[256];
strcpy(full_boot_str, "boot ");
strcpy(full_boot_str + strlen("boot "), boot_command);
ldom_set_var("reboot-command", full_boot_str);
}
sun4v_mach_sir();
}
void ldom_power_off(void)
{
sun4v_mach_exit(0);
}
static void ds_conn_reset(struct ds_info *dp)
{
printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
dp->id, __builtin_return_address(0));
}
static int register_services(struct ds_info *dp)
{
struct ldc_channel *lp = dp->lp;
int i;
for (i = 0; i < dp->num_ds_states; i++) {
struct {
struct ds_reg_req req;
u8 id_buf[256];
} pbuf;
struct ds_cap_state *cp = &dp->ds_states[i];
int err, msg_len;
u64 new_count;
if (cp->state == CAP_STATE_REGISTERED)
continue;
new_count = sched_clock() & 0xffffffff;
cp->handle = ((u64) i << 32) | new_count;
msg_len = (sizeof(struct ds_reg_req) +
strlen(cp->service_id));
memset(&pbuf, 0, sizeof(pbuf));
pbuf.req.tag.type = DS_REG_REQ;
pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
strcpy(pbuf.req.svc_id, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
cp->state = CAP_STATE_REG_SENT;
}
return 0;
}
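/*
 * Handshake state machine: the first packet after DS_INIT_REQ must be a
 * DS_INIT_ACK, after which capability registrations are tracked through
 * REG ACK/NACK; anything out of sequence resets the connection.
 */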
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
if (dp->hs_state == DS_HS_START) {
if (pkt->type != DS_INIT_ACK)
goto conn_reset;
dp->hs_state = DS_HS_DONE;
return register_services(dp);
}
if (dp->hs_state != DS_HS_DONE)
goto conn_reset;
if (pkt->type == DS_REG_ACK) {
struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
struct ds_cap_state *cp = find_cap(dp, ap->handle);
if (!cp) {
printk(KERN_ERR "ds-%llu: REG ACK for unknown "
"handle %llx\n", dp->id, ap->handle);
return 0;
}
printk(KERN_INFO "ds-%llu: Registered %s service.\n",
dp->id, cp->service_id);
cp->state = CAP_STATE_REGISTERED;
} else if (pkt->type == DS_REG_NACK) {
struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
struct ds_cap_state *cp = find_cap(dp, np->handle);
if (!cp) {
printk(KERN_ERR "ds-%llu: REG NACK for "
"unknown handle %llx\n",
dp->id, np->handle);
return 0;
}
cp->state = CAP_STATE_UNKNOWN;
}
return 0;
conn_reset:
ds_conn_reset(dp);
return -ECONNRESET;
}
static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
struct ds_data_nack nack = {
.tag = {
.type = DS_NACK,
.len = (sizeof(struct ds_data_nack) -
sizeof(struct ds_msg_tag)),
},
.handle = handle,
.result = DS_INV_HDL,
};
__ds_send(dp->lp, &nack, sizeof(nack));
}
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);
struct ds_queue_entry {
struct list_head list;
struct ds_info *dp;
int req_len;
int __pad;
u64 req[0];
};
static void process_ds_work(void)
{
struct ds_queue_entry *qp, *tmp;
unsigned long flags;
LIST_HEAD(todo);
spin_lock_irqsave(&ds_lock, flags);
list_splice_init(&ds_work_list, &todo);
spin_unlock_irqrestore(&ds_lock, flags);
list_for_each_entry_safe(qp, tmp, &todo, list) {
struct ds_data *dpkt = (struct ds_data *) qp->req;
struct ds_info *dp = qp->dp;
struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
int req_len = qp->req_len;
if (!cp) {
printk(KERN_ERR "ds-%llu: Data for unknown "
"handle %llu\n",
dp->id, dpkt->handle);
spin_lock_irqsave(&ds_lock, flags);
__send_ds_nack(dp, dpkt->handle);
spin_unlock_irqrestore(&ds_lock, flags);
} else {
cp->data(dp, cp, dpkt, req_len);
}
list_del(&qp->list);
kfree(qp);
}
}
static int ds_thread(void *__unused)
{
DEFINE_WAIT(wait);
while (1) {
prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
if (list_empty(&ds_work_list))
schedule();
finish_wait(&ds_wait, &wait);
if (kthread_should_stop())
break;
process_ds_work();
}
return 0;
}
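/*
 * Called from the LDC event callback with ds_lock held: queue the data
 * packet for ds_thread(), or NACK the handle if the allocation fails.
 */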
static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
struct ds_data *dpkt = (struct ds_data *) pkt;
struct ds_queue_entry *qp;
qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
if (!qp) {
__send_ds_nack(dp, dpkt->handle);
} else {
qp->dp = dp;
memcpy(&qp->req, pkt, len);
list_add_tail(&qp->list, &ds_work_list);
wake_up(&ds_wait);
}
return 0;
}
static void ds_up(struct ds_info *dp)
{
struct ldc_channel *lp = dp->lp;
struct ds_ver_req req;
int err;
req.tag.type = DS_INIT_REQ;
req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
req.ver.major = 1;
req.ver.minor = 0;
err = __ds_send(lp, &req, sizeof(req));
if (err > 0)
dp->hs_state = DS_HS_START;
}
static void ds_reset(struct ds_info *dp)
{
int i;
dp->hs_state = 0;
for (i = 0; i < dp->num_ds_states; i++) {
struct ds_cap_state *cp = &dp->ds_states[i];
cp->state = CAP_STATE_UNKNOWN;
}
}
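/*
 * LDC event callback: handle UP/RESET directly, then drain the channel
 * one tagged message at a time, processing handshake packets inline and
 * deferring data packets to the worker thread.
 */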
static void ds_event(void *arg, int event)
{
struct ds_info *dp = arg;
struct ldc_channel *lp = dp->lp;
unsigned long flags;
int err;
spin_lock_irqsave(&ds_lock, flags);
if (event == LDC_EVENT_UP) {
ds_up(dp);
spin_unlock_irqrestore(&ds_lock, flags);
return;
}
if (event == LDC_EVENT_RESET) {
ds_reset(dp);
spin_unlock_irqrestore(&ds_lock, flags);
return;
}
if (event != LDC_EVENT_DATA_READY) {
printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
dp->id, event);
spin_unlock_irqrestore(&ds_lock, flags);
return;
}
err = 0;
while (1) {
struct ds_msg_tag *tag;
err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
ds_conn_reset(dp);
break;
}
if (err == 0)
break;
tag = dp->rcv_buf;
err = ldc_read(lp, tag + 1, tag->len);
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
ds_conn_reset(dp);
break;
}
if (err < tag->len)
break;
if (tag->type < DS_DATA)
err = ds_handshake(dp, dp->rcv_buf);
else
err = ds_data(dp, dp->rcv_buf,
sizeof(*tag) + err);
if (err == -ECONNRESET)
break;
}
spin_unlock_irqrestore(&ds_lock, flags);
}
static int __devinit ds_probe(struct vio_dev *vdev,
const struct vio_device_id *id)
{
static int ds_version_printed;
struct ldc_channel_config ds_cfg = {
.event = ds_event,
.mtu = 4096,
.mode = LDC_MODE_STREAM,
};
struct mdesc_handle *hp;
struct ldc_channel *lp;
struct ds_info *dp;
const u64 *val;
int err, i;
if (ds_version_printed++ == 0)
printk(KERN_INFO "%s", version);
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
err = -ENOMEM;
if (!dp)
goto out_err;
hp = mdesc_grab();
val = mdesc_get_property(hp, vdev->mp, "id", NULL);
if (val)
dp->id = *val;
mdesc_release(hp);
dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
if (!dp->rcv_buf)
goto out_free_dp;
dp->rcv_buf_len = 4096;
dp->ds_states = kzalloc(sizeof(ds_states_template),
GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
memcpy(dp->ds_states, ds_states_template,
sizeof(ds_states_template));
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)
dp->ds_states[i].handle = ((u64)i << 32);
ds_cfg.tx_irq = vdev->tx_irq;
ds_cfg.rx_irq = vdev->rx_irq;
lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
if (IS_ERR(lp)) {
err = PTR_ERR(lp);
goto out_free_ds_states;
}
dp->lp = lp;
err = ldc_bind(lp, "DS");
if (err)
goto out_free_ldc;
spin_lock_irq(&ds_lock);
dp->next = ds_info_list;
ds_info_list = dp;
spin_unlock_irq(&ds_lock);
return err;
out_free_ldc:
ldc_free(dp->lp);
out_free_ds_states:
kfree(dp->ds_states);
out_free_rcv_buf:
kfree(dp->rcv_buf);
out_free_dp:
kfree(dp);
out_err:
return err;
}
static int ds_remove(struct vio_dev *vdev)
{
return 0;
}
static struct vio_device_id __initdata ds_match[] = {
{
.type = "domain-services-port",
},
{},
};
static struct vio_driver ds_driver = {
.id_table = ds_match,
.probe = ds_probe,
.remove = ds_remove,
.driver = {
.name = "ds",
.owner = THIS_MODULE,
}
};
static int __init ds_init(void)
{
kthread_run(ds_thread, NULL, "kldomd");
return vio_register_driver(&ds_driver);
}
subsys_initcall(ds_init);
| gpl-2.0 |
shakalaca/ASUS_ZenFone_ZD551KL | kernel/drivers/staging/rtl8192u/r8192U_core.c | 2079 | 171413 | /******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192U
*
* Based on the r8187 driver, which is:
* Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Jerry chuang <wlanfae@realtek.com>
*/
#ifndef CONFIG_FORCE_HARD_FLOAT
double __floatsidf (int i) { return i; }
unsigned int __fixunsdfsi (double d) { return d; }
double __adddf3(double a, double b) { return a+b; }
double __addsf3(float a, float b) { return a+b; }
double __subdf3(double a, double b) { return a-b; }
double __extendsfdf2(float a) {return a;}
#endif
#undef LOOP_TEST
#undef DUMP_RX
#undef DUMP_TX
#undef DEBUG_TX_DESC2
#undef RX_DONT_PASS_UL
#undef DEBUG_EPROM
#undef DEBUG_RX_VERBOSE
#undef DUMMY_RX
#undef DEBUG_ZERO_RX
#undef DEBUG_RX_SKB
#undef DEBUG_TX_FRAG
#undef DEBUG_RX_FRAG
#undef DEBUG_TX_FILLDESC
#undef DEBUG_TX
#undef DEBUG_IRQ
#undef DEBUG_RX
#undef DEBUG_RXALLOC
#undef DEBUG_REGISTERS
#undef DEBUG_RING
#undef DEBUG_IRQ_TASKLET
#undef DEBUG_TX_ALLOC
#undef DEBUG_TX_DESC
#define CONFIG_RTL8192_IO_MAP
#include <asm/uaccess.h>
#include "r8192U_hw.h"
#include "r8192U.h"
#include "r8190_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8180_93cx6.h" /* Card EEPROM */
#include "r8192U_wx.h"
#include "r819xU_phy.h" //added by WB 4.30.2008
#include "r819xU_phyreg.h"
#include "r819xU_cmdpkt.h"
#include "r8192U_dm.h"
//#include "r8192xU_phyreg.h"
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
// FIXME: check if 2.6.7 is ok
#ifdef CONFIG_RTL8192_PM
#include "r8192_pm.h"
#endif
#include "dot11d.h"
//set here to enable your trace code. //WB
u32 rt_global_debug_component = \
// COMP_INIT |
// COMP_DBG |
// COMP_EPROM |
// COMP_PHY |
// COMP_RF |
// COMP_FIRMWARE |
// COMP_CH |
// COMP_POWER_TRACKING |
// COMP_RATE |
// COMP_TXAGC |
// COMP_TRACE |
COMP_DOWN |
// COMP_RECV |
// COMP_SWBW |
COMP_SEC |
// COMP_RESET |
// COMP_SEND |
// COMP_EVENTS |
COMP_ERR ; //always open err flags on
#define TOTAL_CAM_ENTRY 32
#define CAM_CONTENT_COUNT 8
static const struct usb_device_id rtl8192_usb_id_tbl[] = {
/* Realtek */
{USB_DEVICE(0x0bda, 0x8709)},
/* Corega */
{USB_DEVICE(0x07aa, 0x0043)},
/* Belkin */
{USB_DEVICE(0x050d, 0x805E)},
/* Sitecom */
{USB_DEVICE(0x0df6, 0x0031)},
/* EnGenius */
{USB_DEVICE(0x1740, 0x9201)},
/* Dlink */
{USB_DEVICE(0x2001, 0x3301)},
/* Zinwell */
{USB_DEVICE(0x5a57, 0x0290)},
/* LG */
{USB_DEVICE(0x043e, 0x7a01)},
{}
};
MODULE_LICENSE("GPL");
MODULE_VERSION("V 1.1");
MODULE_DEVICE_TABLE(usb, rtl8192_usb_id_tbl);
MODULE_DESCRIPTION("Linux driver for Realtek RTL8192 USB WiFi cards");
static char* ifname = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
module_param(ifname, charp, S_IRUGO|S_IWUSR );
//module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
module_param(hwwep,int, S_IRUGO|S_IWUSR);
module_param(channels,int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ifname," Net interface name, wlan%d=default");
//MODULE_PARM_DESC(hwseqnum," Try to use hardware 802.11 header sequence numbers. Zero=default");
MODULE_PARM_DESC(hwwep," Try to use hardware security support. ");
MODULE_PARM_DESC(channels," Channel bitmask for specific locales. NYI");
static int rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void rtl8192_usb_disconnect(struct usb_interface *intf);
static struct usb_driver rtl8192_usb_driver = {
.name = RTL819xU_MODULE_NAME, /* Driver name */
.id_table = rtl8192_usb_id_tbl, /* PCI_ID table */
.probe = rtl8192_usb_probe, /* probe fn */
.disconnect = rtl8192_usb_disconnect, /* remove fn */
#ifdef CONFIG_RTL8192_PM
.suspend = rtl8192_suspend, /* PM suspend fn */
.resume = rtl8192_resume, /* PM resume fn */
#else
.suspend = NULL, /* PM suspend fn */
.resume = NULL, /* PM resume fn */
#endif
};
typedef struct _CHANNEL_LIST {
u8 Channel[32];
u8 Len;
}CHANNEL_LIST, *PCHANNEL_LIST;
static CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
{{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Spain. Change to ETSI.
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //France. Change to ETSI.
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, //MKK //MKK
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22},//MKK1
{{1,2,3,4,5,6,7,8,9,10,11,12,13},13}, //Israel.
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64},22}, // For 11a , TELEC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,40,44,48,52,56,60,64}, 22}, //MIC
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14} //For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626
};
static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
{
int i, max_chan=-1, min_chan=-1;
struct ieee80211_device* ieee = priv->ieee80211;
switch (channel_plan)
{
case COUNTRY_CODE_FCC:
case COUNTRY_CODE_IC:
case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_MKK:
case COUNTRY_CODE_MKK1:
case COUNTRY_CODE_ISRAEL:
case COUNTRY_CODE_TELEC:
case COUNTRY_CODE_MIC:
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
//actually 8225 & 8256 rf chips only support B,G,24N mode
if ((priv->rf_chip == RF_8225) || (priv->rf_chip == RF_8256)) {
min_chan = 1;
max_chan = 14;
}
else {
RT_TRACE(COMP_ERR, "unknown rf chip, can't set channel map in function:%s()\n", __FUNCTION__);
}
if (ChannelPlan[channel_plan].Len != 0) {
// Clear old channel map
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
// Set new channel map
for (i=0;i<ChannelPlan[channel_plan].Len;i++) {
if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
break;
GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
}
}
break;
case COUNTRY_CODE_GLOBAL_DOMAIN:
GET_DOT11D_INFO(ieee)->bEnabled = 0;//this flag enabled to follow 11d country IE setting, otherwise, it shall follow global domain settings.
Dot11d_Reset(ieee);
ieee->bGlobalDomain = true;
break;
default:
break;
}
}
#define rx_hal_is_cck_rate(_pdrvinfo)\
(_pdrvinfo->RxRate == DESC90_RATE1M ||\
_pdrvinfo->RxRate == DESC90_RATE2M ||\
_pdrvinfo->RxRate == DESC90_RATE5_5M ||\
_pdrvinfo->RxRate == DESC90_RATE11M) &&\
!_pdrvinfo->RxHT\
void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
//2004/02/11 In static WEP, OID_ADD_KEY or OID_ADD_WEP are set before STA associate to AP.
// However, ResetKey is called on OID_802_11_INFRASTRUCTURE_MODE and MlmeAssociateRequest
// In this condition, Cam can not be reset because upper layer will not set this static key again.
//if(Adapter->EncAlgorithm == WEP_Encryption)
// return;
//debug
//DbgPrint("========================================\n");
//DbgPrint(" Call ResetAllEntry \n");
//DbgPrint("========================================\n\n");
ulcommand |= BIT31|BIT30;
write_nic_dword(dev, RWCAM, ulcommand);
}
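/* CAM access: latch the data word in WCAMI, then trigger the write to entry addr via RWCAM. */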
void write_cam(struct net_device *dev, u8 addr, u32 data)
{
write_nic_dword(dev, WCAMI, data);
write_nic_dword(dev, RWCAM, BIT31|BIT16|(addr&0xff) );
}
u32 read_cam(struct net_device *dev, u8 addr)
{
write_nic_dword(dev, RWCAM, 0x80000000|(addr&0xff) );
return read_nic_dword(dev, 0xa8);
}
void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
indx|0xfe00, 0, &data, 1, HZ / 2);
if (status < 0)
{
printk("write_nic_byte_E TimeOut! status:%d\n", status);
}
}
u8 read_nic_byte_E(struct net_device *dev, int indx)
{
int status;
u8 data;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
indx|0xfe00, 0, &data, 1, HZ / 2);
if (status < 0)
{
printk("read_nic_byte_E TimeOut! status:%d\n", status);
}
return data;
}
//as the 92U extends the page range from 4 to 16, the functions below are modified accordingly.
void write_nic_byte(struct net_device *dev, int indx, u8 data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
(indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 1, HZ / 2);
if (status < 0)
{
printk("write_nic_byte TimeOut! status:%d\n", status);
}
}
void write_nic_word(struct net_device *dev, int indx, u16 data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
(indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 2, HZ / 2);
if (status < 0)
{
printk("write_nic_word TimeOut! status:%d\n", status);
}
}
void write_nic_dword(struct net_device *dev, int indx, u32 data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
(indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 4, HZ / 2);
if (status < 0)
{
printk("write_nic_dword TimeOut! status:%d\n", status);
}
}
u8 read_nic_byte(struct net_device *dev, int indx)
{
u8 data;
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
(indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 1, HZ / 2);
if (status < 0)
{
printk("read_nic_byte TimeOut! status:%d\n", status);
}
return data;
}
u16 read_nic_word(struct net_device *dev, int indx)
{
u16 data;
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
(indx&0xff)|0xff00, (indx>>8)&0x0f,
&data, 2, HZ / 2);
if (status < 0)
printk("read_nic_word TimeOut! status:%d\n", status);
return data;
}
u16 read_nic_word_E(struct net_device *dev, int indx)
{
u16 data;
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
indx|0xfe00, 0, &data, 2, HZ / 2);
if (status < 0)
printk("read_nic_word TimeOut! status:%d\n", status);
return data;
}
u32 read_nic_dword(struct net_device *dev, int indx)
{
u32 data;
int status;
/* int result; */
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
(indx&0xff)|0xff00, (indx>>8)&0x0f,
&data, 4, HZ / 2);
/* if(0 != result) {
* printk(KERN_WARNING "read size of data = %d\, date = %d\n",
* result, data);
* }
*/
if (status < 0)
printk("read_nic_dword TimeOut! status:%d\n", status);
return data;
}
/* u8 read_phy_cck(struct net_device *dev, u8 adr); */
/* u8 read_phy_ofdm(struct net_device *dev, u8 adr); */
/* this might still be called in what was the PHY rtl8185/rtl8192 common
 * code; plans are to possibly turn it back into one common code base...
 */
inline void force_pci_posting(struct net_device *dev)
{
}
static struct net_device_stats *rtl8192_stats(struct net_device *dev);
void rtl8192_commit(struct net_device *dev);
/* void rtl8192_restart(struct net_device *dev); */
void rtl8192_restart(struct work_struct *work);
/* void rtl8192_rq_tx_ack(struct work_struct *work); */
void watch_dog_timer_callback(unsigned long data);
/****************************************************************************
* -----------------------------PROCFS STUFF-------------------------
*****************************************************************************
*/
static struct proc_dir_entry *rtl8192_proc;
static int proc_get_stats_ap(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
struct ieee80211_network *target;
list_for_each_entry(target, &ieee->network_list, list) {
const char *wpa = "non_WPA";
if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
wpa = "WPA";
seq_printf(m, "%s %s\n", target->ssid, wpa);
}
return 0;
}
static int proc_get_registers(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
int i,n, max = 0xff;
seq_puts(m, "\n####################page 0##################\n ");
for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
seq_printf(m, "\nD: %2x > ",n);
for (i=0;i<16 && n<=max;i++,n++)
seq_printf(m, "%2x ",read_nic_byte(dev,0x000|n));
// printk("%2x ",read_nic_byte(dev,n));
}
seq_puts(m, "\n####################page 1##################\n ");
for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
seq_printf(m, "\nD: %2x > ",n);
for (i=0;i<16 && n<=max;i++,n++)
seq_printf(m, "%2x ",read_nic_byte(dev,0x100|n));
// printk("%2x ",read_nic_byte(dev,n));
}
seq_puts(m, "\n####################page 3##################\n ");
for (n=0;n<=max;) {
//printk( "\nD: %2x> ", n);
seq_printf(m, "\nD: %2x > ",n);
for(i=0;i<16 && n<=max;i++,n++)
seq_printf(m, "%2x ",read_nic_byte(dev,0x300|n));
// printk("%2x ",read_nic_byte(dev,n));
}
seq_putc(m, '\n');
return 0;
}
static int proc_get_stats_tx(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
seq_printf(m,
"TX VI priority ok int: %lu\n"
"TX VI priority error int: %lu\n"
"TX VO priority ok int: %lu\n"
"TX VO priority error int: %lu\n"
"TX BE priority ok int: %lu\n"
"TX BE priority error int: %lu\n"
"TX BK priority ok int: %lu\n"
"TX BK priority error int: %lu\n"
"TX MANAGE priority ok int: %lu\n"
"TX MANAGE priority error int: %lu\n"
"TX BEACON priority ok int: %lu\n"
"TX BEACON priority error int: %lu\n"
// "TX high priority ok int: %lu\n"
// "TX high priority failed error int: %lu\n"
"TX queue resume: %lu\n"
"TX queue stopped?: %d\n"
"TX fifo overflow: %lu\n"
// "TX beacon: %lu\n"
"TX VI queue: %d\n"
"TX VO queue: %d\n"
"TX BE queue: %d\n"
"TX BK queue: %d\n"
// "TX HW queue: %d\n"
"TX VI dropped: %lu\n"
"TX VO dropped: %lu\n"
"TX BE dropped: %lu\n"
"TX BK dropped: %lu\n"
"TX total data packets %lu\n",
// "TX beacon aborted: %lu\n",
priv->stats.txviokint,
priv->stats.txvierr,
priv->stats.txvookint,
priv->stats.txvoerr,
priv->stats.txbeokint,
priv->stats.txbeerr,
priv->stats.txbkokint,
priv->stats.txbkerr,
priv->stats.txmanageokint,
priv->stats.txmanageerr,
priv->stats.txbeaconokint,
priv->stats.txbeaconerr,
// priv->stats.txhpokint,
// priv->stats.txhperr,
priv->stats.txresumed,
netif_queue_stopped(dev),
priv->stats.txoverflow,
// priv->stats.txbeacon,
atomic_read(&(priv->tx_pending[VI_PRIORITY])),
atomic_read(&(priv->tx_pending[VO_PRIORITY])),
atomic_read(&(priv->tx_pending[BE_PRIORITY])),
atomic_read(&(priv->tx_pending[BK_PRIORITY])),
// read_nic_byte(dev, TXFIFOCOUNT),
priv->stats.txvidrop,
priv->stats.txvodrop,
priv->stats.txbedrop,
priv->stats.txbkdrop,
priv->stats.txdatapkt
// priv->stats.txbeaconerr
);
return 0;
}
static int proc_get_stats_rx(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
seq_printf(m,
"RX packets: %lu\n"
"RX urb status error: %lu\n"
"RX invalid urb error: %lu\n",
priv->stats.rxoktotal,
priv->stats.rxstaterr,
priv->stats.rxurberr);
return 0;
}
void rtl8192_proc_module_init(void)
{
RT_TRACE(COMP_INIT, "Initializing proc filesystem");
rtl8192_proc = proc_mkdir(RTL819xU_MODULE_NAME, init_net.proc_net);
}
void rtl8192_proc_module_remove(void)
{
remove_proc_entry(RTL819xU_MODULE_NAME, init_net.proc_net);
}
/*
* seq_file wrappers for procfile show routines.
*/
static int rtl8192_proc_open(struct inode *inode, struct file *file)
{
struct net_device *dev = proc_get_parent_data(inode);
int (*show)(struct seq_file *, void *) = PDE_DATA(inode);
return single_open(file, show, dev);
}
static const struct file_operations rtl8192_proc_fops = {
.open = rtl8192_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* Table of proc files we need to create.
*/
struct rtl8192_proc_file {
char name[12];
int (*show)(struct seq_file *, void *);
};
static const struct rtl8192_proc_file rtl8192_proc_files[] = {
{ "stats-rx", &proc_get_stats_rx },
{ "stats-tx", &proc_get_stats_tx },
{ "stats-ap", &proc_get_stats_ap },
{ "registers", &proc_get_registers },
{ "" }
};
void rtl8192_proc_init_one(struct net_device *dev)
{
const struct rtl8192_proc_file *f;
struct proc_dir_entry *dir;
if (rtl8192_proc) {
dir = proc_mkdir_data(dev->name, 0, rtl8192_proc, dev);
if (!dir) {
RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
dev->name);
return;
}
for (f = rtl8192_proc_files; f->name[0]; f++) {
if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir,
&rtl8192_proc_fops, f->show)) {
RT_TRACE(COMP_ERR, "Unable to initialize "
"/proc/net/rtl8192/%s/%s\n",
dev->name, f->name);
return;
}
}
}
}
void rtl8192_proc_remove_one(struct net_device *dev)
{
remove_proc_subtree(dev->name, rtl8192_proc);
}
/****************************************************************************
-----------------------------MISC STUFF-------------------------
*****************************************************************************/
/* this is only for debugging */
void print_buffer(u32 *buffer, int len)
{
int i;
u8 *buf =(u8*)buffer;
printk("ASCII BUFFER DUMP (len: %x):\n",len);
for(i=0;i<len;i++)
printk("%c",buf[i]);
printk("\nBINARY BUFFER DUMP (len: %x):\n",len);
for(i=0;i<len;i++)
printk("%x",buf[i]);
printk("\n");
}
//short check_nic_enough_desc(struct net_device *dev, priority_t priority)
short check_nic_enough_desc(struct net_device *dev,int queue_index)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int used = atomic_read(&priv->tx_pending[queue_index]);
return (used < MAX_TX_URB);
}
void tx_timeout(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//rtl8192_commit(dev);
schedule_work(&priv->reset_wq);
//DMESG("TXTIMEOUT");
}
/* this is only for debug */
void dump_eprom(struct net_device *dev)
{
int i;
for(i=0; i<63; i++)
RT_TRACE(COMP_EPROM, "EEPROM addr %x : %x", i, eprom_read(dev,i));
}
/* this is only for debug */
void rtl8192_dump_reg(struct net_device *dev)
{
int i;
int n;
int max=0x1ff;
RT_TRACE(COMP_PHY, "Dumping NIC register map");
for(n=0;n<=max;)
{
printk( "\nD: %2x> ", n);
for(i=0;i<16 && n<=max;i++,n++)
printk("%2x ",read_nic_byte(dev,n));
}
printk("\n");
}
/****************************************************************************
------------------------------HW STUFF---------------------------
*****************************************************************************/
void rtl8192_set_mode(struct net_device *dev,int mode)
{
u8 ecmd;
ecmd=read_nic_byte(dev, EPROM_CMD);
ecmd=ecmd &~ EPROM_CMD_OPERATING_MODE_MASK;
ecmd=ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
ecmd=ecmd &~ (1<<EPROM_CS_SHIFT);
ecmd=ecmd &~ (1<<EPROM_CK_SHIFT);
write_nic_byte(dev, EPROM_CMD, ecmd);
}
void rtl8192_update_msr(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 msr;
msr = read_nic_byte(dev, MSR);
msr &= ~ MSR_LINK_MASK;
/* do not change in link_state != WLAN_LINK_ASSOCIATED.
* msr must be updated if the state is ASSOCIATING.
* this is intentional and makes sense for ad-hoc and
* master (see the create BSS/IBSS func)
*/
if (priv->ieee80211->state == IEEE80211_LINKED){
if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
msr |= (MSR_LINK_MANAGED<<MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
msr |= (MSR_LINK_ADHOC<<MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
}else
msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
write_nic_byte(dev, MSR, msr);
}
void rtl8192_set_chan(struct net_device *dev,short ch)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
// u32 tx;
RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __FUNCTION__, ch);
priv->chan=ch;
/* this hack should avoid frame TX during channel setting*/
// tx = read_nic_dword(dev,TX_CONF);
// tx &= ~TX_LOOPBACK_MASK;
#ifndef LOOP_TEST
// write_nic_dword(dev,TX_CONF, tx |( TX_LOOPBACK_MAC<<TX_LOOPBACK_SHIFT));
//need to implement rf set channel here WB
if (priv->rf_set_chan)
priv->rf_set_chan(dev,priv->chan);
mdelay(10);
// write_nic_dword(dev,TX_CONF,tx | (TX_LOOPBACK_NONE<<TX_LOOPBACK_SHIFT));
#endif
}
static void rtl8192_rx_isr(struct urb *urb);
//static void rtl8192_rx_isr(struct urb *rx_urb);
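/*
 * Bytes to strip ahead of the payload: rx descriptor plus driver info
 * plus alignment shift (plus 8 for an aggregated subframe).
 */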
u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
#ifdef USB_RX_AGGREGATION_SUPPORT
if (pstats->bisrxaggrsubframe)
return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ pstats->RxBufShift + 8);
else
#endif
return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ pstats->RxBufShift);
}
static int rtl8192_rx_initiate(struct net_device*dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct urb *entry;
struct sk_buff *skb;
struct rtl8192_rx_info *info;
/* normal packet rx procedure */
while (skb_queue_len(&priv->rx_queue) < MAX_RX_URB) {
skb = __dev_alloc_skb(RX_URB_SIZE, GFP_KERNEL);
if (!skb)
break;
entry = usb_alloc_urb(0, GFP_KERNEL);
if (!entry) {
kfree_skb(skb);
break;
}
// printk("nomal packet IN request!\n");
usb_fill_bulk_urb(entry, priv->udev,
usb_rcvbulkpipe(priv->udev, 3), skb_tail_pointer(skb),
RX_URB_SIZE, rtl8192_rx_isr, skb);
info = (struct rtl8192_rx_info *) skb->cb;
info->urb = entry;
info->dev = dev;
info->out_pipe = 3; //denote rx normal packet queue
skb_queue_tail(&priv->rx_queue, skb);
usb_submit_urb(entry, GFP_KERNEL);
}
/* command packet rx procedure */
while (skb_queue_len(&priv->rx_queue) < MAX_RX_URB + 3) {
// printk("command packet IN request!\n");
skb = __dev_alloc_skb(RX_URB_SIZE ,GFP_KERNEL);
if (!skb)
break;
entry = usb_alloc_urb(0, GFP_KERNEL);
if (!entry) {
kfree_skb(skb);
break;
}
usb_fill_bulk_urb(entry, priv->udev,
usb_rcvbulkpipe(priv->udev, 9), skb_tail_pointer(skb),
RX_URB_SIZE, rtl8192_rx_isr, skb);
info = (struct rtl8192_rx_info *) skb->cb;
info->urb = entry;
info->dev = dev;
info->out_pipe = 9; //denote rx cmd packet queue
skb_queue_tail(&priv->rx_queue, skb);
usb_submit_urb(entry, GFP_KERNEL);
}
return 0;
}
void rtl8192_set_rxconf(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
u32 rxconf;
rxconf=read_nic_dword(dev,RCR);
rxconf = rxconf &~ MAC_FILTER_MASK;
rxconf = rxconf | RCR_AMF;
rxconf = rxconf | RCR_ADF;
rxconf = rxconf | RCR_AB;
rxconf = rxconf | RCR_AM;
//rxconf = rxconf | RCR_ACF;
if (dev->flags & IFF_PROMISC) {DMESG ("NIC in promisc mode");}
if(priv->ieee80211->iw_mode == IW_MODE_MONITOR || \
dev->flags & IFF_PROMISC){
rxconf = rxconf | RCR_AAP;
} /*else if(priv->ieee80211->iw_mode == IW_MODE_MASTER){
rxconf = rxconf | (1<<ACCEPT_ALLMAC_FRAME_SHIFT);
rxconf = rxconf | (1<<RX_CHECK_BSSID_SHIFT);
}*/else{
rxconf = rxconf | RCR_APM;
rxconf = rxconf | RCR_CBSSID;
}
if(priv->ieee80211->iw_mode == IW_MODE_MONITOR){
rxconf = rxconf | RCR_AICV;
rxconf = rxconf | RCR_APWRMGT;
}
if( priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
rxconf = rxconf | RCR_ACRC32;
rxconf = rxconf &~ RX_FIFO_THRESHOLD_MASK;
rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE<<RX_FIFO_THRESHOLD_SHIFT);
rxconf = rxconf &~ MAX_RX_DMA_MASK;
rxconf = rxconf | ((u32)7<<RCR_MXDMA_OFFSET);
// rxconf = rxconf | (1<<RX_AUTORESETPHY_SHIFT);
rxconf = rxconf | RCR_ONLYERLPKT;
// rxconf = rxconf &~ RCR_CS_MASK;
// rxconf = rxconf | (1<<RCR_CS_SHIFT);
write_nic_dword(dev, RCR, rxconf);
#ifdef DEBUG_RX
DMESG("rxconf: %x %x",rxconf ,read_nic_dword(dev,RCR));
#endif
}
//wait to be removed
void rtl8192_rx_enable(struct net_device *dev)
{
//u8 cmd;
//struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
rtl8192_rx_initiate(dev);
// rtl8192_set_rxconf(dev);
}
void rtl8192_tx_enable(struct net_device *dev)
{
}
void rtl8192_rtx_disable(struct net_device *dev)
{
u8 cmd;
struct r8192_priv *priv = ieee80211_priv(dev);
struct sk_buff *skb;
struct rtl8192_rx_info *info;
cmd=read_nic_byte(dev,CMDR);
write_nic_byte(dev, CMDR, cmd &~ \
(CR_TE|CR_RE));
force_pci_posting(dev);
mdelay(10);
while ((skb = __skb_dequeue(&priv->rx_queue))) {
info = (struct rtl8192_rx_info *) skb->cb;
if (!info->urb)
continue;
usb_kill_urb(info->urb);
kfree_skb(skb);
}
if (skb_queue_len(&priv->skb_queue)) {
printk(KERN_WARNING "skb_queue not empty\n");
}
skb_queue_purge(&priv->skb_queue);
return;
}
int alloc_tx_beacon_desc_ring(struct net_device *dev, int count)
{
return 0;
}
inline u16 ieeerate2rtlrate(int rate)
{
switch(rate){
case 10:
return 0;
case 20:
return 1;
case 55:
return 2;
case 110:
return 3;
case 60:
return 4;
case 90:
return 5;
case 120:
return 6;
case 180:
return 7;
case 240:
return 8;
case 360:
return 9;
case 480:
return 10;
case 540:
return 11;
default:
return 3;
}
}
static u16 rtl_rate[] = {10,20,55,110,60,90,120,180,240,360,480,540};
inline u16 rtl8192_rate2rate(short rate)
{
if (rate >11) return 0;
return rtl_rate[rate];
}
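/*
 * RX completion path: a finished bulk urb hands its skb to the rx tasklet,
 * then a fresh skb is allocated and the same urb is resubmitted on the pipe
 * it arrived on, so the rx ring keeps refilling itself.
 */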
/* The prototype of the rx completion handler changed in later Linux kernel versions. */
static void rtl8192_rx_isr(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
struct r8192_priv *priv = ieee80211_priv(dev);
int out_pipe = info->out_pipe;
int err;
if(!priv->up)
return;
if (unlikely(urb->status)) {
info->urb = NULL;
priv->stats.rxstaterr++;
priv->ieee80211->stats.rx_errors++;
usb_free_urb(urb);
// printk("%s():rx status err\n",__FUNCTION__);
return;
}
skb_unlink(skb, &priv->rx_queue);
skb_put(skb, urb->actual_length);
skb_queue_tail(&priv->skb_queue, skb);
tasklet_schedule(&priv->irq_rx_tasklet);
skb = dev_alloc_skb(RX_URB_SIZE);
if (unlikely(!skb)) {
usb_free_urb(urb);
printk("%s():can,t alloc skb\n",__FUNCTION__);
/* TODO check rx queue length and refill *somewhere* */
return;
}
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev, out_pipe), skb_tail_pointer(skb),
RX_URB_SIZE, rtl8192_rx_isr, skb);
info = (struct rtl8192_rx_info *) skb->cb;
info->urb = urb;
info->dev = dev;
info->out_pipe = out_pipe;
urb->transfer_buffer = skb_tail_pointer(skb);
urb->context = skb;
skb_queue_tail(&priv->rx_queue, skb);
err = usb_submit_urb(urb, GFP_ATOMIC);
if(err && err != -EPERM)
printk("can not submit rxurb, err is %x, URB status is %x\n", err, urb->status);
}
u32 rtl819xusb_rx_command_packet(struct net_device *dev,
struct ieee80211_rx_stats *pstats)
{
u32 status;
//RT_TRACE(COMP_RECV, DBG_TRACE, ("---> RxCommandPacketHandle819xUsb()\n"));
status = cmpk_message_handle_rx(dev, pstats);
if (status)
{
DMESG("rxcommandpackethandle819xusb: It is a command packet\n");
}
else
{
//RT_TRACE(COMP_RECV, DBG_TRACE, ("RxCommandPacketHandle819xUsb: It is not a command packet\n"));
}
//RT_TRACE(COMP_RECV, DBG_TRACE, ("<--- RxCommandPacketHandle819xUsb()\n"));
return status;
}
void rtl8192_data_hard_stop(struct net_device *dev)
{
//FIXME !!
}
void rtl8192_data_hard_resume(struct net_device *dev)
{
// FIXME !!
}
/* This function TXes data frames when the ieee80211 stack requires it.
 * It also checks whether the ieee tx queue needs to be stopped, and stops
 * it if so.
 */
void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
unsigned long flags;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
/* shall not be referred by command packet */
assert(queue_index != TXCMD_QUEUE);
spin_lock_irqsave(&priv->tx_lock,flags);
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
// tcb_desc->RATRIndex = 7;
// tcb_desc->bTxDisableRateFallBack = 1;
// tcb_desc->bTxUseDriverAssingedRate = 1;
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
//priv->ieee80211->stats.tx_bytes+=(skb->len - priv->ieee80211->tx_headroom);
//priv->ieee80211->stats.tx_packets++;
spin_unlock_irqrestore(&priv->tx_lock,flags);
// return ret;
return;
}
/* This is a rough attempt to TX a frame.
 * It is called by the ieee80211 stack to TX management frames.
 * If the ring is full, packets are dropped (for data frames the queue
 * is stopped before this can happen).
 */
int rtl8192_hard_start_xmit(struct sk_buff *skb,struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
unsigned long flags;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
spin_lock_irqsave(&priv->tx_lock,flags);
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
if(queue_index == TXCMD_QUEUE) {
skb_push(skb, USB_HWDESC_HEADER_LEN);
rtl819xU_tx_cmd(dev, skb);
ret = 1;
spin_unlock_irqrestore(&priv->tx_lock,flags);
return ret;
} else {
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
}
spin_unlock_irqrestore(&priv->tx_lock,flags);
return ret;
}
void rtl8192_try_wake_queue(struct net_device *dev, int pri);
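/*
 * Driver-level tx aggregation (USB_TX_DRIVER_AGGREGATION_ENABLE): several
 * MPDUs are packed into a single bulk transfer, with every subframe after
 * the first padded to start on a 256-byte boundary; DrvAggr_PaddingAdd()
 * computes that padding.
 */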
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
u16 DrvAggr_PaddingAdd(struct net_device *dev, struct sk_buff *skb)
{
u16 PaddingNum = 256 - ((skb->len + TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES) % 256);
return (PaddingNum&0xff);
}
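/*
 * Worked example (assuming TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES == 8,
 * purely for illustration): a 1000-byte skb occupies 1008 bytes with its
 * subframe header; 1008 % 256 == 240, so 256 - 240 == 16 pad bytes are
 * needed.  When the length is already a multiple of 256 the raw result is
 * 256, and the final &0xff folds that to the 0 bytes actually required.
 */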
u8 MRateToHwRate8190Pci(u8 rate);
u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc);
u8 MapHwQueueToFirmwareQueue(u8 QueueID);
struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv_agg_txb *pSendList)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct r8192_priv *priv = ieee80211_priv(dev);
cb_desc *tcb_desc = NULL;
u8 i;
u32 TotalLength;
struct sk_buff *skb;
struct sk_buff *agg_skb;
tx_desc_819x_usb_aggr_subframe *tx_agg_desc = NULL;
tx_fwinfo_819x_usb *tx_fwinfo = NULL;
//
// Local variable initialization.
//
/* first skb initialization */
skb = pSendList->tx_agg_frames[0];
TotalLength = skb->len;
/* Get the total aggregation length including the padding space and
* sub frame header.
*/
for(i = 1; i < pSendList->nr_drv_agg_frames; i++) {
TotalLength += DrvAggr_PaddingAdd(dev, skb);
skb = pSendList->tx_agg_frames[i];
TotalLength += (skb->len + TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES);
}
/* allocate skb to contain the aggregated packets */
agg_skb = dev_alloc_skb(TotalLength + ieee->tx_headroom);
if (!agg_skb)
return NULL;
/* agg_skb->len is still 0 right after allocation; zero the whole allocated area instead */
memset(agg_skb->data, 0, TotalLength + ieee->tx_headroom);
skb_reserve(agg_skb, ieee->tx_headroom);
// RT_DEBUG_DATA(COMP_SEND, skb->cb, sizeof(skb->cb));
/* reserve info for first subframe Tx descriptor to be set in the tx function */
skb = pSendList->tx_agg_frames[0];
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->drv_agg_enable = 1;
tcb_desc->pkt_size = skb->len;
tcb_desc->DrvAggrNum = pSendList->nr_drv_agg_frames;
printk("DrvAggNum = %d\n", tcb_desc->DrvAggrNum);
// RT_DEBUG_DATA(COMP_SEND, skb->cb, sizeof(skb->cb));
// printk("========>skb->data ======> \n");
// RT_DEBUG_DATA(COMP_SEND, skb->data, skb->len);
memcpy(agg_skb->cb, skb->cb, sizeof(skb->cb));
memcpy(skb_put(agg_skb,skb->len),skb->data,skb->len);
for(i = 1; i < pSendList->nr_drv_agg_frames; i++) {
/* pad so the next sub frame starts on a 256-byte boundary */
skb_put(agg_skb,DrvAggr_PaddingAdd(dev,skb));
/* Subframe drv Tx descriptor and firmware info setting */
skb = pSendList->tx_agg_frames[i];
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)agg_skb->tail;
tx_fwinfo = (tx_fwinfo_819x_usb *)(agg_skb->tail + sizeof(tx_desc_819x_usb_aggr_subframe));
memset(tx_fwinfo,0,sizeof(tx_fwinfo_819x_usb));
/* DWORD 0 */
tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80)?1:0;
tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate);
tx_fwinfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
tx_fwinfo->Short = QueryIsShort(tx_fwinfo->TxHT, tx_fwinfo->TxRate, tcb_desc);
if(tcb_desc->bAMPDUEnable) {//AMPDU enabled
tx_fwinfo->AllowAggregation = 1;
/* DWORD 1 */
tx_fwinfo->RxMF = tcb_desc->ampdu_factor;
tx_fwinfo->RxAMD = tcb_desc->ampdu_density&0x07;//ampdudensity
} else {
tx_fwinfo->AllowAggregation = 0;
/* DWORD 1 */
tx_fwinfo->RxMF = 0;
tx_fwinfo->RxAMD = 0;
}
/* Protection mode related */
tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable)?1:0;
tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable)?1:0;
tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC)?1:0;
tx_fwinfo->RtsHT = (tcb_desc->rts_rate&0x80)?1:0;
tx_fwinfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT==0)?(tcb_desc->RTSSC):0;
tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT==1)?((tcb_desc->bRTSBW)?1:0):0;
tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT==0)?(tcb_desc->bRTSUseShortPreamble?1:0):\
(tcb_desc->bRTSUseShortGI?1:0);
/* Set Bandwidth and sub-channel settings. */
if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40)
{
if(tcb_desc->bPacketBW) {
tx_fwinfo->TxBandwidth = 1;
tx_fwinfo->TxSubCarrier = 0; //By SD3's Jerry suggestion, use duplicated mode
} else {
tx_fwinfo->TxBandwidth = 0;
tx_fwinfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
}
} else {
tx_fwinfo->TxBandwidth = 0;
tx_fwinfo->TxSubCarrier = 0;
}
/* Fill Tx descriptor */
memset(tx_agg_desc, 0, sizeof(tx_desc_819x_usb_aggr_subframe));
/* DWORD 0 */
//tx_agg_desc->LINIP = 0;
//tx_agg_desc->CmdInit = 1;
tx_agg_desc->Offset = sizeof(tx_fwinfo_819x_usb) + 8;
/* already raw data, need not to subtract header length */
tx_agg_desc->PktSize = skb->len & 0xffff;
/*DWORD 1*/
tx_agg_desc->SecCAMID= 0;
tx_agg_desc->RATid = tcb_desc->RATRIndex;
{
//MPDUOverhead = 0;
tx_agg_desc->NoEnc = 1;
}
tx_agg_desc->SecType = 0x0;
if (tcb_desc->bHwSec) {
switch (priv->ieee80211->pairwise_key_type)
{
case KEY_TYPE_WEP40:
case KEY_TYPE_WEP104:
tx_agg_desc->SecType = 0x1;
tx_agg_desc->NoEnc = 0;
break;
case KEY_TYPE_TKIP:
tx_agg_desc->SecType = 0x2;
tx_agg_desc->NoEnc = 0;
break;
case KEY_TYPE_CCMP:
tx_agg_desc->SecType = 0x3;
tx_agg_desc->NoEnc = 0;
break;
case KEY_TYPE_NA:
tx_agg_desc->SecType = 0x0;
tx_agg_desc->NoEnc = 1;
break;
}
}
tx_agg_desc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
tx_agg_desc->TxFWInfoSize = sizeof(tx_fwinfo_819x_usb);
tx_agg_desc->DISFB = tcb_desc->bTxDisableRateFallBack;
tx_agg_desc->USERATE = tcb_desc->bTxUseDriverAssingedRate;
tx_agg_desc->OWN = 1;
//DWORD 2
/* According windows driver, it seems that there no need to fill this field */
//tx_agg_desc->TxBufferSize= (u32)(skb->len - USB_HWDESC_HEADER_LEN);
/* to fill next packet */
skb_put(agg_skb,TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES);
memcpy(skb_put(agg_skb,skb->len),skb->data,skb->len);
}
for(i = 0; i < pSendList->nr_drv_agg_frames; i++) {
dev_kfree_skb_any(pSendList->tx_agg_frames[i]);
}
return agg_skb;
}
/* NOTE:
 * This function returns a list of TCBs that are suitable to be aggregated
 * with the input TCB. If no suitable TCB is found, SendList will only
 * contain the input TCB.
 */
u8 DrvAggr_GetAggregatibleList(struct net_device *dev, struct sk_buff *skb,
struct ieee80211_drv_agg_txb *pSendList)
{
struct ieee80211_device *ieee = netdev_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u16 nMaxAggrNum = pHTInfo->UsbTxAggrNum;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 QueueID = tcb_desc->queue_index;
do {
pSendList->tx_agg_frames[pSendList->nr_drv_agg_frames++] = skb;
if(pSendList->nr_drv_agg_frames >= nMaxAggrNum) {
break;
}
} while((skb = skb_dequeue(&ieee->skb_drv_aggQ[QueueID])));
RT_TRACE(COMP_AMSDU, "DrvAggr_GetAggregatibleList, nAggrTcbNum = %d \n", pSendList->nr_drv_agg_frames);
return pSendList->nr_drv_agg_frames;
}
#endif
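/*
 * TX completion handler: updates the counters, frees the skb/urb pair and,
 * for non-beacon queues, pushes the next frame waiting in the software
 * queue (or kicks driver aggregation when it is enabled).
 */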
static void rtl8192_tx_isr(struct urb *tx_urb)
{
struct sk_buff *skb = (struct sk_buff*)tx_urb->context;
struct net_device *dev = NULL;
struct r8192_priv *priv = NULL;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
// bool bToSend0Byte;
// u16 BufLen = skb->len;
memcpy(&dev,(struct net_device*)(skb->cb),sizeof(struct net_device*));
priv = ieee80211_priv(dev);
if(tcb_desc->queue_index != TXCMD_QUEUE) {
if(tx_urb->status == 0) {
dev->trans_start = jiffies;
// Act as station mode, destination shall be unicast address.
//priv->ieee80211->stats.tx_bytes+=(skb->len - priv->ieee80211->tx_headroom);
//priv->ieee80211->stats.tx_packets++;
priv->stats.txoktotal++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
priv->stats.txbytesunicast += (skb->len - priv->ieee80211->tx_headroom);
} else {
priv->ieee80211->stats.tx_errors++;
//priv->stats.txmanageerr++;
/* TODO */
}
}
/* free skb and tx_urb */
if(skb != NULL) {
dev_kfree_skb_any(skb);
usb_free_urb(tx_urb);
atomic_dec(&priv->tx_pending[queue_index]);
}
{
//
// Handle HW Beacon:
// Our beacon frame has been transferred to the host controller at this point.
//
// Caution:
// Handling the wait queue of command packets.
// For Tx command packets we must not do TCB fragmentation, because that is not handled yet.
// Packets must be cut to match the size of TX_CMD_PKT before being sent.
//
/* Handle MPDU in wait queue. */
if(queue_index != BEACON_QUEUE) {
/* Don't send data frame during scanning.*/
if((skb_queue_len(&priv->ieee80211->skb_waitQ[queue_index]) != 0)&&\
(!(priv->ieee80211->queue_stop))) {
if(NULL != (skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]))))
priv->ieee80211->softmac_hard_start_xmit(skb, dev);
return; //modified by david to avoid further processing AMSDU
}
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
else if ((skb_queue_len(&priv->ieee80211->skb_drv_aggQ[queue_index])!= 0)&&\
(!(priv->ieee80211->queue_stop))) {
// Tx Driver Aggregation process
/* The driver aggregates packets according to the following steps:
 * 1. check whether there is a tx irq available; since this is a
 *    completion callback, there should be enough of them;
 * 2. check the packet type;
 * 3. initialize the send list and check whether more than one packet
 *    is ready to be sent;
 * 4. aggregate the packets and fill in the firmware info and tx
 *    descriptors;
 * 5. check whether the packet can be sent; otherwise re-insert it at
 *    the head of the wait queue.
 */
skb = skb_dequeue(&priv->ieee80211->skb_drv_aggQ[queue_index]);
if(!check_nic_enough_desc(dev, queue_index)) {
skb_queue_head(&(priv->ieee80211->skb_drv_aggQ[queue_index]), skb);
return;
}
{
/*TODO*/
/*
u8* pHeader = skb->data;
if(IsMgntQosData(pHeader) ||
IsMgntQData_Ack(pHeader) ||
IsMgntQData_Poll(pHeader) ||
IsMgntQData_Poll_Ack(pHeader)
)
*/
{
struct ieee80211_drv_agg_txb SendList;
memset(&SendList, 0, sizeof(struct ieee80211_drv_agg_txb));
if(DrvAggr_GetAggregatibleList(dev, skb, &SendList) > 1) {
skb = DrvAggr_Aggregation(dev, &SendList);
}
}
priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
}
#endif
}
}
}
void rtl8192_beacon_stop(struct net_device *dev)
{
u8 msr, msrm, msr2;
struct r8192_priv *priv = ieee80211_priv(dev);
msr = read_nic_byte(dev, MSR);
msrm = msr & MSR_LINK_MASK;
msr2 = msr & ~MSR_LINK_MASK;
if(NIC_8192U == priv->card_8192) {
usb_kill_urb(priv->rx_urb[MAX_RX_URB]);
}
if ((msrm == (MSR_LINK_ADHOC<<MSR_LINK_SHIFT)) ||
(msrm == (MSR_LINK_MASTER<<MSR_LINK_SHIFT))){
write_nic_byte(dev, MSR, msr2 | MSR_LINK_NONE);
write_nic_byte(dev, MSR, msr);
}
}
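/*
 * Build the RRSR basic-rate bitmap from the rates advertised by the current
 * network; the top bit (0x80) of each advertised rate is a flag and is
 * masked off with &0x7f before matching.
 */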
void rtl8192_config_rate(struct net_device* dev, u16* rate_config)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
u8 i=0, basic_rate = 0;
net = & priv->ieee80211->current_network;
for (i=0; i<net->rates_len; i++)
{
basic_rate = net->rates[i]&0x7f;
switch(basic_rate)
{
case MGN_1M: *rate_config |= RRSR_1M; break;
case MGN_2M: *rate_config |= RRSR_2M; break;
case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
case MGN_11M: *rate_config |= RRSR_11M; break;
case MGN_6M: *rate_config |= RRSR_6M; break;
case MGN_9M: *rate_config |= RRSR_9M; break;
case MGN_12M: *rate_config |= RRSR_12M; break;
case MGN_18M: *rate_config |= RRSR_18M; break;
case MGN_24M: *rate_config |= RRSR_24M; break;
case MGN_36M: *rate_config |= RRSR_36M; break;
case MGN_48M: *rate_config |= RRSR_48M; break;
case MGN_54M: *rate_config |= RRSR_54M; break;
}
}
for (i=0; i<net->rates_ex_len; i++)
{
basic_rate = net->rates_ex[i]&0x7f;
switch(basic_rate)
{
case MGN_1M: *rate_config |= RRSR_1M; break;
case MGN_2M: *rate_config |= RRSR_2M; break;
case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
case MGN_11M: *rate_config |= RRSR_11M; break;
case MGN_6M: *rate_config |= RRSR_6M; break;
case MGN_9M: *rate_config |= RRSR_9M; break;
case MGN_12M: *rate_config |= RRSR_12M; break;
case MGN_18M: *rate_config |= RRSR_18M; break;
case MGN_24M: *rate_config |= RRSR_24M; break;
case MGN_36M: *rate_config |= RRSR_36M; break;
case MGN_48M: *rate_config |= RRSR_48M; break;
case MGN_54M: *rate_config |= RRSR_54M; break;
}
}
}
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
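/* Apply capability-field driven settings: short preamble for ACKs and, on
 * G/N networks, the 9us/20us slot time selection. */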
void rtl8192_update_cap(struct net_device* dev, u16 cap)
{
u32 tmp = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net = &priv->ieee80211->current_network;
priv->short_preamble = cap & WLAN_CAPABILITY_SHORT_PREAMBLE;
tmp = priv->basic_rate;
if (priv->short_preamble)
tmp |= BRSR_AckShortPmb;
write_nic_dword(dev, RRSR, tmp);
if (net->mode & (IEEE_G|IEEE_N_24G))
{
u8 slot_time = 0;
if ((cap & WLAN_CAPABILITY_SHORT_SLOT)&&(!priv->ieee80211->pHTInfo->bCurrentRT2RTLongSlotTime))
{//short slot time
slot_time = SHORT_SLOT_TIME;
}
else //long slot time
slot_time = NON_SHORT_SLOT_TIME;
priv->slot_time = slot_time;
write_nic_byte(dev, SLOT_TIME, slot_time);
}
}
void rtl8192_net_update(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
u16 rate_config = 0;
net = & priv->ieee80211->current_network;
rtl8192_config_rate(dev, &rate_config);
priv->basic_rate = rate_config &= 0x15f;
write_nic_dword(dev,BSSIDR,((u32*)net->bssid)[0]);
write_nic_word(dev,BSSIDR+4,((u16*)net->bssid)[2]);
//for(i=0;i<ETH_ALEN;i++)
// write_nic_byte(dev,BSSID+i,net->bssid[i]);
rtl8192_update_msr(dev);
// rtl8192_update_cap(dev, net->capability);
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
{
write_nic_word(dev, ATIMWND, 2);
write_nic_word(dev, BCN_DMATIME, 1023);
write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
// write_nic_word(dev, BcnIntTime, 100);
write_nic_word(dev, BCN_DRV_EARLY_INT, 1);
write_nic_byte(dev, BCN_ERR_THRESH, 100);
BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
// TODO: BcnIFS may required to be changed on ASIC
BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
}
}
// Temporarily disabled: the hw beacon is not used any more.
// Re-enable it when necessary.
void rtl819xusb_beacon_tx(struct net_device *dev,u16 tx_rate)
{
}
inline u8 rtl8192_IsWirelessBMode(u16 rate)
{
if( ((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220) )
return 1;
else return 0;
}
u16 N_DBPSOfRate(u16 DataRate);
u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
u8 bShortPreamble)
{
u16 FrameTime;
u16 N_DBPS;
u16 Ceiling;
if( rtl8192_IsWirelessBMode(DataRate) )
{
if( bManagementFrame || !bShortPreamble || DataRate == 10 )
{ // long preamble
FrameTime = (u16)(144+48+(FrameLength*8/(DataRate/10)));
}
else
{ // Short preamble
FrameTime = (u16)(72+24+(FrameLength*8/(DataRate/10)));
}
if( ( FrameLength*8 % (DataRate/10) ) != 0 ) // get the ceiling
FrameTime ++;
} else { //802.11g DSSS-OFDM PLCP length field calculation.
N_DBPS = N_DBPSOfRate(DataRate);
Ceiling = (16 + 8*FrameLength + 6) / N_DBPS
+ (((16 + 8*FrameLength + 6) % N_DBPS) ? 1 : 0);
FrameTime = (u16)(16 + 4 + 4*Ceiling + 6);
}
return FrameTime;
}
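/*
 * Example: a 100-byte frame at 1 Mbps (DataRate == 10) with a long preamble
 * costs 144 + 48 + 100*8/1 = 992 us.  For the OFDM branch, N_DBPSOfRate()
 * below supplies the data bits per symbol (e.g. 24 at DataRate == 60).
 */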
u16 N_DBPSOfRate(u16 DataRate)
{
u16 N_DBPS = 24;
switch(DataRate)
{
case 60:
N_DBPS = 24;
break;
case 90:
N_DBPS = 36;
break;
case 120:
N_DBPS = 48;
break;
case 180:
N_DBPS = 72;
break;
case 240:
N_DBPS = 96;
break;
case 360:
N_DBPS = 144;
break;
case 480:
N_DBPS = 192;
break;
case 540:
N_DBPS = 216;
break;
default:
break;
}
return N_DBPS;
}
void rtl819xU_cmd_isr(struct urb *tx_cmd_urb, struct pt_regs *regs)
{
usb_free_urb(tx_cmd_urb);
}
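/* Map a software tx queue id to the USB bulk-out pipe it should use; the
 * table is filled in rtl8192_init() according to the endpoint layout. */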
unsigned int txqueue2outpipe(struct r8192_priv *priv, unsigned int tx_queue)
{
if(tx_queue >= 9)
{
RT_TRACE(COMP_ERR,"%s():Unknown queue ID!!!\n",__FUNCTION__);
return 0x04;
}
return priv->txqueue_to_outpipemap[tx_queue];
}
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//u8 *tx;
int status;
struct urb *tx_urb;
//int urb_buf_len;
unsigned int idx_pipe;
tx_desc_cmd_819x_usb *pdesc = (tx_desc_cmd_819x_usb *)skb->data;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
//printk("\n %s::queue_index = %d\n",__FUNCTION__, queue_index);
atomic_inc(&priv->tx_pending[queue_index]);
tx_urb = usb_alloc_urb(0,GFP_ATOMIC);
if(!tx_urb){
dev_kfree_skb(skb);
return -ENOMEM;
}
memset(pdesc, 0, USB_HWDESC_HEADER_LEN);
/* Tx descriptor ought to be set according to the skb->cb */
pdesc->FirstSeg = 1;//bFirstSeg;
pdesc->LastSeg = 1;//bLastSeg;
pdesc->CmdInit = tcb_desc->bCmdOrInit;
pdesc->TxBufferSize = tcb_desc->txbuf_size;
pdesc->OWN = 1;
pdesc->LINIP = tcb_desc->bLastIniPkt;
//----------------------------------------------------------------------------
// Fill up USB_OUT_CONTEXT.
//----------------------------------------------------------------------------
// Get index to out pipe from specified QueueID.
#ifndef USE_ONE_PIPE
idx_pipe = txqueue2outpipe(priv,queue_index);
#else
idx_pipe = 0x04;
#endif
#ifdef JOHN_DUMP_TXDESC
{
int i;
u32 *td = (u32 *)pdesc;
printk("<Tx descriptor>---");
for (i = 0; i < 8; i++)
printk("%8x ", td[i]);
printk("\n");
}
#endif
usb_fill_bulk_urb(tx_urb,priv->udev, usb_sndbulkpipe(priv->udev,idx_pipe), \
skb->data, skb->len, rtl8192_tx_isr, skb);
status = usb_submit_urb(tx_urb, GFP_ATOMIC);
if (!status){
return 0;
}else{
DMESGE("Error TX CMD URB, error %d",
status);
return -1;
}
}
/*
* Mapping Software/Hardware descriptor queue id to "Queue Select Field"
* in TxFwInfo data structure
* 2006.10.30 by Emily
*
* \param QUEUEID Software Queue
*/
u8 MapHwQueueToFirmwareQueue(u8 QueueID)
{
u8 QueueSelect = 0x0; // default
switch(QueueID) {
case BE_QUEUE:
QueueSelect = QSLT_BE; //or QSelect = pTcb->priority;
break;
case BK_QUEUE:
QueueSelect = QSLT_BK; //or QSelect = pTcb->priority;
break;
case VO_QUEUE:
QueueSelect = QSLT_VO; //or QSelect = pTcb->priority;
break;
case VI_QUEUE:
QueueSelect = QSLT_VI; //or QSelect = pTcb->priority;
break;
case MGNT_QUEUE:
QueueSelect = QSLT_MGNT;
break;
case BEACON_QUEUE:
QueueSelect = QSLT_BEACON;
break;
// TODO: 2006.10.30 mark other queue selection until we verify it is OK
// TODO: Remove Assertions
//#if (RTL819X_FPGA_VER & RTL819X_FPGA_GUANGAN_070502)
case TXCMD_QUEUE:
QueueSelect = QSLT_CMD;
break;
//#endif
case HIGH_QUEUE:
QueueSelect = QSLT_HIGH;
break;
default:
RT_TRACE(COMP_ERR, "TransmitTCB(): Impossible Queue Selection: %d \n", QueueID);
break;
}
return QueueSelect;
}
u8 MRateToHwRate8190Pci(u8 rate)
{
u8 ret = DESC90_RATE1M;
switch(rate) {
case MGN_1M: ret = DESC90_RATE1M; break;
case MGN_2M: ret = DESC90_RATE2M; break;
case MGN_5_5M: ret = DESC90_RATE5_5M; break;
case MGN_11M: ret = DESC90_RATE11M; break;
case MGN_6M: ret = DESC90_RATE6M; break;
case MGN_9M: ret = DESC90_RATE9M; break;
case MGN_12M: ret = DESC90_RATE12M; break;
case MGN_18M: ret = DESC90_RATE18M; break;
case MGN_24M: ret = DESC90_RATE24M; break;
case MGN_36M: ret = DESC90_RATE36M; break;
case MGN_48M: ret = DESC90_RATE48M; break;
case MGN_54M: ret = DESC90_RATE54M; break;
// HT rate since here
case MGN_MCS0: ret = DESC90_RATEMCS0; break;
case MGN_MCS1: ret = DESC90_RATEMCS1; break;
case MGN_MCS2: ret = DESC90_RATEMCS2; break;
case MGN_MCS3: ret = DESC90_RATEMCS3; break;
case MGN_MCS4: ret = DESC90_RATEMCS4; break;
case MGN_MCS5: ret = DESC90_RATEMCS5; break;
case MGN_MCS6: ret = DESC90_RATEMCS6; break;
case MGN_MCS7: ret = DESC90_RATEMCS7; break;
case MGN_MCS8: ret = DESC90_RATEMCS8; break;
case MGN_MCS9: ret = DESC90_RATEMCS9; break;
case MGN_MCS10: ret = DESC90_RATEMCS10; break;
case MGN_MCS11: ret = DESC90_RATEMCS11; break;
case MGN_MCS12: ret = DESC90_RATEMCS12; break;
case MGN_MCS13: ret = DESC90_RATEMCS13; break;
case MGN_MCS14: ret = DESC90_RATEMCS14; break;
case MGN_MCS15: ret = DESC90_RATEMCS15; break;
case (0x80|0x20): ret = DESC90_RATEMCS32; break;
default: break;
}
return ret;
}
u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc)
{
u8 tmp_Short;
tmp_Short = (TxHT==1)?((tcb_desc->bUseShortGI)?1:0):((tcb_desc->bUseShortPreamble)?1:0);
if(TxHT==1 && TxRate != DESC90_RATEMCS15)
tmp_Short = 0;
return tmp_Short;
}
static void tx_zero_isr(struct urb *tx_urb)
{
return;
}
/*
 * The tx procedure is as follows:
 * skb->cb contains all of the following information:
 * priority, morefrag, rate, &dev.
 */
short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tx_desc_819x_usb *tx_desc = (tx_desc_819x_usb *)skb->data;
tx_fwinfo_819x_usb *tx_fwinfo = (tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
int status;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
//int urb_len;
unsigned int idx_pipe;
// RT_DEBUG_DATA(COMP_SEND, tcb_desc, sizeof(cb_desc));
// printk("=============> %s\n", __FUNCTION__);
pend = atomic_read(&priv->tx_pending[tcb_desc->queue_index]);
/* we are locked here so the two atomic_read and inc are executed
 * without interleaving (for debug purposes only)
 */
if( pend > MAX_TX_URB){
printk("Too many pending tx urbs, discarding skb packet!\n");
dev_kfree_skb_any(skb);
return -1;
}
tx_urb = usb_alloc_urb(0,GFP_ATOMIC);
if(!tx_urb){
dev_kfree_skb_any(skb);
return -ENOMEM;
}
/* Fill Tx firmware info */
memset(tx_fwinfo,0,sizeof(tx_fwinfo_819x_usb));
/* DWORD 0 */
tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80)?1:0;
tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate);
tx_fwinfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
tx_fwinfo->Short = QueryIsShort(tx_fwinfo->TxHT, tx_fwinfo->TxRate, tcb_desc);
if(tcb_desc->bAMPDUEnable) {//AMPDU enabled
tx_fwinfo->AllowAggregation = 1;
/* DWORD 1 */
tx_fwinfo->RxMF = tcb_desc->ampdu_factor;
tx_fwinfo->RxAMD = tcb_desc->ampdu_density&0x07;//ampdudensity
} else {
tx_fwinfo->AllowAggregation = 0;
/* DWORD 1 */
tx_fwinfo->RxMF = 0;
tx_fwinfo->RxAMD = 0;
}
/* Protection mode related */
tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable)?1:0;
tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable)?1:0;
tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC)?1:0;
tx_fwinfo->RtsHT = (tcb_desc->rts_rate&0x80)?1:0;
tx_fwinfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT==0)?(tcb_desc->RTSSC):0;
tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT==1)?((tcb_desc->bRTSBW)?1:0):0;
tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT==0)?(tcb_desc->bRTSUseShortPreamble?1:0):\
(tcb_desc->bRTSUseShortGI?1:0);
/* Set Bandwidth and sub-channel settings. */
if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40)
{
if(tcb_desc->bPacketBW) {
tx_fwinfo->TxBandwidth = 1;
tx_fwinfo->TxSubCarrier = 0; //By SD3's Jerry suggestion, use duplicated mode
} else {
tx_fwinfo->TxBandwidth = 0;
tx_fwinfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
}
} else {
tx_fwinfo->TxBandwidth = 0;
tx_fwinfo->TxSubCarrier = 0;
}
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if (tcb_desc->drv_agg_enable)
{
tx_fwinfo->Tx_INFO_RSVD = (tcb_desc->DrvAggrNum & 0x1f) << 1;
}
#endif
/* Fill Tx descriptor */
memset(tx_desc, 0, sizeof(tx_desc_819x_usb));
/* DWORD 0 */
tx_desc->LINIP = 0;
tx_desc->CmdInit = 1;
tx_desc->Offset = sizeof(tx_fwinfo_819x_usb) + 8;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if (tcb_desc->drv_agg_enable) {
tx_desc->PktSize = tcb_desc->pkt_size;
} else
#endif
{
tx_desc->PktSize = (skb->len - TX_PACKET_SHIFT_BYTES) & 0xffff;
}
/*DWORD 1*/
tx_desc->SecCAMID= 0;
tx_desc->RATid = tcb_desc->RATRIndex;
{
//MPDUOverhead = 0;
tx_desc->NoEnc = 1;
}
tx_desc->SecType = 0x0;
if (tcb_desc->bHwSec)
{
switch (priv->ieee80211->pairwise_key_type)
{
case KEY_TYPE_WEP40:
case KEY_TYPE_WEP104:
tx_desc->SecType = 0x1;
tx_desc->NoEnc = 0;
break;
case KEY_TYPE_TKIP:
tx_desc->SecType = 0x2;
tx_desc->NoEnc = 0;
break;
case KEY_TYPE_CCMP:
tx_desc->SecType = 0x3;
tx_desc->NoEnc = 0;
break;
case KEY_TYPE_NA:
tx_desc->SecType = 0x0;
tx_desc->NoEnc = 1;
break;
}
}
tx_desc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
tx_desc->TxFWInfoSize = sizeof(tx_fwinfo_819x_usb);
tx_desc->DISFB = tcb_desc->bTxDisableRateFallBack;
tx_desc->USERATE = tcb_desc->bTxUseDriverAssingedRate;
/* Fill fields that are required to be initialized in all of the descriptors */
//DWORD 0
tx_desc->FirstSeg = 1;
tx_desc->LastSeg = 1;
tx_desc->OWN = 1;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if (tcb_desc->drv_agg_enable) {
tx_desc->TxBufferSize = tcb_desc->pkt_size + sizeof(tx_fwinfo_819x_usb);
} else
#endif
{
//DWORD 2
tx_desc->TxBufferSize = (u32)(skb->len - USB_HWDESC_HEADER_LEN);
}
/* Get index to out pipe from specified QueueID */
#ifndef USE_ONE_PIPE
idx_pipe = txqueue2outpipe(priv,tcb_desc->queue_index);
#else
idx_pipe = 0x5;
#endif
//RT_DEBUG_DATA(COMP_SEND,tx_fwinfo,sizeof(tx_fwinfo_819x_usb));
//RT_DEBUG_DATA(COMP_SEND,tx_desc,sizeof(tx_desc_819x_usb));
/* To submit bulk urb */
usb_fill_bulk_urb(tx_urb,udev,
usb_sndbulkpipe(udev,idx_pipe), skb->data,
skb->len, rtl8192_tx_isr, skb);
status = usb_submit_urb(tx_urb, GFP_ATOMIC);
if (!status){
//we need to send a zero-length packet whenever a 512N-byte (high speed) or 64N-byte (full speed) transfer has been sent, i.e. whenever the last bulk packet was exactly max-packet sized; otherwise the receiving side keeps waiting for the rest of the transfer. WB. 2008.08.27
bool bSend0Byte = false;
u8 zero = 0;
if(udev->speed == USB_SPEED_HIGH)
{
if (skb->len > 0 && skb->len % 512 == 0)
bSend0Byte = true;
}
else
{
if (skb->len > 0 && skb->len % 64 == 0)
bSend0Byte = true;
}
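/* Note: the zero-length urb completes asynchronously while &zero points into
 * this stack frame; with a transfer length of 0 the buffer should never be
 * touched, but this is fragile. */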
if (bSend0Byte)
{
tx_urb_zero = usb_alloc_urb(0,GFP_ATOMIC);
if(!tx_urb_zero){
RT_TRACE(COMP_ERR, "can't alloc urb for zero byte\n");
return -ENOMEM;
}
usb_fill_bulk_urb(tx_urb_zero,udev,
usb_sndbulkpipe(udev,idx_pipe), &zero,
0, tx_zero_isr, dev);
status = usb_submit_urb(tx_urb_zero, GFP_ATOMIC);
if (status){
RT_TRACE(COMP_ERR, "Error TX URB for zero byte %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status);
return -1;
}
}
dev->trans_start = jiffies;
atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
return 0;
} else {
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
return -1;
}
}
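/*
 * Allocate the rx urb table (MAX_RX_URB data slots plus one extra slot used
 * by the THOMAS_BEACON path) and the rx skb pointer array.
 */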
short rtl8192_usb_initendpoints(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB+1),
GFP_KERNEL);
if (priv->rx_urb == NULL)
return -ENOMEM;
/* Clear the whole table up front so unused slots stay NULL. */
memset(priv->rx_urb, 0, sizeof(struct urb *) * (MAX_RX_URB+1));
#ifndef JACKSON_NEW_RX
{
int i;
for(i=0;i<(MAX_RX_URB+1);i++){
priv->rx_urb[i] = usb_alloc_urb(0,GFP_KERNEL);
priv->rx_urb[i]->transfer_buffer = kmalloc(RX_URB_SIZE, GFP_KERNEL);
priv->rx_urb[i]->transfer_buffer_length = RX_URB_SIZE;
}
}
#endif
#ifdef THOMAS_BEACON
{
long align = 0;
void *oldaddr, *newaddr;
priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
priv->oldaddr = kmalloc(16, GFP_KERNEL);
oldaddr = priv->oldaddr;
align = ((long)oldaddr) & 3;
if (align) {
newaddr = oldaddr + 4 - align;
priv->rx_urb[16]->transfer_buffer_length = 16 - 4 + align;
} else {
newaddr = oldaddr;
priv->rx_urb[16]->transfer_buffer_length = 16;
}
priv->rx_urb[16]->transfer_buffer = newaddr;
}
#endif
priv->pp_rxskb = kcalloc(MAX_RX_URB, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->pp_rxskb) {
kfree(priv->rx_urb);
priv->pp_rxskb = NULL;
priv->rx_urb = NULL;
DMESGE("Endpoint Alloc Failure");
return -ENOMEM;
}
printk("End of initendpoints\n");
return 0;
}
#ifdef THOMAS_BEACON
void rtl8192_usb_deleteendpoints(struct net_device *dev)
{
int i;
struct r8192_priv *priv = ieee80211_priv(dev);
if(priv->rx_urb){
for(i=0;i<(MAX_RX_URB+1);i++){
usb_kill_urb(priv->rx_urb[i]);
usb_free_urb(priv->rx_urb[i]);
}
kfree(priv->rx_urb);
priv->rx_urb = NULL;
}
kfree(priv->oldaddr);
priv->oldaddr = NULL;
if (priv->pp_rxskb) {
kfree(priv->pp_rxskb);
priv->pp_rxskb = NULL;
}
}
#else
void rtl8192_usb_deleteendpoints(struct net_device *dev)
{
int i;
struct r8192_priv *priv = ieee80211_priv(dev);
#ifndef JACKSON_NEW_RX
if(priv->rx_urb){
for(i=0;i<(MAX_RX_URB+1);i++){
usb_kill_urb(priv->rx_urb[i]);
kfree(priv->rx_urb[i]->transfer_buffer);
usb_free_urb(priv->rx_urb[i]);
}
kfree(priv->rx_urb);
priv->rx_urb = NULL;
}
#else
kfree(priv->rx_urb);
priv->rx_urb = NULL;
kfree(priv->oldaddr);
priv->oldaddr = NULL;
if (priv->pp_rxskb) {
kfree(priv->pp_rxskb);
priv->pp_rxskb = NULL;
}
#endif
}
#endif
extern void rtl8192_update_ratr_table(struct net_device* dev);
void rtl8192_link_change(struct net_device *dev)
{
// int i;
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
//write_nic_word(dev, BCN_INTR_ITV, net->beacon_interval);
if (ieee->state == IEEE80211_LINKED)
{
rtl8192_net_update(dev);
rtl8192_update_ratr_table(dev);
//add this as in pure N mode, wep encryption will use software way, but there is no chance to set this as wep will not set group key in wext. WB.2008.07.08
if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type))
EnableHWSecurityConfig8192(dev);
}
/*update timing params*/
// RT_TRACE(COMP_CH, "========>%s(), chan:%d\n", __FUNCTION__, priv->chan);
// rtl8192_set_chan(dev, priv->chan);
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
{
u32 reg = 0;
reg = read_nic_dword(dev, RCR);
if (priv->ieee80211->state == IEEE80211_LINKED)
priv->ReceiveConfig = reg |= RCR_CBSSID;
else
priv->ReceiveConfig = reg &= ~RCR_CBSSID;
write_nic_dword(dev, RCR, reg);
}
// rtl8192_set_rxconf(dev);
}
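/*
 * Fallback EDCA parameters used when the AP supplies none.  The cw values
 * appear to be ECW exponents (CWmin = 2^3 - 1 = 7, CWmax = 2^7 - 1 = 127),
 * matching the AC_PARAM_ECW_* fields programmed in rtl8192_qos_activate().
 */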
static struct ieee80211_qos_parameters def_qos_parameters = {
{3,3,3,3},/* cw_min */
{7,7,7,7},/* cw_max */
{2,2,2,2},/* aifs */
{0,0,0,0},/* flags */
{0,0,0,0} /* tx_op_limit */
};
void rtl8192_update_beacon(struct work_struct * work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_device* ieee = priv->ieee80211;
struct ieee80211_network* net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
HTUpdateSelfAndPeerSetting(ieee, net);
ieee->pHTInfo->bCurrentRT2RTLongSlotTime = net->bssht.bdRT2RTLongSlotTime;
rtl8192_update_cap(dev, net->capability);
}
/*
* background support to run QoS activate functionality
*/
int WDCAPARA_ADD[] = {EDCAPARA_BE,EDCAPARA_BK,EDCAPARA_VI,EDCAPARA_VO};
void rtl8192_qos_activate(struct work_struct * work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->current_network.mode;
//u32 size = sizeof(struct ieee80211_qos_parameters);
u8 u1bAIFS;
u32 u4bAcParam;
int i;
if (priv == NULL)
return;
mutex_lock(&priv->mutex);
if(priv->ieee80211->state != IEEE80211_LINKED)
goto success;
RT_TRACE(COMP_QOS,"qos active process with associate response received\n");
/* It is better to set the slot time first */
/* Since we only support b/g mode at present, the slot time is a 9us/20us selection */
/* update the AC parameters in the related registers */
for(i = 0; i < QOS_QUEUE_NUM; i++) {
//Mode G/A: slotTimeTimer = 9; Mode B: 20
u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
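/* Pack TXOP limit | ECWmax | ECWmin | AIFS into one 32-bit EDCA register value. */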
u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[i]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
(((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
(((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
//write_nic_dword(dev, WDCAPARA_ADD[i], 0x005e4332);
}
success:
mutex_unlock(&priv->mutex);
}
static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
int active_network,
struct ieee80211_network *network)
{
int ret = 0;
u32 size = sizeof(struct ieee80211_qos_parameters);
if(priv->ieee80211->state !=IEEE80211_LINKED)
return ret;
if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
return ret;
if (network->flags & NETWORK_HAS_QOS_MASK) {
if (active_network &&
(network->flags & NETWORK_HAS_QOS_PARAMETERS))
network->qos_data.active = network->qos_data.supported;
if ((network->qos_data.active == 1) && (active_network == 1) &&
(network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
(network->qos_data.old_param_count !=
network->qos_data.param_count)) {
network->qos_data.old_param_count =
network->qos_data.param_count;
queue_work(priv->priv_wq, &priv->qos_activate);
RT_TRACE (COMP_QOS, "QoS parameters change call "
"qos_activate\n");
}
} else {
memcpy(&priv->ieee80211->current_network.qos_data.parameters,\
&def_qos_parameters, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
queue_work(priv->priv_wq, &priv->qos_activate);
RT_TRACE(COMP_QOS, "QoS was disabled call qos_activate \n");
}
network->qos_data.active = 0;
network->qos_data.supported = 0;
}
return 0;
}
/* handle and manage frame from beacon and probe response */
static int rtl8192_handle_beacon(struct net_device * dev,
struct ieee80211_beacon * beacon,
struct ieee80211_network * network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
rtl8192_qos_handle_probe_response(priv,1,network);
queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0);
return 0;
}
/*
* handling the beaconing responses. if we get different QoS setting
* off the network from the associated setting, adjust the QoS
* setting
*/
static int rtl8192_qos_association_resp(struct r8192_priv *priv,
struct ieee80211_network *network)
{
int ret = 0;
unsigned long flags;
u32 size = sizeof(struct ieee80211_qos_parameters);
int set_qos_param = 0;
if ((priv == NULL) || (network == NULL))
return ret;
if(priv->ieee80211->state !=IEEE80211_LINKED)
return ret;
if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
return ret;
spin_lock_irqsave(&priv->ieee80211->lock, flags);
if(network->flags & NETWORK_HAS_QOS_PARAMETERS) {
memcpy(&priv->ieee80211->current_network.qos_data.parameters,\
&network->qos_data.parameters,\
sizeof(struct ieee80211_qos_parameters));
priv->ieee80211->current_network.qos_data.active = 1;
{
set_qos_param = 1;
/* update qos parameter for current network */
priv->ieee80211->current_network.qos_data.old_param_count = \
priv->ieee80211->current_network.qos_data.param_count;
priv->ieee80211->current_network.qos_data.param_count = \
network->qos_data.param_count;
}
} else {
memcpy(&priv->ieee80211->current_network.qos_data.parameters,\
&def_qos_parameters, size);
priv->ieee80211->current_network.qos_data.active = 0;
priv->ieee80211->current_network.qos_data.supported = 0;
set_qos_param = 1;
}
spin_unlock_irqrestore(&priv->ieee80211->lock, flags);
RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n",__FUNCTION__,network->flags ,priv->ieee80211->current_network.qos_data.active);
if (set_qos_param == 1)
queue_work(priv->priv_wq, &priv->qos_activate);
return ret;
}
static int rtl8192_handle_assoc_response(struct net_device *dev,
struct ieee80211_assoc_response_frame *resp,
struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
rtl8192_qos_association_resp(priv, network);
return 0;
}
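/*
 * RATR bitmap layout (as used below): bits 0-3 are the CCK rates, bits 4-11
 * the legacy OFDM rates, and bits 12 and up the HT MCS rates shifted in from
 * the operational rate set; bit 31 requests short GI.
 */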
void rtl8192_update_ratr_table(struct net_device* dev)
// POCTET_STRING posLegacyRate,
// u8* pMcsRate)
// PRT_WLAN_STA pEntry)
{
struct r8192_priv* priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
u8* pMcsRate = ieee->dot11HTOperationalRateSet;
//struct ieee80211_network *net = &ieee->current_network;
u32 ratr_value = 0;
u8 rate_index = 0;
rtl8192_config_rate(dev, (u16*)(&ratr_value));
ratr_value |= (*(u16*)(pMcsRate)) << 12;
// switch (net->mode)
switch (ieee->mode)
{
case IEEE_A:
ratr_value &= 0x00000FF0;
break;
case IEEE_B:
ratr_value &= 0x0000000F;
break;
case IEEE_G:
ratr_value &= 0x00000FF7;
break;
case IEEE_N_24G:
case IEEE_N_5G:
if (ieee->pHTInfo->PeerMimoPs == 0) //MIMO_PS_STATIC
ratr_value &= 0x0007F007;
else{
if (priv->rf_type == RF_1T2R)
ratr_value &= 0x000FF007;
else
ratr_value &= 0x0F81F007;
}
break;
default:
break;
}
ratr_value &= 0x0FFFFFFF;
if(ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI40MHz){
ratr_value |= 0x80000000;
}else if(!ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI20MHz){
ratr_value |= 0x80000000;
}
write_nic_dword(dev, RATR0+rate_index*4, ratr_value);
write_nic_byte(dev, UFWP, 1);
}
static u8 ccmp_ie[4] = {0x00,0x50,0xf2,0x04};
static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
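/*
 * N mode is only enabled for open networks or CCMP(AES) pairwise keys: the
 * check below looks for the CCMP cipher suite in either the vendor WPA IE
 * (0xdd) or the RSN IE (0x30) captured at association time.
 */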
bool GetNmodeSupportBySecCfg8192(struct net_device*dev)
{
struct r8192_priv* priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
struct ieee80211_network * network = &ieee->current_network;
int wpa_ie_len= ieee->wpa_ie_len;
struct ieee80211_crypt_data* crypt;
int encrypt;
crypt = ieee->crypt[ieee->tx_keyidx];
//use the connecting AP's capability, instead of only our driver's security config, to decide whether N mode or G mode should be used
encrypt = (network->capability & WLAN_CAPABILITY_PRIVACY) || (ieee->host_encrypt && crypt && crypt->ops && (0 == strcmp(crypt->ops->name,"WEP")));
/* simple judgement */
if(encrypt && (wpa_ie_len == 0)) {
/* wep encryption, no N mode setting */
return false;
// } else if((wpa_ie_len != 0)&&(memcmp(&(ieee->wpa_ie[14]),ccmp_ie,4))) {
} else if((wpa_ie_len != 0)) {
/* parse pairwise key type */
//if((pairwisekey = WEP40)||(pairwisekey = WEP104)||(pairwisekey = TKIP))
if (((ieee->wpa_ie[0] == 0xdd) && (!memcmp(&(ieee->wpa_ie[14]),ccmp_ie,4))) || ((ieee->wpa_ie[0] == 0x30) && (!memcmp(&ieee->wpa_ie[10],ccmp_rsn_ie, 4))))
return true;
else
return false;
} else {
return true;
}
return true;
}
bool GetHalfNmodeSupportByAPs819xUsb(struct net_device* dev)
{
struct r8192_priv* priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
return (ieee->bHalfWirelessN24GMode == true);
}
void rtl8192_refresh_supportrate(struct r8192_priv* priv)
{
struct ieee80211_device* ieee = priv->ieee80211;
//we do not consider setting the supported rates for ABG mode; only the HT MCS rate set is configured here.
if (ieee->mode == WIRELESS_MODE_N_24G || ieee->mode == WIRELESS_MODE_N_5G)
{
memcpy(ieee->Regdot11HTOperationalRateSet, ieee->RegHTSuppRateSet, 16);
//RT_DEBUG_DATA(COMP_INIT, ieee->RegHTSuppRateSet, 16);
//RT_DEBUG_DATA(COMP_INIT, ieee->Regdot11HTOperationalRateSet, 16);
}
else
memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
return;
}
u8 rtl8192_getSupportedWireleeMode(struct net_device*dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 ret = 0;
switch(priv->rf_chip)
{
case RF_8225:
case RF_8256:
case RF_PSEUDO_11N:
ret = (WIRELESS_MODE_N_24G|WIRELESS_MODE_G|WIRELESS_MODE_B);
break;
case RF_8258:
ret = (WIRELESS_MODE_A|WIRELESS_MODE_N_5G);
break;
default:
ret = WIRELESS_MODE_B;
break;
}
return ret;
}
void rtl8192_SetWirelessMode(struct net_device* dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
if ((wireless_mode == WIRELESS_MODE_AUTO) || ((wireless_mode&bSupportMode)==0))
{
if(bSupportMode & WIRELESS_MODE_N_24G)
{
wireless_mode = WIRELESS_MODE_N_24G;
}
else if(bSupportMode & WIRELESS_MODE_N_5G)
{
wireless_mode = WIRELESS_MODE_N_5G;
}
else if((bSupportMode & WIRELESS_MODE_A))
{
wireless_mode = WIRELESS_MODE_A;
}
else if((bSupportMode & WIRELESS_MODE_G))
{
wireless_mode = WIRELESS_MODE_G;
}
else if((bSupportMode & WIRELESS_MODE_B))
{
wireless_mode = WIRELESS_MODE_B;
}
else{
RT_TRACE(COMP_ERR, "%s(), No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n", __FUNCTION__,bSupportMode);
wireless_mode = WIRELESS_MODE_B;
}
}
#ifdef TO_DO_LIST // TODO: this function doesn't work well at this time; we should wait for the FPGA
ActUpdateChannelAccessSetting( pAdapter, pHalData->CurrentWirelessMode, &pAdapter->MgntInfo.Info8185.ChannelAccessSetting );
#endif
priv->ieee80211->mode = wireless_mode;
if ((wireless_mode == WIRELESS_MODE_N_24G) || (wireless_mode == WIRELESS_MODE_N_5G))
priv->ieee80211->pHTInfo->bEnableHT = 1;
else
priv->ieee80211->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
rtl8192_refresh_supportrate(priv);
}
//init priv variables here; only non-zero values need to be initialized explicitly.
static void rtl8192_init_priv_variable(struct net_device* dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i;
priv->card_8192 = NIC_8192U;
priv->chan = 1; //set to channel 1
priv->ieee80211->mode = WIRELESS_MODE_AUTO; //SET AUTO
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->ieee_up=0;
priv->retry_rts = DEFAULT_RETRY_RTS;
priv->retry_data = DEFAULT_RETRY_DATA;
priv->ieee80211->rts = DEFAULT_RTS_THRESHOLD;
priv->ieee80211->rate = 110; //11 Mbps
priv->ieee80211->short_slot = 1;
priv->promisc = (dev->flags & IFF_PROMISC) ? 1:0;
priv->CckPwEnl = 6;
//for silent reset
priv->IrpPendingCount = 1;
priv->ResetProgress = RESET_TYPE_NORESET;
priv->bForcedSilentReset = 0;
priv->bDisableNormalResetCheck = false;
priv->force_reset = false;
priv->ieee80211->FwRWRF = 0; //we don't use FW read/write RF until stable firmware is available.
priv->ieee80211->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
priv->ieee80211->iw_mode = IW_MODE_INFRA;
priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE |
IEEE_SOFTMAC_BEACONS;//added by amy 080604 //| //IEEE_SOFTMAC_SINGLE_QUEUE;
priv->ieee80211->active_scan = 1;
priv->ieee80211->modulation = IEEE80211_CCK_MODULATION | IEEE80211_OFDM_MODULATION;
priv->ieee80211->host_encrypt = 1;
priv->ieee80211->host_decrypt = 1;
priv->ieee80211->start_send_beacons = NULL;//rtl819xusb_beacon_tx;//-by amy 080604
priv->ieee80211->stop_send_beacons = NULL;//rtl8192_beacon_stop;//-by amy 080604
priv->ieee80211->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
priv->ieee80211->set_chan = rtl8192_set_chan;
priv->ieee80211->link_change = rtl8192_link_change;
priv->ieee80211->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
priv->ieee80211->data_hard_stop = rtl8192_data_hard_stop;
priv->ieee80211->data_hard_resume = rtl8192_data_hard_resume;
priv->ieee80211->init_wmmparam_flag = 0;
priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
priv->ieee80211->check_nic_enough_desc = check_nic_enough_desc;
priv->ieee80211->tx_headroom = TX_PACKET_SHIFT_BYTES;
priv->ieee80211->qos_support = 1;
//added by WB
// priv->ieee80211->SwChnlByTimerHandler = rtl8192_phy_SwChnl;
priv->ieee80211->SetBWModeHandler = rtl8192_SetBWMode;
priv->ieee80211->handle_assoc_response = rtl8192_handle_assoc_response;
priv->ieee80211->handle_beacon = rtl8192_handle_beacon;
//added by david
priv->ieee80211->GetNmodeSupportBySecCfg = GetNmodeSupportBySecCfg8192;
priv->ieee80211->GetHalfNmodeSupportByAPsHandler = GetHalfNmodeSupportByAPs819xUsb;
priv->ieee80211->SetWirelessMode = rtl8192_SetWirelessMode;
//added by amy
priv->ieee80211->InitialGainHandler = InitialGain819xUsb;
priv->card_type = USB;
#ifdef TO_DO_LIST
if(Adapter->bInHctTest)
{
pHalData->ShortRetryLimit = 7;
pHalData->LongRetryLimit = 7;
}
#endif
{
priv->ShortRetryLimit = 0x30;
priv->LongRetryLimit = 0x30;
}
priv->EarlyRxThreshold = 7;
priv->enable_gpio0 = 0;
priv->TransmitConfig =
// TCR_DurProcMode | //for RTL8185B, duration setting by HW
//? TCR_DISReqQsize |
(TCR_MXDMA_2048<<TCR_MXDMA_OFFSET)| // Max DMA Burst Size per Tx DMA Burst, 7: reserved.
(priv->ShortRetryLimit<<TCR_SRL_OFFSET)| // Short retry limit
(priv->LongRetryLimit<<TCR_LRL_OFFSET) | // Long retry limit
(false ? TCR_SAT: 0); // FALSE: HW provides PLCP length and LENGEXT, TRUE: SW provides them
#ifdef TO_DO_LIST
if(Adapter->bInHctTest)
pHalData->ReceiveConfig = pHalData->CSMethod |
RCR_AMF | RCR_ADF | //RCR_AAP | //accept management/data
//guangan200710
RCR_ACF | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
((u32)7<<RCR_MXDMA_OFFSET) | // Max DMA Burst Size per Rx DMA Burst, 7: unlimited.
(pHalData->EarlyRxThreshold<<RCR_FIFO_OFFSET) | // Rx FIFO Threshold, 7: No Rx threshold.
(pHalData->EarlyRxThreshold == 7 ? RCR_OnlyErlPkt:0);
else
#endif
priv->ReceiveConfig =
RCR_AMF | RCR_ADF | //accept management/data
RCR_ACF | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
//RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
((u32)7<<RCR_MXDMA_OFFSET)| // Max DMA Burst Size per Rx DMA Burst, 7: unlimited.
(priv->EarlyRxThreshold<<RX_FIFO_THRESHOLD_SHIFT) | // Rx FIFO Threshold, 7: No Rx threshold.
(priv->EarlyRxThreshold == 7 ? RCR_ONLYERLPKT:0);
priv->AcmControl = 0;
priv->pFirmware = kzalloc(sizeof(rt_firmware), GFP_KERNEL);
/* rx related queue */
skb_queue_head_init(&priv->rx_queue);
skb_queue_head_init(&priv->skb_queue);
/* Tx related queue */
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_head_init(&priv->ieee80211->skb_waitQ [i]);
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_head_init(&priv->ieee80211->skb_aggQ [i]);
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_head_init(&priv->ieee80211->skb_drv_aggQ [i]);
}
priv->rf_set_chan = rtl8192_phy_SwChnl;
}
//init lock here
static void rtl8192_init_priv_lock(struct r8192_priv* priv)
{
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->irq_lock);//added by thomas
//spin_lock_init(&priv->rf_lock);
sema_init(&priv->wx_sem,1);
sema_init(&priv->rf_sem,1);
mutex_init(&priv->mutex);
}
extern void rtl819x_watchdog_wqcallback(struct work_struct *work);
void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
//init tasklet and work queues here. only 2.6 and above kernels are considered
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct net_device* dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
priv->priv_wq = create_workqueue(DRV_NAME);
INIT_WORK(&priv->reset_wq, rtl8192_restart);
//INIT_DELAYED_WORK(&priv->watch_dog_wq, hal_dm_watchdog);
INIT_DELAYED_WORK(&priv->watch_dog_wq, rtl819x_watchdog_wqcallback);
INIT_DELAYED_WORK(&priv->txpower_tracking_wq, dm_txpower_trackingcallback);
// INIT_DELAYED_WORK(&priv->gpio_change_rf_wq, dm_gpio_change_rf_callback);
INIT_DELAYED_WORK(&priv->rfpath_check_wq, dm_rf_pathcheck_workitemcallback);
INIT_DELAYED_WORK(&priv->update_beacon_wq, rtl8192_update_beacon);
INIT_DELAYED_WORK(&priv->initialgain_operate_wq, InitialGainOperateWorkItemCallBack);
//INIT_WORK(&priv->SwChnlWorkItem, rtl8192_SwChnl_WorkItem);
//INIT_WORK(&priv->SetBWModeWorkItem, rtl8192_SetBWModeWorkItem);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
tasklet_init(&priv->irq_rx_tasklet,
(void(*)(unsigned long))rtl8192_irq_rx_tasklet,
(unsigned long)priv);
}
static void rtl8192_get_eeprom_size(struct net_device* dev)
{
u16 curCR = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_EPROM, "===========>%s()\n", __FUNCTION__);
curCR = read_nic_word_E(dev,EPROM_CMD);
RT_TRACE(COMP_EPROM, "read from Reg EPROM_CMD(%x):%x\n", EPROM_CMD, curCR);
// TODO: do we need to consider BIT5?
priv->epromtype = (curCR & Cmd9346CR_9356SEL) ? EPROM_93c56 : EPROM_93c46;
RT_TRACE(COMP_EPROM, "<===========%s(), epromtype:%d\n", __FUNCTION__, priv->epromtype);
}
//swap the byte order of a 16-bit value, e.g. 0x1234 -> 0x3412. ntohs/htons are no-ops on big-endian hosts, so an explicit swap is used instead.
static inline u16 endian_swap(u16* data)
{
u16 tmp = *data;
*data = (tmp >> 8) | (tmp << 8);
return *data;
}
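/*
 * Read the adapter configuration from the EEPROM: VID/PID, channel plan,
 * customer ID, MAC address and, for A-cut (VERSION_819xU_A) boards, the
 * per-channel tx power tables.  When the EEPROM ID does not match,
 * conservative built-in defaults are used instead.
 */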
static void rtl8192_read_eeprom_info(struct net_device* dev)
{
u16 wEPROM_ID = 0;
u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x02};
u8 bLoad_From_EEPOM = false;
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tmpValue = 0;
RT_TRACE(COMP_EPROM, "===========>%s()\n", __FUNCTION__);
wEPROM_ID = eprom_read(dev, 0); //first read EEPROM ID out;
RT_TRACE(COMP_EPROM, "EEPROM ID is 0x%x\n", wEPROM_ID);
if (wEPROM_ID != RTL8190_EEPROM_ID)
{
RT_TRACE(COMP_ERR, "EEPROM ID is invalid(is 0x%x(should be 0x%x)\n", wEPROM_ID, RTL8190_EEPROM_ID);
}
else
bLoad_From_EEPOM = true;
if (bLoad_From_EEPOM)
{
tmpValue = eprom_read(dev, (EEPROM_VID>>1));
priv->eeprom_vid = endian_swap(&tmpValue);
priv->eeprom_pid = eprom_read(dev, (EEPROM_PID>>1));
tmpValue = eprom_read(dev, (EEPROM_ChannelPlan>>1));
priv->eeprom_ChannelPlan =((tmpValue&0xff00)>>8);
priv->btxpowerdata_readfromEEPORM = true;
priv->eeprom_CustomerID = eprom_read(dev, (EEPROM_Customer_ID>>1)) >>8;
}
else
{
priv->eeprom_vid = 0;
priv->eeprom_pid = 0;
priv->card_8192_version = VERSION_819xU_B;
priv->eeprom_ChannelPlan = 0;
priv->eeprom_CustomerID = 0;
}
RT_TRACE(COMP_EPROM, "vid:0x%4x, pid:0x%4x, CustomID:0x%2x, ChanPlan:0x%x\n", priv->eeprom_vid, priv->eeprom_pid, priv->eeprom_CustomerID, priv->eeprom_ChannelPlan);
//set channelplan from eeprom
priv->ChannelPlan = priv->eeprom_ChannelPlan;
if (bLoad_From_EEPOM)
{
int i;
for (i=0; i<6; i+=2)
{
u16 tmp = 0;
tmp = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i)>>1));
*(u16*)(&dev->dev_addr[i]) = tmp;
}
}
else
{
memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
//should I set IDR0 here?
}
RT_TRACE(COMP_EPROM, "MAC addr:%pM\n", dev->dev_addr);
priv->rf_type = RTL819X_DEFAULT_RF_TYPE; //default 1T2R
priv->rf_chip = RF_8256;
if (priv->card_8192_version == (u8)VERSION_819xU_A)
{
//read Tx power gain offset of legacy OFDM to HT rate
if (bLoad_From_EEPOM)
priv->EEPROMTxPowerDiff = (eprom_read(dev, (EEPROM_TxPowerDiff>>1))&0xff00) >> 8;
else
priv->EEPROMTxPowerDiff = EEPROM_Default_TxPower;
RT_TRACE(COMP_EPROM, "TxPowerDiff:%d\n", priv->EEPROMTxPowerDiff);
//read ThermalMeter from EEPROM
if (bLoad_From_EEPOM)
priv->EEPROMThermalMeter = (u8)(eprom_read(dev, (EEPROM_ThermalMeter>>1))&0x00ff);
else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
RT_TRACE(COMP_EPROM, "ThermalMeter:%d\n", priv->EEPROMThermalMeter);
//vivi, for tx power track
priv->TSSI_13dBm = priv->EEPROMThermalMeter *100;
//read antenna tx power offset of B/C/D to A from EEPROM
if (bLoad_From_EEPOM)
priv->EEPROMPwDiff = (eprom_read(dev, (EEPROM_PwDiff>>1))&0x0f00)>>8;
else
priv->EEPROMPwDiff = EEPROM_Default_PwDiff;
RT_TRACE(COMP_EPROM, "TxPwDiff:%d\n", priv->EEPROMPwDiff);
// Read CrystalCap from EEPROM
if (bLoad_From_EEPOM)
priv->EEPROMCrystalCap = (eprom_read(dev, (EEPROM_CrystalCap>>1))&0x0f);
else
priv->EEPROMCrystalCap = EEPROM_Default_CrystalCap;
RT_TRACE(COMP_EPROM, "CrystalCap = %d\n", priv->EEPROMCrystalCap);
//get per-channel Tx power level
if (bLoad_From_EEPOM)
priv->EEPROM_Def_Ver = (eprom_read(dev, (EEPROM_TxPwIndex_Ver>>1))&0xff00)>>8;
else
priv->EEPROM_Def_Ver = 1;
RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
if (priv->EEPROM_Def_Ver == 0) //old eeprom definition
{
int i;
if (bLoad_From_EEPOM)
priv->EEPROMTxPowerLevelCCK = (eprom_read(dev, (EEPROM_TxPwIndex_CCK>>1))&0xff00) >> 8;
else
priv->EEPROMTxPowerLevelCCK = 0x10;
RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
for (i=0; i<3; i++)
{
if (bLoad_From_EEPOM)
{
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G+i)>>1);
if (((EEPROM_TxPwIndex_OFDM_24G+i) % 2) == 0)
tmpValue = tmpValue & 0x00ff;
else
tmpValue = (tmpValue & 0xff00) >> 8;
}
else
tmpValue = 0x10;
priv->EEPROMTxPowerLevelOFDM24G[i] = (u8) tmpValue;
RT_TRACE(COMP_EPROM, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelCCK);
}
}//end if EEPROM_DEF_VER == 0
else if (priv->EEPROM_Def_Ver == 1)
{
if (bLoad_From_EEPOM)
{
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1>>1));
tmpValue = (tmpValue & 0xff00) >> 8;
}
else
tmpValue = 0x10;
priv->EEPROMTxPowerLevelCCK_V1[0] = (u8)tmpValue;
if (bLoad_From_EEPOM)
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2)>>1);
else
tmpValue = 0x1010;
*((u16*)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
if (bLoad_From_EEPOM)
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G_V1>>1));
else
tmpValue = 0x1010;
*((u16*)(&priv->EEPROMTxPowerLevelOFDM24G[0])) = tmpValue;
if (bLoad_From_EEPOM)
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G_V1+2)>>1);
else
tmpValue = 0x10;
priv->EEPROMTxPowerLevelOFDM24G[2] = (u8)tmpValue;
}//endif EEPROM_Def_Ver == 1
//update HAL variables
//
{
int i;
for (i=0; i<14; i++)
{
if (i<=3)
priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[0];
else if (i>=4 && i<=9)
priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[1];
else
priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[2];
}
for (i=0; i<14; i++)
{
if (priv->EEPROM_Def_Ver == 0)
{
if (i<=3)
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[0] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
else if (i>=4 && i<=9)
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK;
else
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[2] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
}
else if (priv->EEPROM_Def_Ver == 1)
{
if (i<=3)
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[0];
else if (i>=4 && i<=9)
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[1];
else
priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[2];
}
}
}//end update HAL variables
priv->TxPowerDiff = priv->EEPROMPwDiff;
// Antenna B gain offset to antenna A, bit0~3
priv->AntennaTxPwDiff[0] = (priv->EEPROMTxPowerDiff & 0xf);
// Antenna C gain offset to antenna A, bit4~7
priv->AntennaTxPwDiff[1] = ((priv->EEPROMTxPowerDiff & 0xf0)>>4);
// CrystalCap, bit12~15
priv->CrystalCap = priv->EEPROMCrystalCap;
// ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
// 92U does not enable TX power tracking.
priv->ThermalMeter[0] = priv->EEPROMThermalMeter;
}//end if VersionID == VERSION_819xU_A
//added by vivi, for dlink led, 20080416
switch(priv->eeprom_CustomerID)
{
case EEPROM_CID_RUNTOP:
priv->CustomerID = RT_CID_819x_RUNTOP;
break;
case EEPROM_CID_DLINK:
priv->CustomerID = RT_CID_DLINK;
break;
default:
priv->CustomerID = RT_CID_DEFAULT;
break;
}
switch(priv->CustomerID)
{
case RT_CID_819x_RUNTOP:
priv->LedStrategy = SW_LED_MODE2;
break;
case RT_CID_DLINK:
priv->LedStrategy = SW_LED_MODE4;
break;
default:
priv->LedStrategy = SW_LED_MODE0;
break;
}
if(priv->rf_type == RF_1T2R)
{
RT_TRACE(COMP_EPROM, "\n1T2R config\n");
}
else
{
RT_TRACE(COMP_EPROM, "\n2T4R config\n");
}
// 2008/01/16 MH We can only know the RF type in this function, so we have to
// initialize the DIG RATR table here again.
init_rate_adaptive(dev);
RT_TRACE(COMP_EPROM, "<===========%s()\n", __FUNCTION__);
return;
}
short rtl8192_get_channel_map(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if(priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN){
printk("rtl8180_init:Error channel plan! Set to default.\n");
priv->ChannelPlan= 0;
}
RT_TRACE(COMP_INIT, "Channel plan is %d\n",priv->ChannelPlan);
rtl819x_set_channel_map(priv->ChannelPlan, priv);
return 0;
}
short rtl8192_init(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
memset(&(priv->stats),0,sizeof(struct Stats));
memset(priv->txqueue_to_outpipemap,0,9);
#ifdef PIPE12
{
u8 queuetopipe[]={3,2,1,0,4,8,7,6,5};
memcpy(priv->txqueue_to_outpipemap,queuetopipe,9);
/* for(i=0;i<9;i++)
printk("%d ",priv->txqueue_to_outpipemap[i]);
printk("\n");*/
}
#else
{
u8 queuetopipe[]={3,2,1,0,4,4,0,4,4};
memcpy(priv->txqueue_to_outpipemap,queuetopipe,9);
/* for(i=0;i<9;i++)
printk("%d ",priv->txqueue_to_outpipemap[i]);
printk("\n");*/
}
#endif
rtl8192_init_priv_variable(dev);
rtl8192_init_priv_lock(priv);
rtl8192_init_priv_task(dev);
rtl8192_get_eeprom_size(dev);
rtl8192_read_eeprom_info(dev);
rtl8192_get_channel_map(dev);
init_hal_dm(dev);
init_timer(&priv->watch_dog_timer);
priv->watch_dog_timer.data = (unsigned long)dev;
priv->watch_dog_timer.function = watch_dog_timer_callback;
if(rtl8192_usb_initendpoints(dev)!=0){
DMESG("Endopoints initialization failed");
return -ENOMEM;
}
//rtl8192_adapter_start(dev);
#ifdef DEBUG_EPROM
dump_eprom(dev);
#endif
return 0;
}
/******************************************************************************
*function: This function actually only sets the RRSR, RATR and BW_OPMODE
* registers; it does not do all the hw config its name suggests
* input: net_device dev
* output: none
* return: none
* notice: This part needs to be modified according to the filtered rate set
* ****************************************************************************/
void rtl8192_hwconfig(struct net_device* dev)
{
u32 regRATR = 0, regRRSR = 0;
u8 regBwOpMode = 0, regTmp = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
// Set RRSR, RATR, and BW_OPMODE registers
//
switch(priv->ieee80211->mode)
{
case WIRELESS_MODE_B:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK;
regRRSR = RATE_ALL_CCK;
break;
case WIRELESS_MODE_A:
regBwOpMode = BW_OPMODE_5G |BW_OPMODE_20MHZ;
regRATR = RATE_ALL_OFDM_AG;
regRRSR = RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_G:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_AUTO:
#ifdef TO_DO_LIST
if (Adapter->bInHctTest)
{
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
}
else
#endif
{
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
}
break;
case WIRELESS_MODE_N_24G:
// It supports CCK rates by default.
// CCK rates are filtered out only when the associated AP does not support them.
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_N_5G:
regBwOpMode = BW_OPMODE_5G;
regRATR = RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
regRRSR = RATE_ALL_OFDM_AG;
break;
}
write_nic_byte(dev, BW_OPMODE, regBwOpMode);
{
u32 ratr_value = 0;
ratr_value = regRATR;
if (priv->rf_type == RF_1T2R)
{
ratr_value &= ~(RATE_ALL_OFDM_2SS);
}
write_nic_dword(dev, RATR0, ratr_value);
write_nic_byte(dev, UFWP, 1);
}
regTmp = read_nic_byte(dev, 0x313);
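/* The byte read from register 0x313 becomes bits 31..24 of RRSR below,
 * while the low 24 bits keep the basic rate set chosen above. */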
regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
write_nic_dword(dev, RRSR, regRRSR);
//
// Set Retry Limit here
//
write_nic_word(dev, RETRY_LIMIT,
priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT | \
priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
// Set Contention Window here
// Set Tx AGC
// Set Tx Antenna including Feedback control
// Set Auto Rate fallback control
}
//InitializeAdapter and PhyCfg
bool rtl8192_adapter_start(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 dwRegRead = 0;
bool init_status = true;
RT_TRACE(COMP_INIT, "====>%s()\n", __FUNCTION__);
priv->Rf_Mode = RF_OP_By_SW_3wire;
//for ASIC power on sequence
write_nic_byte_E(dev, 0x5f, 0x80);
mdelay(50);
write_nic_byte_E(dev, 0x5f, 0xf0);
write_nic_byte_E(dev, 0x5d, 0x00);
write_nic_byte_E(dev, 0x5e, 0x80);
write_nic_byte(dev, 0x17, 0x37);
mdelay(10);
//#ifdef TO_DO_LIST
priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
//config CPUReset Register
//Firmware Reset or not?
dwRegRead = read_nic_dword(dev, CPU_GEN);
if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
dwRegRead |= CPU_GEN_SYSTEM_RESET; //do nothing here?
else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
dwRegRead |= CPU_GEN_FIRMWARE_RESET;
else
RT_TRACE(COMP_ERR, "ERROR in %s(): undefined firmware state(%d)\n", __FUNCTION__, priv->pFirmware->firmware_status);
write_nic_dword(dev, CPU_GEN, dwRegRead);
//mdelay(30);
//config BB.
rtl8192_BBConfig(dev);
//Loopback mode or not
priv->LoopbackMode = RTL819xU_NO_LOOPBACK;
// priv->LoopbackMode = RTL819xU_MAC_LOOPBACK;
dwRegRead = read_nic_dword(dev, CPU_GEN);
if (priv->LoopbackMode == RTL819xU_NO_LOOPBACK)
dwRegRead = ((dwRegRead & CPU_GEN_NO_LOOPBACK_MSK) | CPU_GEN_NO_LOOPBACK_SET);
else if (priv->LoopbackMode == RTL819xU_MAC_LOOPBACK)
dwRegRead |= CPU_CCK_LOOPBACK;
else
RT_TRACE(COMP_ERR, "Serious error in %s(): wrong loopback mode setting(%d)\n", __FUNCTION__, priv->LoopbackMode);
write_nic_dword(dev, CPU_GEN, dwRegRead);
//after resetting the CPU, we need to wait a moment before writing registers.
udelay(500);
//xiong: added for new bitfile: set USB suspend reset pin to 1. //do we need this?
write_nic_byte_E(dev, 0x5f, (read_nic_byte_E(dev, 0x5f)|0x20));
//Set Hardware
rtl8192_hwconfig(dev);
//turn on Tx/Rx
write_nic_byte(dev, CMDR, CR_RE|CR_TE);
//set IDR0 here
write_nic_dword(dev, MAC0, ((u32*)dev->dev_addr)[0]);
write_nic_word(dev, MAC4, ((u16*)(dev->dev_addr + 4))[0]);
//set RCR
write_nic_dword(dev, RCR, priv->ReceiveConfig);
//Initialize Number of Reserved Pages in Firmware Queue
write_nic_dword(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK << RSVD_FW_QUEUE_PAGE_BK_SHIFT |\
NUM_OF_PAGE_IN_FW_QUEUE_BE << RSVD_FW_QUEUE_PAGE_BE_SHIFT | \
NUM_OF_PAGE_IN_FW_QUEUE_VI << RSVD_FW_QUEUE_PAGE_VI_SHIFT | \
NUM_OF_PAGE_IN_FW_QUEUE_VO <<RSVD_FW_QUEUE_PAGE_VO_SHIFT);
write_nic_dword(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT << RSVD_FW_QUEUE_PAGE_MGNT_SHIFT |\
NUM_OF_PAGE_IN_FW_QUEUE_CMD << RSVD_FW_QUEUE_PAGE_CMD_SHIFT);
write_nic_dword(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW| \
NUM_OF_PAGE_IN_FW_QUEUE_BCN<<RSVD_FW_QUEUE_PAGE_BCN_SHIFT
// | NUM_OF_PAGE_IN_FW_QUEUE_PUB<<RSVD_FW_QUEUE_PAGE_PUB_SHIFT
);
write_nic_dword(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
//Set AckTimeout
// TODO: this value is only for the FPGA version and needs to be changed. 2006.12.18, by Emily
write_nic_byte(dev, ACK_TIMEOUT, 0x30);
// RT_TRACE(COMP_INIT, "%s():priv->ResetProgress is %d\n", __FUNCTION__,priv->ResetProgress);
if(priv->ResetProgress == RESET_TYPE_NORESET)
rtl8192_SetWirelessMode(dev, priv->ieee80211->mode);
if(priv->ResetProgress == RESET_TYPE_NORESET){
CamResetAllEntry(dev);
{
u8 SECR_value = 0x0;
SECR_value |= SCR_TxEncEnable;
SECR_value |= SCR_RxDecEnable;
SECR_value |= SCR_NoSKMC;
write_nic_byte(dev, SECR, SECR_value);
}
}
//Beacon related
write_nic_word(dev, ATIMWND, 2);
write_nic_word(dev, BCN_INTERVAL, 100);
{
#define DEFAULT_EDCA 0x005e4332
int i;
for (i=0; i<QOS_QUEUE_NUM; i++)
write_nic_dword(dev, WDCAPARA_ADD[i], DEFAULT_EDCA);
}
#ifdef USB_RX_AGGREGATION_SUPPORT
//3 For usb rx firmware aggregation control
if(priv->ResetProgress == RESET_TYPE_NORESET)
{
u32 ulValue;
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
ulValue = (pHTInfo->UsbRxFwAggrEn<<24) | (pHTInfo->UsbRxFwAggrPageNum<<16) |
(pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
/*
* If usb rx firmware aggregation is enabled,
* when any one of the three threshold conditions above is reached,
* the firmware will send the aggregated packet to the driver.
*/
write_nic_dword(dev, 0x1a8, ulValue);
priv->bCurrentRxAggrEnable = true;
}
#endif
rtl8192_phy_configmac(dev);
if (priv->card_8192_version == (u8) VERSION_819xU_A)
{
rtl8192_phy_getTxPower(dev);
rtl8192_phy_setTxPower(dev, priv->chan);
}
//Firmware download
init_status = init_firmware(dev);
if(!init_status)
{
RT_TRACE(COMP_ERR,"ERR!!! %s(): Firmware download is failed\n", __FUNCTION__);
return init_status;
}
RT_TRACE(COMP_INIT, "%s():after firmware download\n", __FUNCTION__);
//
#ifdef TO_DO_LIST
if(Adapter->ResetProgress == RESET_TYPE_NORESET)
{
if(pMgntInfo->RegRfOff == TRUE)
{ // User disable RF via registry.
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): Turn off RF for RegRfOff ----------\n"));
MgntActSet_RF_State(Adapter, eRfOff, RF_CHANGE_BY_SW);
// Those actions will be discarded in MgntActSet_RF_State because of the same state
for(eRFPath = 0; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
PHY_SetRFReg(Adapter, (RF90_RADIO_PATH_E)eRFPath, 0x4, 0xC00, 0x0);
}
else if(pMgntInfo->RfOffReason > RF_CHANGE_BY_PS)
{ // H/W or S/W RF OFF before sleep.
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): Turn off RF for RfOffReason(%d) ----------\n", pMgntInfo->RfOffReason));
MgntActSet_RF_State(Adapter, eRfOff, pMgntInfo->RfOffReason);
}
else
{
pHalData->eRFPowerState = eRfOn;
pMgntInfo->RfOffReason = 0;
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): RF is on ----------\n"));
}
}
else
{
if(pHalData->eRFPowerState == eRfOff)
{
MgntActSet_RF_State(Adapter, eRfOff, pMgntInfo->RfOffReason);
// Those actions will be discarded in MgntActSet_RF_State because of the same state
for(eRFPath = 0; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
PHY_SetRFReg(Adapter, (RF90_RADIO_PATH_E)eRFPath, 0x4, 0xC00, 0x0);
}
}
#endif
//config RF.
if(priv->ResetProgress == RESET_TYPE_NORESET){
rtl8192_phy_RFConfig(dev);
RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __FUNCTION__);
}
if(priv->ieee80211->FwRWRF)
// We can force firmware to do RF-R/W
priv->Rf_Mode = RF_OP_By_FW;
else
priv->Rf_Mode = RF_OP_By_SW_3wire;
rtl8192_phy_updateInitGain(dev);
/*--set CCK and OFDM Block "ON"--*/
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
if(priv->ResetProgress == RESET_TYPE_NORESET)
{
//if D or C cut
u8 tmpvalue = read_nic_byte(dev, 0x301);
if(tmpvalue ==0x03)
{
priv->bDcut = TRUE;
RT_TRACE(COMP_POWER_TRACKING, "D-cut\n");
}
else
{
priv->bDcut = FALSE;
RT_TRACE(COMP_POWER_TRACKING, "C-cut\n");
}
dm_initialize_txpower_tracking(dev);
if(priv->bDcut == TRUE)
{
u32 i, TempCCk;
u32 tmpRegA= rtl8192_QueryBBReg(dev,rOFDM0_XATxIQImbalance,bMaskDWord);
// u32 tmpRegC= rtl8192_QueryBBReg(dev,rOFDM0_XCTxIQImbalance,bMaskDWord);
for(i = 0; i<TxBBGainTableLength; i++)
{
if(tmpRegA == priv->txbbgain_table[i].txbbgain_value)
{
priv->rfa_txpowertrackingindex= (u8)i;
priv->rfa_txpowertrackingindex_real= (u8)i;
priv->rfa_txpowertracking_default= priv->rfa_txpowertrackingindex;
break;
}
}
TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
for(i=0 ; i<CCKTxBBGainTableLength ; i++)
{
if(TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0])
{
priv->cck_present_attentuation_20Mdefault=(u8) i;
break;
}
}
priv->cck_present_attentuation_40Mdefault= 0;
priv->cck_present_attentuation_difference= 0;
priv->cck_present_attentuation = priv->cck_present_attentuation_20Mdefault;
// pMgntInfo->bTXPowerTracking = FALSE;//TEMPLY DISABLE
}
}
write_nic_byte(dev, 0x87, 0x0);
return init_status;
}
/* this configures registers for beacon tx and enables it via
* rtl8192_beacon_tx_enable(). rtl8192_beacon_tx_disable() might
* be used to stop beacon transmission
*/
/***************************************************************************
-------------------------------NET STUFF---------------------------
***************************************************************************/
static struct net_device_stats *rtl8192_stats(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
return &priv->ieee80211->stats;
}
bool
HalTxCheckStuck819xUsb(
struct net_device *dev
)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 RegTxCounter = read_nic_word(dev, 0x128);
bool bStuck = FALSE;
RT_TRACE(COMP_RESET,"%s():RegTxCounter is %d,TxCounter is %d\n",__FUNCTION__,RegTxCounter,priv->TxCounter);
if(priv->TxCounter==RegTxCounter)
bStuck = TRUE;
priv->TxCounter = RegTxCounter;
return bStuck;
}
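/* Stuck-detection sketch: the firmware keeps a Tx counter at register 0x128.
 * Each watchdog poll compares it with the value cached in priv->TxCounter; if
 * the counter has not advanced since the previous poll while packets are still
 * queued (see TxCheckStuck() below), Tx is considered stuck and a silent reset
 * is requested. */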
/*
* <Assumption: RT_TX_SPINLOCK is acquired.>
* First added: 2006.11.19 by emily
*/
RESET_TYPE
TxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 QueueID;
// PRT_TCB pTcb;
// u8 ResetThreshold;
bool bCheckFwTxCnt = false;
//unsigned long flags;
//
// Decide such threshold according to current power save mode
//
// RT_TRACE(COMP_RESET, " ==> TxCheckStuck()\n");
// PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
// spin_lock_irqsave(&priv->ieee80211->lock,flags);
for (QueueID = 0; QueueID<=BEACON_QUEUE;QueueID ++)
{
if(QueueID == TXCMD_QUEUE)
continue;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_drv_aggQ[QueueID]) == 0))
#else
if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0))
#endif
continue;
bCheckFwTxCnt = true;
}
// PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
// spin_unlock_irqrestore(&priv->ieee80211->lock,flags);
// RT_TRACE(COMP_RESET,"bCheckFwTxCnt is %d\n",bCheckFwTxCnt);
if(bCheckFwTxCnt)
{
if(HalTxCheckStuck819xUsb(dev))
{
RT_TRACE(COMP_RESET, "TxCheckStuck(): Fw indicates no Tx condition! \n");
return RESET_TYPE_SILENT;
}
}
return RESET_TYPE_NORESET;
}
bool
HalRxCheckStuck819xUsb(struct net_device *dev)
{
u16 RegRxCounter = read_nic_word(dev, 0x130);
struct r8192_priv *priv = ieee80211_priv(dev);
bool bStuck = FALSE;
static u8 rx_chk_cnt;
RT_TRACE(COMP_RESET,"%s(): RegRxCounter is %d,RxCounter is %d\n",__FUNCTION__,RegRxCounter,priv->RxCounter);
// If the RSSI is low, we should check Rx over a longer period because reception is poor;
// otherwise we may trigger a continuous silent reset every 2 seconds.
rx_chk_cnt++;
if(priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5))
{
rx_chk_cnt = 0; //high rssi, check rx stuck right now.
}
else if(priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High+5) &&
((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_40M) ||
(priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_20M)) )
{
if(rx_chk_cnt < 2)
{
return bStuck;
}
else
{
rx_chk_cnt = 0;
}
}
else if(((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_40M) ||
(priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_20M)) &&
priv->undecorated_smoothed_pwdb >= VeryLowRSSI)
{
if(rx_chk_cnt < 4)
{
//DbgPrint("RSSI < %d && RSSI >= %d, no check this time \n", RateAdaptiveTH_Low, VeryLowRSSI);
return bStuck;
}
else
{
rx_chk_cnt = 0;
//DbgPrint("RSSI < %d && RSSI >= %d, check this time \n", RateAdaptiveTH_Low, VeryLowRSSI);
}
}
else
{
if(rx_chk_cnt < 8)
{
//DbgPrint("RSSI <= %d, no check this time \n", VeryLowRSSI);
return bStuck;
}
else
{
rx_chk_cnt = 0;
//DbgPrint("RSSI <= %d, check this time \n", VeryLowRSSI);
}
}
if(priv->RxCounter==RegRxCounter)
bStuck = TRUE;
priv->RxCounter = RegRxCounter;
return bStuck;
}
RESET_TYPE
RxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//int i;
bool bRxCheck = FALSE;
// RT_TRACE(COMP_RESET," ==> RxCheckStuck()\n");
//PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
if(priv->IrpPendingCount > 1)
bRxCheck = TRUE;
//PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
// RT_TRACE(COMP_RESET,"bRxCheck is %d \n",bRxCheck);
if(bRxCheck)
{
if(HalRxCheckStuck819xUsb(dev))
{
RT_TRACE(COMP_RESET, "RxStuck Condition\n");
return RESET_TYPE_SILENT;
}
}
return RESET_TYPE_NORESET;
}
/**
 * This function is called by Checkforhang to check whether we should ask the OS to reset the driver
 *
 * \param pAdapter The adapter context for this miniport
 *
 * Note: a NIC with a USB interface should not call this function, because we cannot scan
 * the descriptors to judge whether Tx is stuck.
 * Note: This function may need to be rewritten for Vista.
 * <<<Assumption: Tx spinlock has been acquired >>>
 *
 * 8185 and 8185b do not implement this function. Added by Emily at 2006.11.24
*/
RESET_TYPE
rtl819x_ifcheck_resetornot(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RESET_TYPE TxResetType = RESET_TYPE_NORESET;
RESET_TYPE RxResetType = RESET_TYPE_NORESET;
RT_RF_POWER_STATE rfState;
rfState = priv->ieee80211->eRFPowerState;
TxResetType = TxCheckStuck(dev);
if( rfState != eRfOff ||
/*ADAPTER_TEST_STATUS_FLAG(Adapter, ADAPTER_STATUS_FW_DOWNLOAD_FAILURE)) &&*/
(priv->ieee80211->iw_mode != IW_MODE_ADHOC))
{
// If the driver is in the firmware download failure state, it skips RF initialization and the
// RF is turned off. The driver should still check whether Rx is stuck and do a silent reset;
// in that case, it should initialize the RF during the following silent reset procedure.
// Emily, 2008.01.21
// The driver should not check Rx stuck in IBSS mode, because Check BSSID must be set in
// order to send beacons; however, if Check BSSID is set, the STA cannot hear any packets
// at all. Emily, 2008.04.12
RxResetType = RxCheckStuck(dev);
}
if(TxResetType==RESET_TYPE_NORMAL || RxResetType==RESET_TYPE_NORMAL)
return RESET_TYPE_NORMAL;
else if(TxResetType==RESET_TYPE_SILENT || RxResetType==RESET_TYPE_SILENT){
RT_TRACE(COMP_RESET,"%s():silent reset\n",__FUNCTION__);
return RESET_TYPE_SILENT;
}
else
return RESET_TYPE_NORESET;
}
void rtl8192_cancel_deferred_work(struct r8192_priv* priv);
int _rtl8192_up(struct net_device *dev);
int rtl8192_close(struct net_device *dev);
void
CamRestoreAllEntry( struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
u8* MacAddr = priv->ieee80211->current_network.bssid;
static u8 CAM_CONST_ADDR[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}};
static u8 CAM_CONST_BROAD[] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
RT_TRACE(COMP_SEC, "CamRestoreAllEntry: \n");
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40)||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104))
{
for(EntryId=0; EntryId<4; EntryId++)
{
{
MacAddr = CAM_CONST_ADDR[EntryId];
setKey(dev,
EntryId ,
EntryId,
priv->ieee80211->pairwise_key_type,
MacAddr,
0,
NULL);
}
}
}
else if(priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP)
{
{
if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev,
4,
0,
priv->ieee80211->pairwise_key_type,
(u8*)dev->dev_addr,
0,
NULL);
else
setKey(dev,
4,
0,
priv->ieee80211->pairwise_key_type,
MacAddr,
0,
NULL);
}
}
else if(priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP)
{
{
if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev,
4,
0,
priv->ieee80211->pairwise_key_type,
(u8*)dev->dev_addr,
0,
NULL);
else
setKey(dev,
4,
0,
priv->ieee80211->pairwise_key_type,
MacAddr,
0,
NULL);
}
}
if(priv->ieee80211->group_key_type == KEY_TYPE_TKIP)
{
MacAddr = CAM_CONST_BROAD;
for(EntryId=1 ; EntryId<4 ; EntryId++)
{
{
setKey(dev,
EntryId,
EntryId,
priv->ieee80211->group_key_type,
MacAddr,
0,
NULL);
}
}
if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev,
0,
0,
priv->ieee80211->group_key_type,
CAM_CONST_ADDR[0],
0,
NULL);
}
else if(priv->ieee80211->group_key_type == KEY_TYPE_CCMP)
{
MacAddr = CAM_CONST_BROAD;
for(EntryId=1; EntryId<4 ; EntryId++)
{
{
setKey(dev,
EntryId ,
EntryId,
priv->ieee80211->group_key_type,
MacAddr,
0,
NULL);
}
}
if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev,
0 ,
0,
priv->ieee80211->group_key_type,
CAM_CONST_ADDR[0],
0,
NULL);
}
}
//////////////////////////////////////////////////////////////
// This function is used to fix Tx/Rx stop bug temporarily.
// This function will do "system reset" to NIC when Tx or Rx is stuck.
// The method checking Tx/Rx stuck of this function is supported by FW,
// which reports Tx and Rx counter to register 0x128 and 0x130.
//////////////////////////////////////////////////////////////
void
rtl819x_ifsilentreset(struct net_device *dev)
{
//OCTET_STRING asocpdu;
struct r8192_priv *priv = ieee80211_priv(dev);
u8 reset_times = 0;
int reset_status = 0;
struct ieee80211_device *ieee = priv->ieee80211;
// 2007.07.20. If we need to check CCK stop, please uncomment this line.
//bStuck = Adapter->HalFunc.CheckHWStopHandler(Adapter);
if(priv->ResetProgress==RESET_TYPE_NORESET)
{
RESET_START:
RT_TRACE(COMP_RESET,"=========>Reset progress!! \n");
// Set the variable for reset.
priv->ResetProgress = RESET_TYPE_SILENT;
// rtl8192_close(dev);
down(&priv->wx_sem);
if(priv->up == 0)
{
RT_TRACE(COMP_ERR,"%s():the driver is not up! return\n",__FUNCTION__);
up(&priv->wx_sem);
return ;
}
priv->up = 0;
RT_TRACE(COMP_RESET,"%s():======>start to down the driver\n",__FUNCTION__);
// if(!netif_queue_stopped(dev))
// netif_stop_queue(dev);
rtl8192_rtx_disable(dev);
rtl8192_cancel_deferred_work(priv);
deinit_hal_dm(dev);
del_timer_sync(&priv->watch_dog_timer);
ieee->sync_scan_hurryup = 1;
if(ieee->state == IEEE80211_LINKED)
{
down(&ieee->wx_sem);
printk("ieee->state is IEEE80211_LINKED\n");
ieee80211_stop_send_beacons(priv->ieee80211);
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
ieee80211_stop_scan(ieee);
netif_carrier_off(dev);
up(&ieee->wx_sem);
}
else {
printk("ieee->state is NOT LINKED\n");
ieee80211_softmac_stop_protocol(priv->ieee80211);
}
up(&priv->wx_sem);
RT_TRACE(COMP_RESET,"%s():<==========down process is finished\n",__FUNCTION__);
//rtl8192_irq_disable(dev);
RT_TRACE(COMP_RESET,"%s():===========>start up the driver\n",__FUNCTION__);
reset_status = _rtl8192_up(dev);
RT_TRACE(COMP_RESET,"%s():<===========up process is finished\n",__FUNCTION__);
if(reset_status == -EAGAIN)
{
if(reset_times < 3)
{
reset_times++;
goto RESET_START;
}
else
{
RT_TRACE(COMP_ERR," ERR!!! %s(): Reset Failed!!\n", __FUNCTION__);
}
}
ieee->is_silent_reset = 1;
EnableHWSecurityConfig8192(dev);
if(ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_INFRA)
{
ieee->set_chan(ieee->dev, ieee->current_network.channel);
queue_work(ieee->wq, &ieee->associate_complete_wq);
}
else if(ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_ADHOC)
{
ieee->set_chan(ieee->dev, ieee->current_network.channel);
ieee->link_change(ieee->dev);
// notify_wx_assoc_event(ieee);
ieee80211_start_send_beacons(ieee);
if (ieee->data_hard_resume)
ieee->data_hard_resume(ieee->dev);
netif_carrier_on(ieee->dev);
}
CamRestoreAllEntry(dev);
priv->ResetProgress = RESET_TYPE_NORESET;
priv->reset_count++;
priv->bForcedSilentReset =false;
priv->bResetInProgress = false;
// For test --> force write UFWP.
write_nic_byte(dev, UFWP, 1);
RT_TRACE(COMP_RESET, "Reset finished!! ====>[%d]\n", priv->reset_count);
}
}
void CAM_read_entry(
struct net_device *dev,
u32 iIndex
)
{
u32 target_command=0;
u32 target_content=0;
u8 entry_i=0;
u32 ulStatus;
s32 i=100;
// printk("=======>start read CAM\n");
for(entry_i=0;entry_i<CAM_CONTENT_COUNT;entry_i++)
{
// polling bit, and No Write enable, and address
target_command= entry_i+CAM_CONTENT_COUNT*iIndex;
target_command= target_command | BIT31;
//Check polling bit is clear
// mdelay(1);
while((i--)>=0)
{
ulStatus = read_nic_dword(dev, RWCAM);
if(!(ulStatus & BIT31))
break;
}
write_nic_dword(dev, RWCAM, target_command);
RT_TRACE(COMP_SEC,"CAM_read_entry(): WRITE A0: %x \n",target_command);
// printk("CAM_read_entry(): WRITE A0: %lx \n",target_command);
target_content = read_nic_dword(dev, RCAMO);
RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A8: %x \n",target_content);
// printk("CAM_read_entry(): WRITE A8: %lx \n",target_content);
}
printk("\n");
}
void rtl819x_update_rxcounts(
struct r8192_priv *priv,
u32* TotalRxBcnNum,
u32* TotalRxDataNum
)
{
u16 SlotIndex;
u8 i;
*TotalRxBcnNum = 0;
*TotalRxDataNum = 0;
SlotIndex = (priv->ieee80211->LinkDetectInfo.SlotIndex++)%(priv->ieee80211->LinkDetectInfo.SlotNum);
priv->ieee80211->LinkDetectInfo.RxBcnNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod;
priv->ieee80211->LinkDetectInfo.RxDataNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod;
for( i=0; i<priv->ieee80211->LinkDetectInfo.SlotNum; i++ ){
*TotalRxBcnNum += priv->ieee80211->LinkDetectInfo.RxBcnNum[i];
*TotalRxDataNum += priv->ieee80211->LinkDetectInfo.RxDataNum[i];
}
}
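/* Worked example of the sliding window above, assuming SlotNum == 2: with
 * RxBcnNum[] = {10, 0} and RxDataNum[] = {3, 0} after the current period is
 * stored, TotalRxBcnNum = 10 and TotalRxDataNum = 3; the watchdog treats a
 * combined total of 0 as "the AP is gone" and starts to roam. */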
extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work,struct delayed_work,work);
struct r8192_priv *priv = container_of(dwork,struct r8192_priv,watch_dog_wq);
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_device* ieee = priv->ieee80211;
RESET_TYPE ResetType = RESET_TYPE_NORESET;
static u8 check_reset_cnt;
bool bBusyTraffic = false;
if(!priv->up)
return;
hal_dm_watchdog(dev);
{//to get busy traffic condition
if(ieee->state == IEEE80211_LINKED)
{
if( ieee->LinkDetectInfo.NumRxOkInPeriod> 666 ||
ieee->LinkDetectInfo.NumTxOkInPeriod> 666 ) {
bBusyTraffic = true;
}
ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
}
}
//added by amy for AP roaming
{
if(priv->ieee80211->state == IEEE80211_LINKED && priv->ieee80211->iw_mode == IW_MODE_INFRA)
{
u32 TotalRxBcnNum = 0;
u32 TotalRxDataNum = 0;
rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
if((TotalRxBcnNum+TotalRxDataNum) == 0)
{
#ifdef TODO
if(rfState == eRfOff)
RT_TRACE(COMP_ERR,"========>%s()\n",__FUNCTION__);
#endif
printk("===>%s(): AP is power off,connect another one\n",__FUNCTION__);
// Dot11d_Reset(dev);
priv->ieee80211->state = IEEE80211_ASSOCIATING;
notify_wx_assoc_event(priv->ieee80211);
RemovePeerTS(priv->ieee80211,priv->ieee80211->current_network.bssid);
priv->ieee80211->link_change(dev);
queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
}
}
priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod=0;
priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod=0;
}
// CAM_read_entry(dev,4);
//check if reset the driver
if(check_reset_cnt++ >= 3)
{
ResetType = rtl819x_ifcheck_resetornot(dev);
check_reset_cnt = 3;
//DbgPrint("Start to check silent reset\n");
}
// RT_TRACE(COMP_RESET,"%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",__FUNCTION__,priv->force_reset,priv->ResetProgress,priv->bForcedSilentReset,priv->bDisableNormalResetCheck,ResetType);
if( (priv->force_reset) || (priv->ResetProgress==RESET_TYPE_NORESET &&
(priv->bForcedSilentReset ||
(!priv->bDisableNormalResetCheck && ResetType==RESET_TYPE_SILENT)))) // This is control by OID set in Pomelo
{
RT_TRACE(COMP_RESET,"%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",__FUNCTION__,priv->force_reset,priv->ResetProgress,priv->bForcedSilentReset,priv->bDisableNormalResetCheck,ResetType);
rtl819x_ifsilentreset(dev);
}
priv->force_reset = false;
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
void watch_dog_timer_callback(unsigned long data)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *) data);
//printk("===============>watch_dog timer\n");
queue_delayed_work(priv->priv_wq,&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer, jiffies + MSECS(IEEE80211_WATCH_DOG_TIME));
}
int _rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//int i;
int init_status = 0;
priv->up=1;
priv->ieee80211->ieee_up=1;
RT_TRACE(COMP_INIT, "Bringing up iface");
init_status = rtl8192_adapter_start(dev);
if(!init_status)
{
RT_TRACE(COMP_ERR,"ERR!!! %s(): initialization failed!\n", __FUNCTION__);
priv->up=priv->ieee80211->ieee_up = 0;
return -EAGAIN;
}
RT_TRACE(COMP_INIT, "start adapter finished\n");
rtl8192_rx_enable(dev);
// rtl8192_tx_enable(dev);
if(priv->ieee80211->state != IEEE80211_LINKED)
ieee80211_softmac_start_protocol(priv->ieee80211);
ieee80211_reset_queue(priv->ieee80211);
watch_dog_timer_callback((unsigned long) dev);
if(!netif_queue_stopped(dev))
netif_start_queue(dev);
else
netif_wake_queue(dev);
return 0;
}
int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
down(&priv->wx_sem);
ret = rtl8192_up(dev);
up(&priv->wx_sem);
return ret;
}
int rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (priv->up == 1) return -1;
return _rtl8192_up(dev);
}
int rtl8192_close(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
down(&priv->wx_sem);
ret = rtl8192_down(dev);
up(&priv->wx_sem);
return ret;
}
int rtl8192_down(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int i;
if (priv->up == 0) return -1;
priv->up=0;
priv->ieee80211->ieee_up = 0;
RT_TRACE(COMP_DOWN, "==========>%s()\n", __FUNCTION__);
/* FIXME */
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
rtl8192_rtx_disable(dev);
//rtl8192_irq_disable(dev);
/* Tx related queue release */
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_purge(&priv->ieee80211->skb_waitQ [i]);
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_purge(&priv->ieee80211->skb_aggQ [i]);
}
for(i = 0; i < MAX_QUEUE_SIZE; i++) {
skb_queue_purge(&priv->ieee80211->skb_drv_aggQ [i]);
}
//cancel_delayed_work() deletes work->timer, so if work is not defined as a struct delayed_work it will corrupt memory
// flush_scheduled_work();
rtl8192_cancel_deferred_work(priv);
deinit_hal_dm(dev);
del_timer_sync(&priv->watch_dog_timer);
ieee80211_softmac_stop_protocol(priv->ieee80211);
memset(&priv->ieee80211->current_network, 0 , offsetof(struct ieee80211_network, list));
RT_TRACE(COMP_DOWN, "<==========%s()\n", __FUNCTION__);
return 0;
}
void rtl8192_commit(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int reset_status = 0;
//u8 reset_times = 0;
if (priv->up == 0) return ;
priv->up = 0;
rtl8192_cancel_deferred_work(priv);
del_timer_sync(&priv->watch_dog_timer);
//cancel_delayed_work(&priv->SwChnlWorkItem);
ieee80211_softmac_stop_protocol(priv->ieee80211);
//rtl8192_irq_disable(dev);
rtl8192_rtx_disable(dev);
reset_status = _rtl8192_up(dev);
}
/*
void rtl8192_restart(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
*/
void rtl8192_restart(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, reset_wq);
struct net_device *dev = priv->ieee80211->dev;
down(&priv->wx_sem);
rtl8192_commit(dev);
up(&priv->wx_sem);
}
static void r8192_set_multicast(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
short promisc;
//down(&priv->wx_sem);
/* FIXME FIXME */
promisc = (dev->flags & IFF_PROMISC) ? 1:0;
if (promisc != priv->promisc) {
// rtl8192_commit(dev);
priv->promisc = promisc;
}
//schedule_work(&priv->reset_wq);
//up(&priv->wx_sem);
}
int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
down(&priv->wx_sem);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
schedule_work(&priv->reset_wq);
up(&priv->wx_sem);
return 0;
}
/* based on ipw2200 driver */
int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
int ret=-1;
struct ieee80211_device *ieee = priv->ieee80211;
u32 key[4];
u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
struct iw_point *p = &wrq->u.data;
struct ieee_param *ipw = NULL;//(struct ieee_param *)wrq->u.data.pointer;
down(&priv->wx_sem);
if (p->length < sizeof(struct ieee_param) || !p->pointer){
ret = -EINVAL;
goto out;
}
ipw = kmalloc(p->length, GFP_KERNEL);
if (ipw == NULL){
ret = -ENOMEM;
goto out;
}
if (copy_from_user(ipw, p->pointer, p->length)) {
kfree(ipw);
ret = -EFAULT;
goto out;
}
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
//parse here for HW security
if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION)
{
if (ipw->u.crypt.set_tx)
{
if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
ieee->pairwise_key_type = KEY_TYPE_CCMP;
else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
ieee->pairwise_key_type = KEY_TYPE_TKIP;
else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
{
if (ipw->u.crypt.key_len == 13)
ieee->pairwise_key_type = KEY_TYPE_WEP104;
else if (ipw->u.crypt.key_len == 5)
ieee->pairwise_key_type = KEY_TYPE_WEP40;
}
else
ieee->pairwise_key_type = KEY_TYPE_NA;
if (ieee->pairwise_key_type)
{
memcpy((u8*)key, ipw->u.crypt.key, 16);
EnableHWSecurityConfig8192(dev);
//we fill both the index entry and the 4th entry for the pairwise key, as in the IPW interface; adhoc will only get here, so we need the index entry for its default key searching!
//added by WB.
setKey(dev, 4, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
if (ieee->auth_mode != 2)
setKey(dev, ipw->u.crypt.idx, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
}
}
else //if (ipw->u.crypt.idx) //group key use idx > 0
{
memcpy((u8*)key, ipw->u.crypt.key, 16);
if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
ieee->group_key_type= KEY_TYPE_CCMP;
else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
ieee->group_key_type = KEY_TYPE_TKIP;
else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
{
if (ipw->u.crypt.key_len == 13)
ieee->group_key_type = KEY_TYPE_WEP104;
else if (ipw->u.crypt.key_len == 5)
ieee->group_key_type = KEY_TYPE_WEP40;
}
else
ieee->group_key_type = KEY_TYPE_NA;
if (ieee->group_key_type)
{
setKey( dev,
ipw->u.crypt.idx,
ipw->u.crypt.idx, //KeyIndex
ieee->group_key_type, //KeyType
broadcast_addr, //MacAddr
0, //DefaultKey
key); //KeyContent
}
}
}
#ifdef JOHN_HWSEC_DEBUG
//john's test 0711
printk("@@ wrq->u pointer = ");
for(i=0;i<wrq->u.data.length;i++){
if(i%10==0) printk("\n");
printk( "%8x|", ((u32*)wrq->u.data.pointer)[i] );
}
printk("\n");
#endif /*JOHN_HWSEC_DEBUG*/
ret = ieee80211_wpa_supplicant_ioctl(priv->ieee80211, &wrq->u.data);
break;
default:
ret = -EOPNOTSUPP;
break;
}
kfree(ipw);
ipw = NULL;
out:
up(&priv->wx_sem);
return ret;
}
u8 HwRateToMRate90(bool bIsHT, u8 rate)
{
u8 ret_rate = 0xff;
if(!bIsHT) {
switch(rate) {
case DESC90_RATE1M: ret_rate = MGN_1M; break;
case DESC90_RATE2M: ret_rate = MGN_2M; break;
case DESC90_RATE5_5M: ret_rate = MGN_5_5M; break;
case DESC90_RATE11M: ret_rate = MGN_11M; break;
case DESC90_RATE6M: ret_rate = MGN_6M; break;
case DESC90_RATE9M: ret_rate = MGN_9M; break;
case DESC90_RATE12M: ret_rate = MGN_12M; break;
case DESC90_RATE18M: ret_rate = MGN_18M; break;
case DESC90_RATE24M: ret_rate = MGN_24M; break;
case DESC90_RATE36M: ret_rate = MGN_36M; break;
case DESC90_RATE48M: ret_rate = MGN_48M; break;
case DESC90_RATE54M: ret_rate = MGN_54M; break;
default:
ret_rate = 0xff;
RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
break;
}
} else {
switch(rate) {
case DESC90_RATEMCS0: ret_rate = MGN_MCS0; break;
case DESC90_RATEMCS1: ret_rate = MGN_MCS1; break;
case DESC90_RATEMCS2: ret_rate = MGN_MCS2; break;
case DESC90_RATEMCS3: ret_rate = MGN_MCS3; break;
case DESC90_RATEMCS4: ret_rate = MGN_MCS4; break;
case DESC90_RATEMCS5: ret_rate = MGN_MCS5; break;
case DESC90_RATEMCS6: ret_rate = MGN_MCS6; break;
case DESC90_RATEMCS7: ret_rate = MGN_MCS7; break;
case DESC90_RATEMCS8: ret_rate = MGN_MCS8; break;
case DESC90_RATEMCS9: ret_rate = MGN_MCS9; break;
case DESC90_RATEMCS10: ret_rate = MGN_MCS10; break;
case DESC90_RATEMCS11: ret_rate = MGN_MCS11; break;
case DESC90_RATEMCS12: ret_rate = MGN_MCS12; break;
case DESC90_RATEMCS13: ret_rate = MGN_MCS13; break;
case DESC90_RATEMCS14: ret_rate = MGN_MCS14; break;
case DESC90_RATEMCS15: ret_rate = MGN_MCS15; break;
case DESC90_RATEMCS32: ret_rate = (0x80|0x20); break;
default:
ret_rate = 0xff;
RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",rate, bIsHT);
break;
}
}
return ret_rate;
}
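/* Usage example: HwRateToMRate90(false, DESC90_RATE54M) returns MGN_54M,
 * HwRateToMRate90(true, DESC90_RATEMCS7) returns MGN_MCS7, and any
 * unrecognized descriptor rate yields 0xff. */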
/**
* Function: UpdateRxPktTimeStamp
* Overview: Record the TSF time stamp when receiving a packet
*
* Input:
* PADAPTER Adapter
* PRT_RFD pRfd,
*
* Output:
* PRT_RFD pRfd
* (pRfd->Status.TimeStampHigh is updated)
* (pRfd->Status.TimeStampLow is updated)
* Return:
* None
*/
void UpdateRxPktTimeStamp8190 (struct net_device *dev, struct ieee80211_rx_stats *stats)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
if(stats->bIsAMPDU && !stats->bFirstMPDU) {
stats->mac_time[0] = priv->LastRxDescTSFLow;
stats->mac_time[1] = priv->LastRxDescTSFHigh;
} else {
priv->LastRxDescTSFLow = stats->mac_time[0];
priv->LastRxDescTSFHigh = stats->mac_time[1];
}
}
//by amy 080606
long rtl819x_translate_todbm(u8 signal_strength_index )// 0-100 index.
{
long signal_power; // in dBm.
// Translate to dBm (x=0.5y-95).
signal_power = (long)((signal_strength_index + 1) >> 1);
signal_power -= 95;
return signal_power;
}
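/* Worked example: index 70 -> (70 + 1) >> 1 = 35, 35 - 95 = -60 dBm;
 * index 100 maps to -45 dBm and index 0 to -95 dBm. */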
/* 2008/01/22 MH We cannot declare the RSSI/EVM sliding-window totals as local
statics; otherwise they may keep increasing after we return from S3/S4, because
the values are kept in memory or on disk. Declare them in the adapter instead,
and they will be reinitialized when returning from S3/S4. */
void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee80211_rx_stats * pprevious_stats, struct ieee80211_rx_stats * pcurrent_stats)
{
bool bcheck = false;
u8 rfpath;
u32 nspatial_stream, tmp_val;
//u8 i;
static u32 slide_rssi_index, slide_rssi_statistics;
static u32 slide_evm_index, slide_evm_statistics;
static u32 last_rssi, last_evm;
static u32 slide_beacon_adc_pwdb_index, slide_beacon_adc_pwdb_statistics;
static u32 last_beacon_adc_pwdb;
struct ieee80211_hdr_3addr *hdr;
u16 sc;
unsigned int frag, seq;
hdr = (struct ieee80211_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
//cosa add 04292008 to record the sequence number
pcurrent_stats->Seq_Num = seq;
//
// Check whether we should take the previous packet into accounting
//
if(!pprevious_stats->bIsAMPDU)
{
// if the previous packet is not an aggregated packet
bcheck = true;
}
if(slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX)
{
slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
priv->stats.slide_rssi_total -= last_rssi;
}
priv->stats.slide_rssi_total += pprevious_stats->SignalStrength;
priv->stats.slide_signal_strength[slide_rssi_index++] = pprevious_stats->SignalStrength;
if(slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
slide_rssi_index = 0;
// <1> Showed on UI for user, in dbm
tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
priv->stats.signal_strength = rtl819x_translate_todbm((u8)tmp_val);
pcurrent_stats->rssi = priv->stats.signal_strength;
//
// If the previous packet does not match the criteria, neglect it
//
if(!pprevious_stats->bPacketMatchBSSID)
{
if(!pprevious_stats->bToSelfBA)
return;
}
if(!bcheck)
return;
//rtl8190_process_cck_rxpathsel(priv,pprevious_stats);//only rtl8190 supported
//
// Check RSSI
//
priv->stats.num_process_phyinfo++;
/* record the general signal strength to the sliding window. */
// <2> Showed on UI for engineering
// hardware does not provide rssi information for each rf path in CCK
if(!pprevious_stats->bIsCCK && (pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA))
{
for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++)
{
if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, rfpath))
continue;
//Fixed by Jacken 2008-03-20
if(priv->stats.rx_rssi_percentage[rfpath] == 0)
{
priv->stats.rx_rssi_percentage[rfpath] = pprevious_stats->RxMIMOSignalStrength[rfpath];
//DbgPrint("MIMO RSSI initialize \n");
}
if(pprevious_stats->RxMIMOSignalStrength[rfpath] > priv->stats.rx_rssi_percentage[rfpath])
{
priv->stats.rx_rssi_percentage[rfpath] =
( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
priv->stats.rx_rssi_percentage[rfpath] = priv->stats.rx_rssi_percentage[rfpath] + 1;
}
else
{
priv->stats.rx_rssi_percentage[rfpath] =
( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
}
RT_TRACE(COMP_DBG,"priv->stats.rx_rssi_percentage[rfPath] = %d \n" ,priv->stats.rx_rssi_percentage[rfpath] );
}
}
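/* The smoothing above is an exponential moving average: assuming
 * Rx_Smooth_Factor == 20 (its value is defined elsewhere in the driver),
 * new = (old*19 + sample)/20, with +1 added when the sample is rising so
 * that integer division cannot stall the average;
 * e.g. old = 50, sample = 70 -> 1020/20 = 51, +1 = 52. */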
//
// Check PWDB.
//
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
pprevious_stats->bIsCCK? "CCK": "OFDM",
pprevious_stats->RxPWDBAll);
if(pprevious_stats->bPacketBeacon)
{
/* record the beacon pwdb to the sliding window. */
if(slide_beacon_adc_pwdb_statistics++ >= PHY_Beacon_RSSI_SLID_WIN_MAX)
{
slide_beacon_adc_pwdb_statistics = PHY_Beacon_RSSI_SLID_WIN_MAX;
last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index];
priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
//DbgPrint("slide_beacon_adc_pwdb_index = %d, last_beacon_adc_pwdb = %d, Adapter->RxStats.Slide_Beacon_Total = %d\n",
// slide_beacon_adc_pwdb_index, last_beacon_adc_pwdb, Adapter->RxStats.Slide_Beacon_Total);
}
priv->stats.Slide_Beacon_Total += pprevious_stats->RxPWDBAll;
priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] = pprevious_stats->RxPWDBAll;
//DbgPrint("slide_beacon_adc_pwdb_index = %d, pPreviousRfd->Status.RxPWDBAll = %d\n", slide_beacon_adc_pwdb_index, pPreviousRfd->Status.RxPWDBAll);
slide_beacon_adc_pwdb_index++;
if(slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
slide_beacon_adc_pwdb_index = 0;
pprevious_stats->RxPWDBAll = priv->stats.Slide_Beacon_Total/slide_beacon_adc_pwdb_statistics;
if(pprevious_stats->RxPWDBAll >= 3)
pprevious_stats->RxPWDBAll -= 3;
}
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
pprevious_stats->bIsCCK? "CCK": "OFDM",
pprevious_stats->RxPWDBAll);
if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
{
if(priv->undecorated_smoothed_pwdb < 0) // initialize
{
priv->undecorated_smoothed_pwdb = pprevious_stats->RxPWDBAll;
//DbgPrint("First pwdb initialize \n");
}
if(pprevious_stats->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb)
{
priv->undecorated_smoothed_pwdb =
( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
priv->undecorated_smoothed_pwdb = priv->undecorated_smoothed_pwdb + 1;
}
else
{
priv->undecorated_smoothed_pwdb =
( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
(pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
}
}
//
// Check EVM
//
/* record the general EVM to the sliding window. */
if(pprevious_stats->SignalQuality == 0)
{
}
else
{
if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA){
if(slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX){
slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
last_evm = priv->stats.slide_evm[slide_evm_index];
priv->stats.slide_evm_total -= last_evm;
}
priv->stats.slide_evm_total += pprevious_stats->SignalQuality;
priv->stats.slide_evm[slide_evm_index++] = pprevious_stats->SignalQuality;
if(slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
slide_evm_index = 0;
// <1> Showed on UI for user, in percentage.
tmp_val = priv->stats.slide_evm_total/slide_evm_statistics;
priv->stats.signal_quality = tmp_val;
//cosa add 10/11/2007, Showed on UI for user in Windows Vista, for Link quality.
priv->stats.last_signal_strength_inpercent = tmp_val;
}
// <2> Showed on UI for engineering
if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
{
for(nspatial_stream = 0; nspatial_stream<2 ; nspatial_stream++) // 2 spatial stream
{
if(pprevious_stats->RxMIMOSignalQuality[nspatial_stream] != -1)
{
if(priv->stats.rx_evm_percentage[nspatial_stream] == 0) // initialize
{
priv->stats.rx_evm_percentage[nspatial_stream] = pprevious_stats->RxMIMOSignalQuality[nspatial_stream];
}
priv->stats.rx_evm_percentage[nspatial_stream] =
( (priv->stats.rx_evm_percentage[nspatial_stream]* (Rx_Smooth_Factor-1)) +
(pprevious_stats->RxMIMOSignalQuality[nspatial_stream]* 1)) / (Rx_Smooth_Factor);
}
}
}
}
}
/*-----------------------------------------------------------------------------
* Function: rtl819x_query_rxpwrpercentage()
*
* Overview:
*
* Input: char antpower
*
* Output: NONE
*
* Return: 0-100 percentage
*
* Revised History:
* When Who Remark
* 05/26/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
static u8 rtl819x_query_rxpwrpercentage(
char antpower
)
{
if ((antpower <= -100) || (antpower >= 20))
{
return 0;
}
else if (antpower >= 0)
{
return 100;
}
else
{
return (100+antpower);
}
} /* QueryRxPwrPercentage */
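/* Worked example: antpower = -60 dBm -> 100 + (-60) = 40%; anything at or
 * below -100 dBm clamps to 0%, and anything from 0 to 19 dBm clamps to 100%. */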
static u8
rtl819x_evm_dbtopercentage(
char value
)
{
char ret_val;
ret_val = value;
if(ret_val >= 0)
ret_val = 0;
if(ret_val <= -33)
ret_val = -33;
ret_val = 0 - ret_val;
ret_val*=3;
if(ret_val == 99)
ret_val = 100;
return(ret_val);
}
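/* Worked example: value = -20 dB is clamped to [-33, 0], negated and tripled:
 * 20 * 3 = 60%; -33 dB gives 99, which is rounded up to 100%. */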
//
// Description:
// We want good-looking for signal strength/quality
// 2007/7/19 01:09, by cosa.
//
long
rtl819x_signal_scale_mapping(
long currsig
)
{
long retsig;
// Step 1. Scale mapping.
if(currsig >= 61 && currsig <= 100)
{
retsig = 90 + ((currsig - 60) / 4);
}
else if(currsig >= 41 && currsig <= 60)
{
retsig = 78 + ((currsig - 40) / 2);
}
else if(currsig >= 31 && currsig <= 40)
{
retsig = 66 + (currsig - 30);
}
else if(currsig >= 21 && currsig <= 30)
{
retsig = 54 + (currsig - 20);
}
else if(currsig >= 5 && currsig <= 20)
{
retsig = 42 + (((currsig - 5) * 2) / 3);
}
else if(currsig == 4)
{
retsig = 36;
}
else if(currsig == 3)
{
retsig = 27;
}
else if(currsig == 2)
{
retsig = 18;
}
else if(currsig == 1)
{
retsig = 9;
}
else
{
retsig = currsig;
}
return retsig;
}
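/* Worked example of the mapping above: currsig = 75 -> 90 + (15 / 4) = 93,
 * currsig = 50 -> 78 + (10 / 2) = 83, and values of 0 or below pass through
 * unchanged. */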
static void rtl8192_query_rxphystatus(
struct r8192_priv * priv,
struct ieee80211_rx_stats * pstats,
rx_drvinfo_819x_usb * pdrvinfo,
struct ieee80211_rx_stats * precord_stats,
bool bpacket_match_bssid,
bool bpacket_toself,
bool bPacketBeacon,
bool bToSelfBA
)
{
//PRT_RFD_STATUS pRtRfdStatus = &(pRfd->Status);
phy_sts_ofdm_819xusb_t* pofdm_buf;
phy_sts_cck_819xusb_t * pcck_buf;
phy_ofdm_rx_status_rxsc_sgien_exintfflag* prxsc;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
char rx_pwr[4], rx_pwr_all=0;
//long rx_avg_pwr = 0;
char rx_snrX, rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi=0;//, total_evm=0;
// long signal_strength_index = 0;
u8 is_cck_rate=0;
u8 rf_rx_num = 0;
priv->stats.numqry_phystatus++;
is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
// Record it for next packet processing
memset(precord_stats, 0, sizeof(struct ieee80211_rx_stats));
pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID = bpacket_match_bssid;
pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;//RX_HAL_IS_CCK_RATE(pDrvInfo);
pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
prxpkt = (u8*)pdrvinfo;
/* Move the pointer to the 16th byte, where the PHY status starts. */
prxpkt += sizeof(rx_drvinfo_819x_usb);
/* Initial the cck and ofdm buffer pointer */
pcck_buf = (phy_sts_cck_819xusb_t *)prxpkt;
pofdm_buf = (phy_sts_ofdm_819xusb_t *)prxpkt;
pstats->RxMIMOSignalQuality[0] = -1;
pstats->RxMIMOSignalQuality[1] = -1;
precord_stats->RxMIMOSignalQuality[0] = -1;
precord_stats->RxMIMOSignalQuality[1] = -1;
if(is_cck_rate)
{
//
// (1)Hardware does not provide RSSI for CCK
//
//
// (2)PWDB, Average PWDB calculated by hardware (for rate adaptive)
//
u8 report;//, cck_agc_rpt;
priv->stats.numqry_phystatusCCK++;
if(!priv->bCckHighPower)
{
report = pcck_buf->cck_agc_rpt & 0xc0;
report = report>>6;
switch(report)
{
//Fixed by Jacken from Bryant 2008-03-20
//Original value is -38 , -26 , -14 , -2
//Fixed value is -35 , -23 , -11 , 6
case 0x3:
rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
case 0x2:
rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
case 0x1:
rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
case 0x0:
rx_pwr_all = 6 - (pcck_buf->cck_agc_rpt & 0x3e);
break;
}
}
else
{
report = pcck_buf->cck_agc_rpt & 0x60;
report = report>>5;
switch(report)
{
case 0x3:
rx_pwr_all = -35 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
break;
case 0x2:
rx_pwr_all = -23 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
break;
case 0x1:
rx_pwr_all = -11 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
break;
case 0x0:
rx_pwr_all = 6 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
break;
}
}
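/* Worked example (low-power path above): cck_agc_rpt = 0x5e -> report =
 * (0x5e & 0xc0) >> 6 = 1, so rx_pwr_all = -11 - (0x5e & 0x3e) = -11 - 30
 * = -41 dBm, which rtl819x_query_rxpwrpercentage() below maps to 59%. */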
pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
pstats->RecvSignalPower = pwdb_all;
//
// (3) Get Signal Quality (EVM)
//
//if(bpacket_match_bssid)
{
u8 sq;
if(pstats->RxPWDBAll > 40)
{
sq = 100;
}else
{
sq = pcck_buf->sq_rpt;
if(pcck_buf->sq_rpt > 64)
sq = 0;
else if (pcck_buf->sq_rpt < 20)
sq = 100;
else
sq = ((64-sq) * 100) / 44;
}
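/* e.g. sq_rpt = 42 -> sq = ((64 - 42) * 100) / 44 = 50; reports over 64
 * mean quality 0 and reports under 20 mean quality 100. */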
pstats->SignalQuality = precord_stats->SignalQuality = sq;
pstats->RxMIMOSignalQuality[0] = precord_stats->RxMIMOSignalQuality[0] = sq;
pstats->RxMIMOSignalQuality[1] = precord_stats->RxMIMOSignalQuality[1] = -1;
}
}
else
{
priv->stats.numqry_phystatusHT++;
//
// (1)Get RSSI for HT rate
//
for(i=RF90_PATH_A; i<priv->NumTotalRFPath; i++)
{
// 2008/01/30 MH we will judge RF RX path now.
if (priv->brfpath_rxenable[i])
rf_rx_num++;
else
continue;
if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
continue;
//Fixed by Jacken from Bryant 2008-03-20
//Original value is 106
rx_pwr[i] = ((pofdm_buf->trsw_gain_X[i]&0x3F)*2) - 106;
//Get Rx snr value in DB
tmp_rxsnr = pofdm_buf->rxsnr_X[i];
rx_snrX = (char)(tmp_rxsnr);
//rx_snrX >>= 1;
rx_snrX /= 2;
priv->stats.rxSNRdB[i] = (long)rx_snrX;
/* Translate DBM to percentage. */
RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += RSSI;
/* Record Signal Strength for next packet */
//if(bpacket_match_bssid)
{
pstats->RxMIMOSignalStrength[i] =(u8) RSSI;
precord_stats->RxMIMOSignalStrength[i] =(u8) RSSI;
}
}
//
// (2)PWDB, Average PWDB calculated by hardware (for rate adaptive)
//
//Fixed by Jacken from Bryant 2008-03-20
//Original value is 106
rx_pwr_all = (((pofdm_buf->pwdb_all ) >> 1 )& 0x7f) -106;
pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
//
// (3)EVM of HT rate
//
if(pdrvinfo->RxHT && pdrvinfo->RxRate>=DESC90_RATEMCS8 &&
pdrvinfo->RxRate<=DESC90_RATEMCS15)
max_spatial_stream = 2; //both spatial stream make sense
else
max_spatial_stream = 1; //only spatial stream 1 makes sense
for(i=0; i<max_spatial_stream; i++)
{
tmp_rxevm = pofdm_buf->rxevm_X[i];
rx_evmX = (char)(tmp_rxevm);
// Do not use a shift operation like "rx_evmX >>= 1", because in the free-build
// environment the compiler sets the most significant bit to zero when shifting,
// which can turn a negative value positive, and then the dBm value (which is
// supposed to be negative) is no longer correct.
rx_evmX /= 2; //dbm
evm = rtl819x_evm_dbtopercentage(rx_evmX);
//if(bpacket_match_bssid)
{
if(i==0) // Fill value in RFD, Get the first spatial stream only
pstats->SignalQuality = precord_stats->SignalQuality = (u8)(evm & 0xff);
pstats->RxMIMOSignalQuality[i] = precord_stats->RxMIMOSignalQuality[i] = (u8)(evm & 0xff);
}
}
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
prxsc = (phy_ofdm_rx_status_rxsc_sgien_exintfflag *)&rxsc_sgien_exflg;
if(pdrvinfo->BW) //40M channel
priv->stats.received_bwtype[1+prxsc->rxsc]++;
else //20M channel
priv->stats.received_bwtype[0]++;
}
//UI BSS list signal strength (in percentage); make it good-looking, from 0~100.
//It is assigned to the BSS list in GetValueFromBeaconOrProbeRsp().
if(is_cck_rate)
{
pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)pwdb_all));//PWDB_ALL;
}
else
{
//pRfd->Status.SignalStrength = pRecordRfd->Status.SignalStrength = (u8)(SignalScaleMapping(total_rssi/=RF90_PATH_MAX));//(u8)(total_rssi/=RF90_PATH_MAX);
// We can judge RX path number now.
if (rf_rx_num != 0)
pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)(total_rssi/=rf_rx_num)));
}
} /* QueryRxPhyStatus8190Pci */
void
rtl8192_record_rxdesc_forlateruse(
struct ieee80211_rx_stats * psrc_stats,
struct ieee80211_rx_stats * ptarget_stats
)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
ptarget_stats->Seq_Num = psrc_stats->Seq_Num;
}
void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
struct ieee80211_rx_stats * pstats,
rx_drvinfo_819x_usb *pdrvinfo)
{
// TODO: We must only check packets for the current MAC address. Not finished.
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev=info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
bool bpacket_match_bssid, bpacket_toself;
bool bPacketBeacon=FALSE, bToSelfBA=FALSE;
static struct ieee80211_rx_stats previous_stats;
struct ieee80211_hdr_3addr *hdr;//by amy
u16 fc,type;
// Get Signal Quality for only RX data queue (but not command queue)
u8* tmp_buf;
//u16 tmp_buf_len = 0;
u8 *praddr;
/* Get MAC frame start address. */
tmp_buf = (u8*)skb->data;// + get_rxpacket_shiftbytes_819xusb(pstats);
hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
/* Check if the received packet is acceptable. */
bpacket_match_bssid = ((IEEE80211_FTYPE_CTL != type) &&
(eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS)? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS )? hdr->addr2 : hdr->addr3))
&& (!pstats->bHwError) && (!pstats->bCRC)&& (!pstats->bICV));
bpacket_toself = bpacket_match_bssid & (eqMacAddr(praddr, priv->ieee80211->dev->dev_addr));
if(WLAN_FC_GET_FRAMETYPE(fc)== IEEE80211_STYPE_BEACON)
{
bPacketBeacon = true;
//DbgPrint("Beacon 2, MatchBSSID = %d, ToSelf = %d \n", bPacketMatchBSSID, bPacketToSelf);
}
if(WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BLOCKACK)
{
if((eqMacAddr(praddr,dev->dev_addr)))
bToSelfBA = true;
//DbgPrint("BlockAck, MatchBSSID = %d, ToSelf = %d \n", bPacketMatchBSSID, bPacketToSelf);
}
if(bpacket_match_bssid)
{
priv->stats.numpacket_matchbssid++;
}
if(bpacket_toself){
priv->stats.numpacket_toself++;
}
//
// Process PHY information for previous packet (RSSI/PWDB/EVM)
//
// Because PHY information is contained only in the last packet of an AMPDU, the driver
// should process the PHY information of the previous packet
rtl8192_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
rtl8192_query_rxphystatus(priv, pstats, pdrvinfo, &previous_stats, bpacket_match_bssid,bpacket_toself,bPacketBeacon,bToSelfBA);
rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
}
/**
* Function: UpdateReceivedRateHistogramStatistics
* Overview: Record the received data rate
*
* Input:
* struct net_device *dev
* struct ieee80211_rx_stats *stats
*
* Output:
*
* (priv->stats.ReceivedRateHistogram[] is updated)
* Return:
* None
*/
void
UpdateReceivedRateHistogramStatistics8190(
struct net_device *dev,
struct ieee80211_rx_stats *stats
)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
u32 rcvType=1; //0: Total, 1:OK, 2:CRC, 3:ICV
u32 rateIndex;
u32 preamble_guardinterval; //1: short preamble/GI, 0: long preamble/GI
if(stats->bCRC)
rcvType = 2;
else if(stats->bICV)
rcvType = 3;
if(stats->bShortPreamble)
preamble_guardinterval = 1;// short
else
preamble_guardinterval = 0;// long
switch(stats->rate)
{
//
// CCK rate
//
case MGN_1M: rateIndex = 0; break;
case MGN_2M: rateIndex = 1; break;
case MGN_5_5M: rateIndex = 2; break;
case MGN_11M: rateIndex = 3; break;
//
// Legacy OFDM rate
//
case MGN_6M: rateIndex = 4; break;
case MGN_9M: rateIndex = 5; break;
case MGN_12M: rateIndex = 6; break;
case MGN_18M: rateIndex = 7; break;
case MGN_24M: rateIndex = 8; break;
case MGN_36M: rateIndex = 9; break;
case MGN_48M: rateIndex = 10; break;
case MGN_54M: rateIndex = 11; break;
//
// 11n High throughput rate
//
case MGN_MCS0: rateIndex = 12; break;
case MGN_MCS1: rateIndex = 13; break;
case MGN_MCS2: rateIndex = 14; break;
case MGN_MCS3: rateIndex = 15; break;
case MGN_MCS4: rateIndex = 16; break;
case MGN_MCS5: rateIndex = 17; break;
case MGN_MCS6: rateIndex = 18; break;
case MGN_MCS7: rateIndex = 19; break;
case MGN_MCS8: rateIndex = 20; break;
case MGN_MCS9: rateIndex = 21; break;
case MGN_MCS10: rateIndex = 22; break;
case MGN_MCS11: rateIndex = 23; break;
case MGN_MCS12: rateIndex = 24; break;
case MGN_MCS13: rateIndex = 25; break;
case MGN_MCS14: rateIndex = 26; break;
case MGN_MCS15: rateIndex = 27; break;
default: rateIndex = 28; break;
}
priv->stats.received_preamble_GI[preamble_guardinterval][rateIndex]++;
priv->stats.received_rate_histogram[0][rateIndex]++; //total
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
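/*
 * Parse the Rx descriptor (and the driver info block that follows it, when
 * present) into @stats, then pull the descriptor/driver-info bytes off the
 * skb so that skb->data points at the 802.11 MPDU.
 */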
void query_rxdesc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats, bool bIsRxAggrSubframe)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev=info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
//rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
rx_drvinfo_819x_usb *driver_info = NULL;
//
//Get Rx Descriptor Information
//
#ifdef USB_RX_AGGREGATION_SUPPORT
if (bIsRxAggrSubframe)
{
rx_desc_819x_usb_aggr_subframe *desc = (rx_desc_819x_usb_aggr_subframe *)skb->data;
stats->Length = desc->Length ;
stats->RxDrvInfoSize = desc->RxDrvInfoSize;
stats->RxBufShift = 0; // RxBufShift is 2 in the Rx descriptor, but the USB path does not actually shift bytes.
stats->bICV = desc->ICV;
stats->bCRC = desc->CRC32;
stats->bHwError = stats->bCRC|stats->bICV;
stats->Decrypted = !desc->SWDec; // RTL8190 sets this bit to indicate that the hardware did not decrypt the packet
} else
#endif
{
rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
stats->Length = desc->Length;
stats->RxDrvInfoSize = desc->RxDrvInfoSize;
stats->RxBufShift = 0;//desc->Shift&0x03;
stats->bICV = desc->ICV;
stats->bCRC = desc->CRC32;
stats->bHwError = stats->bCRC|stats->bICV;
// RTL8190 sets this bit to indicate that the hardware did not decrypt the packet
stats->Decrypted = !desc->SWDec;
}
if((priv->ieee80211->pHTInfo->bCurrentHTSupport == true) && (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
{
stats->bHwError = false;
}
else
{
stats->bHwError = stats->bCRC|stats->bICV;
}
if(stats->Length < 24 || stats->Length > MAX_8192U_RX_SIZE)
stats->bHwError |= 1;
//
//Get Driver Info
//
// TODO: Need to verify it on the FPGA platform
//Driver info are written to the RxBuffer following rx desc
if (stats->RxDrvInfoSize != 0) {
driver_info = (rx_drvinfo_819x_usb *)(skb->data + sizeof(rx_desc_819x_usb) + \
stats->RxBufShift);
/* unit: 0.5M */
/* TODO */
if(!stats->bHwError){
u8 ret_rate;
ret_rate = HwRateToMRate90(driver_info->RxHT, driver_info->RxRate);
if(ret_rate == 0xff)
{
// Abnormal case: received a CRC-OK packet whose Rx descriptor indicates a non-supported rate.
// Special error handling here, 2008.05.16, by Emily
stats->bHwError = 1;
stats->rate = MGN_1M; //Set 1M rate by default
}else
{
stats->rate = ret_rate;
}
}
else
stats->rate = 0x02;
stats->bShortPreamble = driver_info->SPLCP;
UpdateReceivedRateHistogramStatistics8190(dev, stats);
stats->bIsAMPDU = (driver_info->PartAggr==1);
stats->bFirstMPDU = (driver_info->PartAggr==1) && (driver_info->FirstAGGR==1);
stats->TimeStampLow = driver_info->TSFL;
// xiong mask it, 070514
//pRfd->Status.TimeStampHigh = PlatformEFIORead4Byte(Adapter, TSFR+4);
// stats->TimeStampHigh = read_nic_dword(dev, TSFR+4);
UpdateRxPktTimeStamp8190(dev, stats);
//
// Rx A-MPDU
//
if(driver_info->FirstAGGR==1 || driver_info->PartAggr == 1)
RT_TRACE(COMP_RXDESC, "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
}
skb_pull(skb,sizeof(rx_desc_819x_usb));
//
// Get Total offset of MPDU Frame Body
//
if((stats->RxBufShift + stats->RxDrvInfoSize) > 0) {
stats->bShift = 1;
skb_pull(skb,stats->RxBufShift + stats->RxDrvInfoSize);
}
#ifdef USB_RX_AGGREGATION_SUPPORT
/* for an rx-aggregated subframe, the redundant header space is actually contained in the packet */
if(bIsRxAggrSubframe) {
skb_pull(skb, 8);
}
#endif
/* for debug 2008.5.29 */
//added by vivi, for MP, 20080108
/* driver_info is only valid when a driver info block was present */
if(stats->RxDrvInfoSize != 0) {
stats->RxIs40MHzPacket = driver_info->BW;
TranslateRxSignalStuff819xUsb(skb, stats, driver_info);
}
}
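/*
 * Bytes preceding the MPDU in the Rx buffer: descriptor, driver info and
 * buffer shift, plus an extra 8 bytes of aggregation header for an
 * Rx-aggregated subframe.
 */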
u32 GetRxPacketShiftBytes819xUsb(struct ieee80211_rx_stats *Status, bool bIsRxAggrSubframe)
{
#ifdef USB_RX_AGGREGATION_SUPPORT
if (bIsRxAggrSubframe)
return (sizeof(rx_desc_819x_usb) + Status->RxDrvInfoSize
+ Status->RxBufShift + 8);
else
#endif
return (sizeof(rx_desc_819x_usb) + Status->RxDrvInfoSize
+ Status->RxBufShift);
}
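/*
 * Handle a frame from the normal (data) bulk-in pipe: parse its Rx
 * descriptor, strip the trailing CRC and hand the MPDU to the ieee80211
 * stack.  With USB Rx aggregation enabled, every subframe contained in
 * the aggregated transfer is unpacked and delivered the same way.
 */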
void rtl8192_rx_nomal(struct sk_buff* skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev=info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct ieee80211_rx_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
// .mac_time = jiffies,
.freq = IEEE80211_24GHZ_BAND,
};
u32 rx_pkt_len = 0;
struct ieee80211_hdr_1addr *ieee80211_hdr = NULL;
bool unicast_packet = false;
#ifdef USB_RX_AGGREGATION_SUPPORT
struct sk_buff *agg_skb = NULL;
u32 TotalLength = 0;
u32 TempDWord = 0;
u32 PacketLength = 0;
u32 PacketOccupiedLendth = 0;
u8 TempByte = 0;
u32 PacketShiftBytes = 0;
rx_desc_819x_usb_aggr_subframe *RxDescr = NULL;
u8 PaddingBytes = 0;
//add just for testing
u8 testing;
#endif
/* 20 is for ps-poll */
if((skb->len >=(20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
#ifdef USB_RX_AGGREGATION_SUPPORT
TempByte = *(skb->data + sizeof(rx_desc_819x_usb));
#endif
/* first packet should not contain Rx aggregation header */
query_rxdesc_status(skb, &stats, false);
/* TODO */
/* hardware related info */
#ifdef USB_RX_AGGREGATION_SUPPORT
if (TempByte & BIT0) {
agg_skb = skb;
//TotalLength = agg_skb->len - 4; /*sCrcLng*/
TotalLength = stats.Length - 4; /*sCrcLng*/
//RT_TRACE(COMP_RECV, "%s:first aggregated packet!Length=%d\n",__FUNCTION__,TotalLength);
/* though the head pointer has passed this position */
TempDWord = *(u32 *)(agg_skb->data - 4);
PacketLength = (u16)(TempDWord & 0x3FFF); /*sCrcLng*/
skb = dev_alloc_skb(PacketLength);
memcpy(skb_put(skb,PacketLength),agg_skb->data,PacketLength);
PacketShiftBytes = GetRxPacketShiftBytes819xUsb(&stats, false);
}
#endif
/* Process the MPDU received */
skb_trim(skb, skb->len - 4/*sCrcLng*/);
rx_pkt_len = skb->len;
ieee80211_hdr = (struct ieee80211_hdr_1addr *)skb->data;
unicast_packet = false;
if(is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
}else if(is_multicast_ether_addr(ieee80211_hdr->addr1)){
//TODO
}else {
/* unicast packet */
unicast_packet = true;
}
if(!ieee80211_rx(priv->ieee80211,skb, &stats)) {
dev_kfree_skb_any(skb);
} else {
priv->stats.rxoktotal++;
if(unicast_packet) {
priv->stats.rxbytesunicast += rx_pkt_len;
}
}
#ifdef USB_RX_AGGREGATION_SUPPORT
testing = 1;
// (PipeIndex == 0) && (TempByte & BIT0) => TotalLength > 0.
if (TotalLength > 0) {
PacketOccupiedLendth = PacketLength + (PacketShiftBytes + 8);
if ((PacketOccupiedLendth & 0xFF) != 0)
PacketOccupiedLendth = (PacketOccupiedLendth & 0xFFFFFF00) + 256;
PacketOccupiedLendth -= 8;
TempDWord = PacketOccupiedLendth - PacketShiftBytes; /*- PacketLength */
if (agg_skb->len > TempDWord)
skb_pull(agg_skb, TempDWord);
else
agg_skb->len = 0;
while (agg_skb->len>=GetRxPacketShiftBytes819xUsb(&stats, true)) {
u8 tmpCRC = 0, tmpICV = 0;
//RT_TRACE(COMP_RECV,"%s:aggred pkt,total_len = %d\n",__FUNCTION__,agg_skb->len);
RxDescr = (rx_desc_819x_usb_aggr_subframe *)(agg_skb->data);
tmpCRC = RxDescr->CRC32;
tmpICV = RxDescr->ICV;
memcpy(agg_skb->data, &agg_skb->data[44], 2);
RxDescr->CRC32 = tmpCRC;
RxDescr->ICV = tmpICV;
memset(&stats, 0, sizeof(struct ieee80211_rx_stats));
stats.signal = 0;
stats.noise = -98;
stats.rate = 0;
stats.freq = IEEE80211_24GHZ_BAND;
query_rxdesc_status(agg_skb, &stats, true);
PacketLength = stats.Length;
if(PacketLength > agg_skb->len) {
break;
}
/* Process the MPDU received */
skb = dev_alloc_skb(PacketLength);
memcpy(skb_put(skb,PacketLength),agg_skb->data, PacketLength);
skb_trim(skb, skb->len - 4/*sCrcLng*/);
rx_pkt_len = skb->len;
ieee80211_hdr = (struct ieee80211_hdr_1addr *)skb->data;
unicast_packet = false;
if(is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
}else if(is_multicast_ether_addr(ieee80211_hdr->addr1)){
//TODO
}else {
/* unicast packet */
unicast_packet = true;
}
if(!ieee80211_rx(priv->ieee80211,skb, &stats)) {
dev_kfree_skb_any(skb);
} else {
priv->stats.rxoktotal++;
if(unicast_packet) {
priv->stats.rxbytesunicast += rx_pkt_len;
}
}
/* should trim the packet which has been copied to target skb */
skb_pull(agg_skb, PacketLength);
PacketShiftBytes = GetRxPacketShiftBytes819xUsb(&stats, true);
PacketOccupiedLendth = PacketLength + PacketShiftBytes;
if ((PacketOccupiedLendth & 0xFF) != 0) {
PaddingBytes = 256 - (PacketOccupiedLendth & 0xFF);
if (agg_skb->len > PaddingBytes)
skb_pull(agg_skb, PaddingBytes);
else
agg_skb->len = 0;
}
}
dev_kfree_skb(agg_skb);
}
#endif
} else {
priv->stats.rxurberr++;
printk("actual_length:%d\n", skb->len);
dev_kfree_skb_any(skb);
}
}
void
rtl819xusb_process_received_packet(
struct net_device *dev,
struct ieee80211_rx_stats *pstats
)
{
// bool bfreerfd=false, bqueued=false;
u8* frame;
u16 frame_len=0;
struct r8192_priv *priv = ieee80211_priv(dev);
// u8 index = 0;
// u8 TID = 0;
//u16 seqnum = 0;
//PRX_TS_RECORD pts = NULL;
// Get the shifted bytes before the starting address of the 802.11 header. 2006.09.28, by Emily
//porting by amy 080508
pstats->virtual_address += get_rxpacket_shiftbytes_819xusb(pstats);
frame = pstats->virtual_address;
frame_len = pstats->packetlength;
#ifdef TODO // by amy about HCT
if(!Adapter->bInHctTest)
CountRxErrStatistics(Adapter, pRfd);
#endif
{
#ifdef ENABLE_PS //by amy for adding ps function in future
RT_RF_POWER_STATE rtState;
// When RF is off, we should not count the packet, for hw/sw synchronization
// reasons: there may be an interval during which the sw switch has changed
// but the hw switch is still changing. 2006.12.04, by shien chang.
Adapter->HalFunc.GetHwRegHandler(Adapter, HW_VAR_RF_STATE, (u8* )(&rtState));
if (rtState == eRfOff)
{
return;
}
#endif
priv->stats.rxframgment++;
}
#ifdef TODO
RmMonitorSignalStrength(Adapter, pRfd);
#endif
/* 2007/01/16 MH Add RX command packet handle here. */
/* 2007/03/01 MH We have to release RFD and return if rx pkt is cmd pkt. */
if (rtl819xusb_rx_command_packet(dev, pstats))
{
return;
}
#ifdef SW_CRC_CHECK
SwCrcCheck();
#endif
}
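/* Fill in minimal stats for a command packet; it carries no driver info. */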
void query_rx_cmdpkt_desc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats)
{
// rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
// struct net_device *dev=info->dev;
// struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
// rx_drvinfo_819x_usb *driver_info;
//
//Get Rx Descriptor Information
//
stats->virtual_address = (u8*)skb->data;
stats->Length = desc->Length;
stats->RxDrvInfoSize = 0;
stats->RxBufShift = 0;
stats->packetlength = stats->Length-scrclng;
stats->fraglength = stats->packetlength;
stats->fragoffset = 0;
stats->ntotalfrag = 1;
}
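/*
 * Handle a frame from the command pipe: parse its descriptor and pass the
 * packet on to the command-packet processing path.
 */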
void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
//int ret;
// struct urb *rx_urb = info->urb;
/* TODO */
struct ieee80211_rx_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
// .mac_time = jiffies,
.freq = IEEE80211_24GHZ_BAND,
};
if((skb->len >=(20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE))
{
query_rx_cmdpkt_desc_status(skb,&stats);
/* TODO (amy, 080508): prfd->queue_id = 1; */
//
// Process the command packet received.
//
rtl819xusb_process_received_packet(dev,&stats);
dev_kfree_skb_any(skb);
}
}
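/*
 * Rx tasklet: drain the driver's skb queue and dispatch each buffer to the
 * normal-data or command handler according to the pipe it arrived on.
 */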
void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
{
struct sk_buff *skb;
struct rtl8192_rx_info *info;
while (NULL != (skb = skb_dequeue(&priv->skb_queue))) {
info = (struct rtl8192_rx_info *)skb->cb;
switch (info->out_pipe) {
/* Normal packet pipe */
case 3:
//RT_TRACE(COMP_RECV, "normal in-pipe index(%d)\n",info->out_pipe);
priv->IrpPendingCount--;
rtl8192_rx_nomal(skb);
break;
/* Command packet pipe */
case 9:
RT_TRACE(COMP_RECV, "command in-pipe index(%d)\n",\
info->out_pipe);
rtl8192_rx_cmd(skb);
break;
default: /* should never get here! */
RT_TRACE(COMP_ERR, "Unknown in-pipe index(%d)\n",\
info->out_pipe);
dev_kfree_skb(skb);
break;
}
}
}
static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_open = rtl8192_open,
.ndo_stop = rtl8192_close,
.ndo_get_stats = rtl8192_stats,
.ndo_tx_timeout = tx_timeout,
.ndo_do_ioctl = rtl8192_ioctl,
.ndo_set_rx_mode = r8192_set_multicast,
.ndo_set_mac_address = r8192_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_start_xmit = ieee80211_xmit,
};
/****************************************************************************
---------------------------- USB_STUFF---------------------------
*****************************************************************************/
static int rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
// unsigned long ioaddr = 0;
struct net_device *dev = NULL;
struct r8192_priv *priv= NULL;
struct usb_device *udev = interface_to_usbdev(intf);
int ret;
RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
dev = alloc_ieee80211(sizeof(struct r8192_priv));
if (dev == NULL)
return -ENOMEM;
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(dev, &intf->dev);
priv = ieee80211_priv(dev);
priv->ieee80211 = netdev_priv(dev);
priv->udev=udev;
dev->netdev_ops = &rtl8192_netdev_ops;
//DMESG("Oops: i'm coming\n");
#if WIRELESS_EXT >= 12
#if WIRELESS_EXT < 17
dev->get_wireless_stats = r8192_get_wireless_stats;
#endif
dev->wireless_handlers = (struct iw_handler_def *) &r8192_wx_handlers_def;
#endif
dev->type=ARPHRD_ETHER;
dev->watchdog_timeo = HZ*3; //modified by john, 0805
if (dev_alloc_name(dev, ifname) < 0){
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
ifname = "wlan%d";
dev_alloc_name(dev, ifname);
}
RT_TRACE(COMP_INIT, "Driver probe completed1\n");
if(rtl8192_init(dev)!=0){
RT_TRACE(COMP_ERR, "Initialization failed");
ret = -ENODEV;
goto fail;
}
netif_carrier_off(dev);
netif_stop_queue(dev);
ret = register_netdev(dev);
if (ret)
goto fail2;
RT_TRACE(COMP_INIT, "dev name=======> %s\n",dev->name);
rtl8192_proc_init_one(dev);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
fail2:
rtl8192_down(dev);
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
destroy_workqueue(priv->priv_wq);
mdelay(10);
fail:
free_ieee80211(dev);
RT_TRACE(COMP_ERR, "wlan driver load failed\n");
return ret;
}
// Detach all the work and timer structures declared or initialized in the r8192U_init function.
void rtl8192_cancel_deferred_work(struct r8192_priv* priv)
{
cancel_work_sync(&priv->reset_wq);
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
cancel_work_sync(&priv->qos_activate);
//cancel_work_sync(&priv->SetBWModeWorkItem);
//cancel_work_sync(&priv->SwChnlWorkItem);
}
static void rtl8192_usb_disconnect(struct usb_interface *intf)
{
struct net_device *dev = usb_get_intfdata(intf);
struct r8192_priv *priv = ieee80211_priv(dev);
if(dev){
unregister_netdev(dev);
RT_TRACE(COMP_DOWN, "=============>wlan driver to be removed\n");
rtl8192_proc_remove_one(dev);
rtl8192_down(dev);
kfree(priv->pFirmware);
priv->pFirmware = NULL;
// priv->rf_close(dev);
// rtl8192_SetRFPowerState(dev, eRfOff);
rtl8192_usb_deleteendpoints(dev);
destroy_workqueue(priv->priv_wq);
//rtl8192_irq_disable(dev);
//rtl8192_reset(dev);
mdelay(10);
}
free_ieee80211(dev);
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
/* fun with the built-in ieee80211 stack... */
extern int ieee80211_debug_init(void);
extern void ieee80211_debug_exit(void);
extern int ieee80211_crypto_init(void);
extern void ieee80211_crypto_deinit(void);
extern int ieee80211_crypto_tkip_init(void);
extern void ieee80211_crypto_tkip_exit(void);
extern int ieee80211_crypto_ccmp_init(void);
extern void ieee80211_crypto_ccmp_exit(void);
extern int ieee80211_crypto_wep_init(void);
extern void ieee80211_crypto_wep_exit(void);
static int __init rtl8192_usb_module_init(void)
{
int ret;
#ifdef CONFIG_IEEE80211_DEBUG
ret = ieee80211_debug_init();
if (ret) {
printk(KERN_ERR "ieee80211_debug_init() failed %d\n", ret);
return ret;
}
#endif
ret = ieee80211_crypto_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_init() failed %d\n", ret);
return ret;
}
ret = ieee80211_crypto_tkip_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_tkip_init() failed %d\n",
ret);
return ret;
}
ret = ieee80211_crypto_ccmp_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_ccmp_init() failed %d\n",
ret);
return ret;
}
ret = ieee80211_crypto_wep_init();
if (ret) {
printk(KERN_ERR "ieee80211_crypto_wep_init() failed %d\n", ret);
return ret;
}
printk(KERN_INFO "\nLinux kernel driver for RTL8192 based WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan\n");
RT_TRACE(COMP_INIT, "Initializing module");
RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
rtl8192_proc_module_init();
return usb_register(&rtl8192_usb_driver);
}
static void __exit rtl8192_usb_module_exit(void)
{
usb_deregister(&rtl8192_usb_driver);
RT_TRACE(COMP_DOWN, "Exiting");
// rtl8192_proc_module_remove();
}
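/* Wake the ieee80211 Tx queue once enough Tx descriptors are available. */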
void rtl8192_try_wake_queue(struct net_device *dev, int pri)
{
unsigned long flags;
short enough_desc;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
spin_lock_irqsave(&priv->tx_lock,flags);
enough_desc = check_nic_enough_desc(dev,pri);
spin_unlock_irqrestore(&priv->tx_lock,flags);
if(enough_desc)
ieee80211_wake_queue(priv->ieee80211);
}
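/*
 * Program the SECR register: enable hardware Tx encryption and Rx
 * decryption, and select default-key usage for WEP and ad-hoc setups;
 * fall back to software security when hardware security cannot be used.
 */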
void EnableHWSecurityConfig8192(struct net_device *dev)
{
u8 SECR_value = 0x0;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2))
{
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
}
else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP)))
{
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
}
//add HWSec active enable here.
/*
 * Default to hardware security.  When the peer AP is in pure-N mode
 * (which HT_IOT_ACT_PURE_N_MODE indicates) and pairwise_key_type is
 * non-AES, use software security; when the peer AP is in mixed b/g/n
 * mode and pairwise_key_type is non-AES, use g-mode hardware security.
 * WB on 2008.7.4
 */
ieee->hwsec_active = 1;
if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep) // || !ieee->hwsec_support -- add an hwsec_support flag to fully control hw_sec on/off
{
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
RT_TRACE(COMP_SEC,"%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n", __FUNCTION__, \
ieee->hwsec_active, ieee->pairwise_key_type, SECR_value);
{
write_nic_byte(dev, SECR, SECR_value);//SECR_value | SCR_UseDK );
}
}
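/*
 * Write one entry of the hardware CAM (key table).  Each entry occupies
 * CAM_CONTENT_COUNT dwords: dword 0 carries the config word in its low
 * 16 bits plus the first two MAC-address octets, dword 1 the remaining
 * four MAC octets, and the following dwords the key material.  Every
 * dword is written through the WCAMI/RWCAM register pair.
 *
 * A hypothetical caller installing a pairwise CCMP key might look like
 * this (entry and index values are illustrative only, not from this
 * driver):
 *
 *	setKey(dev, 4, key_idx, KEY_TYPE_CCMP, sta_mac_addr, 0, key_words);
 */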
void setKey( struct net_device *dev,
u8 EntryNo,
u8 KeyIndex,
u16 KeyType,
u8 *MacAddr,
u8 DefaultKey,
u32 *KeyContent )
{
u32 TargetCommand = 0;
u32 TargetContent = 0;
u16 usConfig = 0;
u8 i;
if (EntryNo >= TOTAL_CAM_ENTRY)
RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
RT_TRACE(COMP_SEC, "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", dev,EntryNo, KeyIndex, KeyType, MacAddr);
if (DefaultKey)
usConfig |= BIT15 | (KeyType<<2);
else
usConfig |= BIT15 | (KeyType<<2) | KeyIndex;
// usConfig |= BIT15 | (KeyType<<2) | (DefaultKey<<5) | KeyIndex;
for(i=0 ; i<CAM_CONTENT_COUNT; i++){
TargetCommand = i+CAM_CONTENT_COUNT*EntryNo;
TargetCommand |= BIT31|BIT16;
if(i==0){//MAC|Config
TargetContent = (u32)(*(MacAddr+0)) << 16|
(u32)(*(MacAddr+1)) << 24|
(u32)usConfig;
write_nic_dword(dev, WCAMI, TargetContent);
write_nic_dword(dev, RWCAM, TargetCommand);
// printk("setkey cam =%8x\n", read_cam(dev, i+6*EntryNo));
}
else if(i==1){//MAC
TargetContent = (u32)(*(MacAddr+2)) |
(u32)(*(MacAddr+3)) << 8|
(u32)(*(MacAddr+4)) << 16|
(u32)(*(MacAddr+5)) << 24;
write_nic_dword(dev, WCAMI, TargetContent);
write_nic_dword(dev, RWCAM, TargetCommand);
}
else {
//Key Material
if(KeyContent !=NULL){
write_nic_dword(dev, WCAMI, (u32)(*(KeyContent+i-2)) );
write_nic_dword(dev, RWCAM, TargetCommand);
}
}
}
}
/***************************************************************************
------------------- module init / exit stubs ----------------
****************************************************************************/
module_init(rtl8192_usb_module_init);
module_exit(rtl8192_usb_module_exit);
| gpl-2.0 |
rocky-luo/linux-kernel | fs/ufs/namei.c | 2335 | 7740 | /*
* linux/fs/ufs/namei.c
*
* Migration to usage of "page cache" on May 2006 by
* Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
*
* Copyright (C) 1998
* Daniel Pirkl <daniel.pirkl@email.cz>
* Charles University, Faculty of Mathematics and Physics
*
* from
*
* linux/fs/ext2/namei.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*/
#include <linux/time.h>
#include <linux/fs.h>
#include "ufs_fs.h"
#include "ufs.h"
#include "util.h"
static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
{
int err = ufs_add_link(dentry, inode);
if (!err) {
d_instantiate(dentry, inode);
return 0;
}
inode_dec_link_count(inode);
iput(inode);
return err;
}
static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
struct inode * inode = NULL;
ino_t ino;
if (dentry->d_name.len > UFS_MAXNAMLEN)
return ERR_PTR(-ENAMETOOLONG);
lock_ufs(dir->i_sb);
ino = ufs_inode_by_name(dir, &dentry->d_name);
if (ino)
inode = ufs_iget(dir->i_sb, ino);
unlock_ufs(dir->i_sb);
if (IS_ERR(inode))
return ERR_CAST(inode);
return d_splice_alias(inode, dentry);
}
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
* is so far negative - it has no inode.
*
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
struct nameidata *nd)
{
struct inode *inode;
int err;
UFSD("BEGIN\n");
inode = ufs_new_inode(dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ufs_file_inode_operations;
inode->i_fop = &ufs_file_operations;
inode->i_mapping->a_ops = &ufs_aops;
mark_inode_dirty(inode);
lock_ufs(dir->i_sb);
err = ufs_add_nondir(dentry, inode);
unlock_ufs(dir->i_sb);
}
UFSD("END: err=%d\n", err);
return err;
}
static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
{
struct inode *inode;
int err;
if (!old_valid_dev(rdev))
return -EINVAL;
inode = ufs_new_inode(dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, mode, rdev);
ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev);
mark_inode_dirty(inode);
lock_ufs(dir->i_sb);
err = ufs_add_nondir(dentry, inode);
unlock_ufs(dir->i_sb);
}
return err;
}
static int ufs_symlink (struct inode * dir, struct dentry * dentry,
const char * symname)
{
struct super_block * sb = dir->i_sb;
int err = -ENAMETOOLONG;
unsigned l = strlen(symname)+1;
struct inode * inode;
if (l > sb->s_blocksize)
goto out_notlocked;
lock_ufs(dir->i_sb);
inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out;
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
/* slow symlink */
inode->i_op = &ufs_symlink_inode_operations;
inode->i_mapping->a_ops = &ufs_aops;
err = page_symlink(inode, symname, l);
if (err)
goto out_fail;
} else {
/* fast symlink */
inode->i_op = &ufs_fast_symlink_inode_operations;
memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
inode->i_size = l-1;
}
mark_inode_dirty(inode);
err = ufs_add_nondir(dentry, inode);
out:
unlock_ufs(dir->i_sb);
out_notlocked:
return err;
out_fail:
inode_dec_link_count(inode);
iput(inode);
goto out;
}
static int ufs_link (struct dentry * old_dentry, struct inode * dir,
struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
int error;
lock_ufs(dir->i_sb);
if (inode->i_nlink >= UFS_LINK_MAX) {
unlock_ufs(dir->i_sb);
return -EMLINK;
}
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
ihold(inode);
error = ufs_add_nondir(dentry, inode);
unlock_ufs(dir->i_sb);
return error;
}
static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
{
struct inode * inode;
int err = -EMLINK;
if (dir->i_nlink >= UFS_LINK_MAX)
goto out;
lock_ufs(dir->i_sb);
inode_inc_link_count(dir);
inode = ufs_new_inode(dir, S_IFDIR|mode);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_dir;
inode->i_op = &ufs_dir_inode_operations;
inode->i_fop = &ufs_dir_operations;
inode->i_mapping->a_ops = &ufs_aops;
inode_inc_link_count(inode);
err = ufs_make_empty(inode, dir);
if (err)
goto out_fail;
err = ufs_add_link(dentry, inode);
if (err)
goto out_fail;
unlock_ufs(dir->i_sb);
d_instantiate(dentry, inode);
out:
return err;
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
iput (inode);
out_dir:
inode_dec_link_count(dir);
unlock_ufs(dir->i_sb);
goto out;
}
static int ufs_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode * inode = dentry->d_inode;
struct ufs_dir_entry *de;
struct page *page;
int err = -ENOENT;
de = ufs_find_entry(dir, &dentry->d_name, &page);
if (!de)
goto out;
err = ufs_delete_entry(dir, de, page);
if (err)
goto out;
inode->i_ctime = dir->i_ctime;
inode_dec_link_count(inode);
err = 0;
out:
return err;
}
static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
{
struct inode * inode = dentry->d_inode;
int err= -ENOTEMPTY;
lock_ufs(dir->i_sb);
if (ufs_empty_dir (inode)) {
err = ufs_unlink(dir, dentry);
if (!err) {
inode->i_size = 0;
inode_dec_link_count(inode);
inode_dec_link_count(dir);
}
}
unlock_ufs(dir->i_sb);
return err;
}
static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *old_inode = old_dentry->d_inode;
struct inode *new_inode = new_dentry->d_inode;
struct page *dir_page = NULL;
struct ufs_dir_entry * dir_de = NULL;
struct page *old_page;
struct ufs_dir_entry *old_de;
int err = -ENOENT;
old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
dir_de = ufs_dotdot(old_inode, &dir_page);
if (!dir_de)
goto out_old;
}
if (new_inode) {
struct page *new_page;
struct ufs_dir_entry *new_de;
err = -ENOTEMPTY;
if (dir_de && !ufs_empty_dir(new_inode))
goto out_dir;
err = -ENOENT;
new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
ufs_set_link(new_dir, new_de, new_page, old_inode);
new_inode->i_ctime = CURRENT_TIME_SEC;
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
} else {
if (dir_de) {
err = -EMLINK;
if (new_dir->i_nlink >= UFS_LINK_MAX)
goto out_dir;
}
err = ufs_add_link(new_dentry, old_inode);
if (err)
goto out_dir;
if (dir_de)
inode_inc_link_count(new_dir);
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
old_inode->i_ctime = CURRENT_TIME_SEC;
ufs_delete_entry(old_dir, old_de, old_page);
mark_inode_dirty(old_inode);
if (dir_de) {
ufs_set_link(old_inode, dir_de, dir_page, new_dir);
inode_dec_link_count(old_dir);
}
return 0;
out_dir:
if (dir_de) {
kunmap(dir_page);
page_cache_release(dir_page);
}
out_old:
kunmap(old_page);
page_cache_release(old_page);
out:
return err;
}
const struct inode_operations ufs_dir_inode_operations = {
.create = ufs_create,
.lookup = ufs_lookup,
.link = ufs_link,
.unlink = ufs_unlink,
.symlink = ufs_symlink,
.mkdir = ufs_mkdir,
.rmdir = ufs_rmdir,
.mknod = ufs_mknod,
.rename = ufs_rename,
};
| gpl-2.0 |
lani11/Potsy_Kernel | drivers/s390/cio/qdio_thinint.c | 2335 | 6477 | /*
* linux/drivers/s390/cio/thinint_qdio.c
*
* Copyright 2000,2009 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
/*
* Restriction: only 63 iqdio subchannels can have their own indicator;
* beyond that, subsequent subchannels share one indicator
*/
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);
/* adapter local summary indicator */
static u8 *tiqdio_alsi;
struct indicator_t *q_indicators;
static u64 last_ai_time;
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
if (!atomic_read(&q_indicators[i].count)) {
atomic_set(&q_indicators[i].count, 1);
return &q_indicators[i].ind;
}
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
return &q_indicators[TIQDIO_SHARED_IND].ind;
}
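/* release an indicator previously handed out by get_indicator() */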
static void put_indicator(u32 *addr)
{
int i;
if (!addr)
return;
i = ((unsigned long)addr - (unsigned long)q_indicators) /
sizeof(struct indicator_t);
atomic_dec(&q_indicators[i].count);
}
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
mutex_lock(&tiq_list_lock);
for_each_input_queue(irq_ptr, q, i)
list_add_rcu(&q->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
for (i = 0; i < irq_ptr->nr_input_qs; i++) {
q = irq_ptr->input_qs[i];
/* if establish triggered an error */
if (!q || !q->entry.prev || !q->entry.next)
continue;
mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
}
}
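/* atomically fetch and clear the shared indicator, if it is in use */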
static inline u32 clear_shared_ind(void)
{
if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
return 0;
return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @alsi: pointer to adapter local summary indicator
* @data: NULL
*/
static void tiqdio_thinint_handler(void *alsi, void *data)
{
u32 si_used = clear_shared_ind();
struct qdio_q *q;
last_ai_time = S390_lowcore.int_clock;
kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
/* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry) {
/* only process queues from changed sets */
if (unlikely(shared_ind(q->irq_ptr->dsci))) {
if (!si_used)
continue;
} else if (!*q->irq_ptr->dsci)
continue;
if (q->u.in.queue_start_poll) {
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
&q->u.in.queue_irq_state)) {
qperf_inc(q, int_discarded);
continue;
}
/* avoid dsci clear here, done after processing */
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
q->irq_ptr->int_parm);
} else {
/* only clear it if the indicator is non-shared */
if (!shared_ind(q->irq_ptr->dsci))
xchg(q->irq_ptr->dsci, 0);
/*
* Call inbound processing but not directly
* since that could starve other thinint queues.
*/
tasklet_schedule(&q->tasklet);
}
qperf_inc(q, adapter_int);
}
rcu_read_unlock();
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct scssc_area *scssc_area;
int rc;
scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
memset(scssc_area, 0, PAGE_SIZE);
if (reset) {
scssc_area->summary_indicator_addr = 0;
scssc_area->subchannel_indicator_addr = 0;
} else {
scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
scssc_area->subchannel_indicator_addr =
virt_to_phys(irq_ptr->dsci);
}
scssc_area->request = (struct chsc_header) {
.length = 0x0fe0,
.code = 0x0021,
};
scssc_area->operation_code = 0;
scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
scssc_area->isc = QDIO_AIRQ_ISC;
scssc_area->schid = irq_ptr->schid;
/* enable the time delay disablement facility */
if (css_general_characteristics.aif_tdd)
scssc_area->word_with_d_bit = 0x10000000;
rc = chsc(scssc_area);
if (rc)
return -EIO;
rc = chsc_error_from_response(scssc_area->response.code);
if (rc) {
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
scssc_area->response.code);
DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
return rc;
}
DBF_EVENT("setscind");
DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
return 0;
}
/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
GFP_KERNEL);
if (!q_indicators)
return -ENOMEM;
return 0;
}
void tiqdio_free_memory(void)
{
kfree(q_indicators);
}
int __init tiqdio_register_thinints(void)
{
isc_register(QDIO_AIRQ_ISC);
tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
NULL, QDIO_AIRQ_ISC);
if (IS_ERR(tiqdio_alsi)) {
DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
tiqdio_alsi = NULL;
isc_unregister(QDIO_AIRQ_ISC);
return -ENOMEM;
}
return 0;
}
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
return set_subchannel_ind(irq_ptr, 0);
}
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
irq_ptr->dsci = get_indicator();
DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
/* reset adapter interrupt indicators */
set_subchannel_ind(irq_ptr, 1);
put_indicator(irq_ptr->dsci);
}
void __exit tiqdio_unregister_thinints(void)
{
WARN_ON(!list_empty(&tiq_list));
if (tiqdio_alsi) {
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
}
| gpl-2.0 |
1N4148/kernel_golden | fs/attr.c | 2335 | 6938 | /*
* linux/fs/attr.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* changes by Thomas Schoebel-Theuer
*/
#include <linux/module.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/security.h>
/**
* inode_change_ok - check if attribute changes to an inode are allowed
* @inode: inode to check
* @attr: attributes to change
*
* Check if we are allowed to change the attributes contained in @attr
* in the given inode. This includes the normal unix access permission
* checks, as well as checks for rlimits and others.
*
* Should be called as the first thing in ->setattr implementations,
* possibly after taking additional locks.
*/
int inode_change_ok(const struct inode *inode, struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
/*
* First check size constraints. These can't be overridden using
* ATTR_FORCE.
*/
if (ia_valid & ATTR_SIZE) {
int error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
}
/* If force is set do it anyway. */
if (ia_valid & ATTR_FORCE)
return 0;
/* Make sure a caller can chown. */
if ((ia_valid & ATTR_UID) &&
(current_fsuid() != inode->i_uid ||
attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN))
return -EPERM;
/* Make sure caller can chgrp. */
if ((ia_valid & ATTR_GID) &&
(current_fsuid() != inode->i_uid ||
(!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) &&
!capable(CAP_CHOWN))
return -EPERM;
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
if (!inode_owner_or_capable(inode))
return -EPERM;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
inode->i_gid) && !capable(CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
if (!inode_owner_or_capable(inode))
return -EPERM;
}
return 0;
}
EXPORT_SYMBOL(inode_change_ok);
/**
* inode_newsize_ok - may this inode be truncated to a given size
* @inode: the inode to be truncated
* @offset: the new size to assign to the inode
* Returns: 0 on success, negative errno on failure
*
* inode_newsize_ok must be called with i_mutex held.
*
* inode_newsize_ok will check filesystem limits and ulimits to check that the
* new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
* when necessary. Caller must not proceed with inode size change if failure is
* returned. @inode must be a file (not directory), with appropriate
* permissions to allow truncate (inode_newsize_ok does NOT check these
* conditions).
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
if (inode->i_size < offset) {
unsigned long limit;
limit = rlimit(RLIMIT_FSIZE);
if (limit != RLIM_INFINITY && offset > limit)
goto out_sig;
if (offset > inode->i_sb->s_maxbytes)
goto out_big;
} else {
/*
* truncation of in-use swapfiles is disallowed - it would
* cause subsequent swapout to scribble on the now-freed
* blocks.
*/
if (IS_SWAPFILE(inode))
return -ETXTBSY;
}
return 0;
out_sig:
send_sig(SIGXFSZ, current, 0);
out_big:
return -EFBIG;
}
EXPORT_SYMBOL(inode_newsize_ok);
/**
* setattr_copy - copy simple metadata updates into the generic inode
* @inode: the inode to be updated
* @attr: the new attributes
*
* setattr_copy must be called with i_mutex held.
*
* setattr_copy updates the inode's metadata with that specified
* in attr. Noticeably missing is inode size update, which is more complex
* as it requires pagecache updates.
*
* The inode is not marked as dirty after this operation. The rationale is
* that for "simple" filesystems, the struct inode is the inode storage.
* The caller is free to mark the inode dirty afterwards if needed.
*/
void setattr_copy(struct inode *inode, const struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
if (ia_valid & ATTR_ATIME)
inode->i_atime = timespec_trunc(attr->ia_atime,
inode->i_sb->s_time_gran);
if (ia_valid & ATTR_MTIME)
inode->i_mtime = timespec_trunc(attr->ia_mtime,
inode->i_sb->s_time_gran);
if (ia_valid & ATTR_CTIME)
inode->i_ctime = timespec_trunc(attr->ia_ctime,
inode->i_sb->s_time_gran);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
inode->i_mode = mode;
}
}
EXPORT_SYMBOL(setattr_copy);
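/*
 * notify_change - validate and apply attribute changes to an inode
 *
 * Fills in ctime and any missing timestamps, translates ATTR_KILL_*
 * requests into mode changes, gives security modules a chance to veto
 * the change, then calls the filesystem's ->setattr (or simple_setattr)
 * and emits the matching fsnotify event.
 */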
int notify_change(struct dentry * dentry, struct iattr * attr)
{
struct inode *inode = dentry->d_inode;
mode_t mode = inode->i_mode;
int error;
struct timespec now;
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
}
if ((ia_valid & ATTR_MODE)) {
mode_t amode = attr->ia_mode;
/* Flag setting protected by i_mutex */
if (is_sxid(amode))
inode->i_flags &= ~S_NOSEC;
}
now = current_fs_time(inode->i_sb);
attr->ia_ctime = now;
if (!(ia_valid & ATTR_ATIME_SET))
attr->ia_atime = now;
if (!(ia_valid & ATTR_MTIME_SET))
attr->ia_mtime = now;
if (ia_valid & ATTR_KILL_PRIV) {
attr->ia_valid &= ~ATTR_KILL_PRIV;
ia_valid &= ~ATTR_KILL_PRIV;
error = security_inode_need_killpriv(dentry);
if (error > 0)
error = security_inode_killpriv(dentry);
if (error)
return error;
}
/*
* We now pass ATTR_KILL_S*ID to the lower level setattr function so
* that the function has the ability to reinterpret a mode change
* that's due to these bits. This adds an implicit restriction that
* no function will ever call notify_change with both ATTR_MODE and
* ATTR_KILL_S*ID set.
*/
if ((ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) &&
(ia_valid & ATTR_MODE))
BUG();
if (ia_valid & ATTR_KILL_SUID) {
if (mode & S_ISUID) {
ia_valid = attr->ia_valid |= ATTR_MODE;
attr->ia_mode = (inode->i_mode & ~S_ISUID);
}
}
if (ia_valid & ATTR_KILL_SGID) {
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
if (!(ia_valid & ATTR_MODE)) {
ia_valid = attr->ia_valid |= ATTR_MODE;
attr->ia_mode = inode->i_mode;
}
attr->ia_mode &= ~S_ISGID;
}
}
if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID)))
return 0;
error = security_inode_setattr(dentry, attr);
if (error)
return error;
if (ia_valid & ATTR_SIZE)
down_write(&dentry->d_inode->i_alloc_sem);
if (inode->i_op->setattr)
error = inode->i_op->setattr(dentry, attr);
else
error = simple_setattr(dentry, attr);
if (ia_valid & ATTR_SIZE)
up_write(&dentry->d_inode->i_alloc_sem);
if (!error)
fsnotify_change(dentry, ia_valid);
return error;
}
EXPORT_SYMBOL(notify_change);
| gpl-2.0 |
Supermaster34/3.0-Kernel-Galaxy-Player-US | drivers/net/enic/vnic_wq.c | 3359 | 4872 | /*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
vdev = wq->vdev;
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!wq->bufs[i]) {
pr_err("Failed to alloc wq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = wq->bufs[i];
for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
buf->desc = (u8 *)wq->ring.descs +
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
buf->next = wq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
wq->to_use = wq->to_clean = wq->bufs[0];
return 0;
}
void vnic_wq_free(struct vnic_wq *wq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
if (wq->bufs[i]) {
kfree(wq->bufs[i]);
wq->bufs[i] = NULL;
}
}
wq->ctrl = NULL;
}
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = index;
wq->vdev = vdev;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
pr_err("Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_wq_alloc_bufs(wq);
if (err) {
vnic_wq_free(wq);
return err;
}
return 0;
}
static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = wq->ring.desc_count;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(count, &wq->ctrl->ring_size);
iowrite32(fetch_index, &wq->ctrl->fetch_index);
iowrite32(posted_index, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
wq->to_use = wq->to_clean =
&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
vnic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
}
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
return ioread32(&wq->ctrl->error_status);
}
void vnic_wq_enable(struct vnic_wq *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(10);
}
pr_err("Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
struct vnic_wq_buf *buf;
buf = wq->to_clean;
while (vnic_wq_desc_used(wq) > 0) {
(*buf_clean)(wq, buf);
buf = wq->to_clean = buf->next;
wq->ring.desc_avail++;
}
wq->to_use = wq->to_clean = wq->bufs[0];
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}
| gpl-2.0 |
Jazz-823/kernel_lge_hammerhead_CM | net/ceph/osd_client.c | 3359 | 57236 | #include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#define OSD_OP_FRONT_LEN 4096
#define OSD_OPREPLY_FRONT_LEN 512
static const struct ceph_connection_operations osd_con_ops;
static void send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
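/* ops whose variable-length payload is appended via the trail pagelist */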
static int op_needs_trail(int op)
{
switch (op) {
case CEPH_OSD_OP_GETXATTR:
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
case CEPH_OSD_OP_CALL:
case CEPH_OSD_OP_NOTIFY:
return 1;
default:
return 0;
}
}
static int op_has_extent(int op)
{
return (op == CEPH_OSD_OP_READ ||
op == CEPH_OSD_OP_WRITE);
}
void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
u64 snapid,
u64 off, u64 *plen, u64 *bno,
struct ceph_osd_request *req,
struct ceph_osd_req_op *op)
{
struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
u64 orig_len = *plen;
u64 objoff, objlen; /* extent in object */
reqhead->snapid = cpu_to_le64(snapid);
/* object extent? */
ceph_calc_file_object_mapping(layout, off, plen, bno,
&objoff, &objlen);
if (*plen < orig_len)
dout(" skipping last %llu, final file extent %llu~%llu\n",
orig_len - *plen, off, *plen);
if (op_has_extent(op->op)) {
op->extent.offset = objoff;
op->extent.length = objlen;
}
req->r_num_pages = calc_pages_for(off, *plen);
req->r_page_alignment = off & ~PAGE_MASK;
if (op->op == CEPH_OSD_OP_WRITE)
op->payload_len = *plen;
dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
*bno, objoff, objlen, req->r_num_pages);
}
EXPORT_SYMBOL(ceph_calc_raw_layout);
/*
* Implement client access to distributed object storage cluster.
*
* All data objects are stored within a cluster/cloud of OSDs, or
* "object storage devices." (Note that Ceph OSDs have _nothing_ to
* do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
* remote daemons serving up and coordinating consistent and safe
* access to storage.
*
* Cluster membership and the mapping of data objects onto storage devices
* are described by the osd map.
*
* We keep track of pending OSD requests (read, write), resubmit
* requests to different OSDs when the cluster topology/data layout
* change, or retry the affected requests when the communications
* channel with an OSD is reset.
*/
/*
* calculate the mapping of a file extent onto an object, and fill out the
* request accordingly. shorten extent as necessary if it crosses an
* object boundary.
*
* fill osd op in request message.
*/
static void calc_layout(struct ceph_osd_client *osdc,
struct ceph_vino vino,
struct ceph_file_layout *layout,
u64 off, u64 *plen,
struct ceph_osd_request *req,
struct ceph_osd_req_op *op)
{
u64 bno;
ceph_calc_raw_layout(osdc, layout, vino.snap, off,
plen, &bno, req, op);
snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
req->r_oid_len = strlen(req->r_oid);
}
/*
* requests
*/
void ceph_osdc_release_request(struct kref *kref)
{
struct ceph_osd_request *req = container_of(kref,
struct ceph_osd_request,
r_kref);
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_reply)
ceph_msg_put(req->r_reply);
if (req->r_con_filling_msg) {
dout("release_request revoking pages %p from con %p\n",
req->r_pages, req->r_con_filling_msg);
ceph_con_revoke_message(req->r_con_filling_msg,
req->r_reply);
ceph_con_put(req->r_con_filling_msg);
}
if (req->r_own_pages)
ceph_release_page_vector(req->r_pages,
req->r_num_pages);
#ifdef CONFIG_BLOCK
if (req->r_bio)
bio_put(req->r_bio);
#endif
ceph_put_snap_context(req->r_snapc);
if (req->r_trail) {
ceph_pagelist_release(req->r_trail);
kfree(req->r_trail);
}
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
else
kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);
static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
{
int i = 0;
if (needs_trail)
*needs_trail = 0;
while (ops[i].op) {
if (needs_trail && op_needs_trail(ops[i].op))
*needs_trail = 1;
i++;
}
return i;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
int flags,
struct ceph_snap_context *snapc,
struct ceph_osd_req_op *ops,
bool use_mempool,
gfp_t gfp_flags,
struct page **pages,
struct bio *bio)
{
struct ceph_osd_request *req;
struct ceph_msg *msg;
int needs_trail;
int num_op = get_num_ops(ops, &needs_trail);
size_t msg_size = sizeof(struct ceph_osd_request_head);
msg_size += num_op*sizeof(struct ceph_osd_op);
if (use_mempool) {
req = mempool_alloc(osdc->req_mempool, gfp_flags);
memset(req, 0, sizeof(*req));
} else {
req = kzalloc(sizeof(*req), gfp_flags);
}
if (req == NULL)
return NULL;
req->r_osdc = osdc;
req->r_mempool = use_mempool;
kref_init(&req->r_kref);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
INIT_LIST_HEAD(&req->r_unsafe_item);
INIT_LIST_HEAD(&req->r_linger_item);
INIT_LIST_HEAD(&req->r_linger_osd);
INIT_LIST_HEAD(&req->r_req_lru_item);
req->r_flags = flags;
WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
/* create reply message */
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
if (!msg) {
ceph_osdc_put_request(req);
return NULL;
}
req->r_reply = msg;
/* allocate space for the trailing data */
if (needs_trail) {
req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
if (!req->r_trail) {
ceph_osdc_put_request(req);
return NULL;
}
ceph_pagelist_init(req->r_trail);
}
/* create request message; allow space for oid */
msg_size += MAX_OBJ_NAME_SIZE;
if (snapc)
msg_size += sizeof(u64) * snapc->num_snaps;
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
if (!msg) {
ceph_osdc_put_request(req);
return NULL;
}
msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
memset(msg->front.iov_base, 0, msg->front.iov_len);
req->r_request = msg;
req->r_pages = pages;
#ifdef CONFIG_BLOCK
if (bio) {
req->r_bio = bio;
bio_get(req->r_bio);
}
#endif
return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static void osd_req_encode_op(struct ceph_osd_request *req,
struct ceph_osd_op *dst,
struct ceph_osd_req_op *src)
{
dst->op = cpu_to_le16(src->op);
switch (dst->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
dst->extent.offset =
cpu_to_le64(src->extent.offset);
dst->extent.length =
cpu_to_le64(src->extent.length);
dst->extent.truncate_size =
cpu_to_le64(src->extent.truncate_size);
dst->extent.truncate_seq =
cpu_to_le32(src->extent.truncate_seq);
break;
case CEPH_OSD_OP_GETXATTR:
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
BUG_ON(!req->r_trail);
dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
dst->xattr.cmp_op = src->xattr.cmp_op;
dst->xattr.cmp_mode = src->xattr.cmp_mode;
ceph_pagelist_append(req->r_trail, src->xattr.name,
src->xattr.name_len);
ceph_pagelist_append(req->r_trail, src->xattr.val,
src->xattr.value_len);
break;
case CEPH_OSD_OP_CALL:
BUG_ON(!req->r_trail);
dst->cls.class_len = src->cls.class_len;
dst->cls.method_len = src->cls.method_len;
dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
ceph_pagelist_append(req->r_trail, src->cls.class_name,
src->cls.class_len);
ceph_pagelist_append(req->r_trail, src->cls.method_name,
src->cls.method_len);
ceph_pagelist_append(req->r_trail, src->cls.indata,
src->cls.indata_len);
break;
case CEPH_OSD_OP_ROLLBACK:
dst->snap.snapid = cpu_to_le64(src->snap.snapid);
break;
case CEPH_OSD_OP_STARTSYNC:
break;
case CEPH_OSD_OP_NOTIFY:
{
__le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
__le32 timeout = cpu_to_le32(src->watch.timeout);
BUG_ON(!req->r_trail);
ceph_pagelist_append(req->r_trail,
&prot_ver, sizeof(prot_ver));
ceph_pagelist_append(req->r_trail,
&timeout, sizeof(timeout));
}
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
dst->watch.cookie = cpu_to_le64(src->watch.cookie);
dst->watch.ver = cpu_to_le64(src->watch.ver);
dst->watch.flag = src->watch.flag;
break;
default:
pr_err("unrecognized osd opcode %d\n", dst->op);
WARN_ON(1);
break;
}
dst->payload_len = cpu_to_le32(src->payload_len);
}
/*
* build new request AND message
*
*/
void ceph_osdc_build_request(struct ceph_osd_request *req,
u64 off, u64 *plen,
struct ceph_osd_req_op *src_ops,
struct ceph_snap_context *snapc,
struct timespec *mtime,
const char *oid,
int oid_len)
{
struct ceph_msg *msg = req->r_request;
struct ceph_osd_request_head *head;
struct ceph_osd_req_op *src_op;
struct ceph_osd_op *op;
void *p;
int num_op = get_num_ops(src_ops, NULL);
size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
int flags = req->r_flags;
u64 data_len = 0;
int i;
head = msg->front.iov_base;
op = (void *)(head + 1);
p = (void *)(op + num_op);
req->r_snapc = ceph_get_snap_context(snapc);
head->client_inc = cpu_to_le32(1); /* always, for now. */
head->flags = cpu_to_le32(flags);
if (flags & CEPH_OSD_FLAG_WRITE)
ceph_encode_timespec(&head->mtime, mtime);
head->num_ops = cpu_to_le16(num_op);
/* fill in oid */
head->object_len = cpu_to_le32(oid_len);
memcpy(p, oid, oid_len);
p += oid_len;
src_op = src_ops;
while (src_op->op) {
osd_req_encode_op(req, op, src_op);
src_op++;
op++;
}
if (req->r_trail)
data_len += req->r_trail->length;
if (snapc) {
head->snap_seq = cpu_to_le64(snapc->seq);
head->num_snaps = cpu_to_le32(snapc->num_snaps);
for (i = 0; i < snapc->num_snaps; i++) {
put_unaligned_le64(snapc->snaps[i], p);
p += sizeof(u64);
}
}
if (flags & CEPH_OSD_FLAG_WRITE) {
req->r_request->hdr.data_off = cpu_to_le16(off);
req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
} else if (data_len) {
req->r_request->hdr.data_off = 0;
req->r_request->hdr.data_len = cpu_to_le32(data_len);
}
req->r_request->page_alignment = req->r_page_alignment;
BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
msg_size = p - msg->front.iov_base;
msg->front.iov_len = msg_size;
msg->hdr.front_len = cpu_to_le32(msg_size);
return;
}
EXPORT_SYMBOL(ceph_osdc_build_request);
/*
* build new request AND message, calculate layout, and adjust file
* extent as needed.
*
* if the file was recently truncated, we include information about its
* old and new size so that the object can be updated appropriately. (we
* avoid synchronously deleting truncated objects because it's slow.)
*
* if @do_sync, include a 'startsync' command so that the osd will flush
* data quickly.
*/
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
struct ceph_file_layout *layout,
struct ceph_vino vino,
u64 off, u64 *plen,
int opcode, int flags,
struct ceph_snap_context *snapc,
int do_sync,
u32 truncate_seq,
u64 truncate_size,
struct timespec *mtime,
bool use_mempool, int num_reply,
int page_align)
{
struct ceph_osd_req_op ops[3];
struct ceph_osd_request *req;
ops[0].op = opcode;
ops[0].extent.truncate_seq = truncate_seq;
ops[0].extent.truncate_size = truncate_size;
ops[0].payload_len = 0;
if (do_sync) {
ops[1].op = CEPH_OSD_OP_STARTSYNC;
ops[1].payload_len = 0;
ops[2].op = 0;
} else
ops[1].op = 0;
req = ceph_osdc_alloc_request(osdc, flags,
snapc, ops,
use_mempool,
GFP_NOFS, NULL, NULL);
if (!req)
return NULL;
/* calculate max write size */
calc_layout(osdc, vino, layout, off, plen, req, ops);
req->r_file_layout = *layout; /* keep a copy */
/* in case it differs from natural (file) alignment that
calc_layout filled in for us */
req->r_num_pages = calc_pages_for(page_align, *plen);
req->r_page_alignment = page_align;
ceph_osdc_build_request(req, off, plen, ops,
snapc,
mtime,
req->r_oid, req->r_oid_len);
return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
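/*
 * Example (illustrative sketch, not from this file): building a simple
 * read request with ceph_osdc_new_request().  osdc, layout, vino,
 * truncate_seq, truncate_size and page_align are assumed to be supplied
 * by the caller; this mirrors the call made in ceph_osdc_readpages()
 * below.
 *
 *	u64 off = 0, len = 4096;
 *	struct ceph_osd_request *req;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false, 1, page_align);
 *	if (!req)
 *		return -ENOMEM;
 */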
/*
* We keep osd requests in an rbtree, sorted by ->r_tid.
*/
static void __insert_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *new)
{
struct rb_node **p = &osdc->requests.rb_node;
struct rb_node *parent = NULL;
struct ceph_osd_request *req = NULL;
while (*p) {
parent = *p;
req = rb_entry(parent, struct ceph_osd_request, r_node);
if (new->r_tid < req->r_tid)
p = &(*p)->rb_left;
else if (new->r_tid > req->r_tid)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->r_node, parent, p);
rb_insert_color(&new->r_node, &osdc->requests);
}
static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
u64 tid)
{
struct ceph_osd_request *req;
struct rb_node *n = osdc->requests.rb_node;
while (n) {
req = rb_entry(n, struct ceph_osd_request, r_node);
if (tid < req->r_tid)
n = n->rb_left;
else if (tid > req->r_tid)
n = n->rb_right;
else
return req;
}
return NULL;
}
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
u64 tid)
{
struct ceph_osd_request *req;
struct rb_node *n = osdc->requests.rb_node;
while (n) {
req = rb_entry(n, struct ceph_osd_request, r_node);
if (tid < req->r_tid) {
if (!n->rb_left)
return req;
n = n->rb_left;
} else if (tid > req->r_tid) {
n = n->rb_right;
} else {
return req;
}
}
return NULL;
}
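/*
 * Note: __lookup_request_ge() returns the request with the smallest tid
 * greater than or equal to the given tid.  ceph_osdc_sync() below relies
 * on this to walk the tree in tid order while dropping request_mutex
 * around each wait.
 */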
/*
* Resubmit requests pending on the given osd.
*/
static void __kick_osd_requests(struct ceph_osd_client *osdc,
struct ceph_osd *osd)
{
struct ceph_osd_request *req, *nreq;
int err;
dout("__kick_osd_requests osd%d\n", osd->o_osd);
err = __reset_osd(osdc, osd);
if (err == -EAGAIN)
return;
list_for_each_entry(req, &osd->o_requests, r_osd_item) {
list_move(&req->r_req_lru_item, &osdc->req_unsent);
dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
osd->o_osd);
if (!req->r_linger)
req->r_flags |= CEPH_OSD_FLAG_RETRY;
}
list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
r_linger_osd) {
/*
* reregister request prior to unregistering linger so
* that r_osd is preserved.
*/
BUG_ON(!list_empty(&req->r_req_lru_item));
__register_request(osdc, req);
list_add(&req->r_req_lru_item, &osdc->req_unsent);
list_add(&req->r_osd_item, &req->r_osd->o_requests);
__unregister_linger_request(osdc, req);
dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
osd->o_osd);
}
}
static void kick_osd_requests(struct ceph_osd_client *osdc,
struct ceph_osd *kickosd)
{
mutex_lock(&osdc->request_mutex);
__kick_osd_requests(osdc, kickosd);
mutex_unlock(&osdc->request_mutex);
}
/*
* If the osd connection drops, we need to resubmit all requests.
*/
static void osd_reset(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc;
if (!osd)
return;
dout("osd_reset osd%d\n", osd->o_osd);
osdc = osd->o_osdc;
down_read(&osdc->map_sem);
kick_osd_requests(osdc, osd);
send_queued(osdc);
up_read(&osdc->map_sem);
}
/*
* Track open sessions with osds.
*/
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
struct ceph_osd *osd;
osd = kzalloc(sizeof(*osd), GFP_NOFS);
if (!osd)
return NULL;
atomic_set(&osd->o_ref, 1);
osd->o_osdc = osdc;
INIT_LIST_HEAD(&osd->o_requests);
INIT_LIST_HEAD(&osd->o_linger_requests);
INIT_LIST_HEAD(&osd->o_osd_lru);
osd->o_incarnation = 1;
ceph_con_init(osdc->client->msgr, &osd->o_con);
osd->o_con.private = osd;
osd->o_con.ops = &osd_con_ops;
osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
INIT_LIST_HEAD(&osd->o_keepalive_item);
return osd;
}
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
if (atomic_inc_not_zero(&osd->o_ref)) {
dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
atomic_read(&osd->o_ref));
return osd;
} else {
dout("get_osd %p FAIL\n", osd);
return NULL;
}
}
static void put_osd(struct ceph_osd *osd)
{
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
if (atomic_dec_and_test(&osd->o_ref)) {
struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
if (osd->o_authorizer)
ac->ops->destroy_authorizer(ac, osd->o_authorizer);
kfree(osd);
}
}
/*
* remove an osd from our map
*/
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
dout("__remove_osd %p\n", osd);
BUG_ON(!list_empty(&osd->o_requests));
rb_erase(&osd->o_node, &osdc->osds);
list_del_init(&osd->o_osd_lru);
ceph_con_close(&osd->o_con);
put_osd(osd);
}
static void remove_all_osds(struct ceph_osd_client *osdc)
{
dout("__remove_old_osds %p\n", osdc);
mutex_lock(&osdc->request_mutex);
while (!RB_EMPTY_ROOT(&osdc->osds)) {
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
struct ceph_osd, o_node);
__remove_osd(osdc, osd);
}
mutex_unlock(&osdc->request_mutex);
}
static void __move_osd_to_lru(struct ceph_osd_client *osdc,
struct ceph_osd *osd)
{
dout("__move_osd_to_lru %p\n", osd);
BUG_ON(!list_empty(&osd->o_osd_lru));
list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
dout("__remove_osd_from_lru %p\n", osd);
if (!list_empty(&osd->o_osd_lru))
list_del_init(&osd->o_osd_lru);
}
static void remove_old_osds(struct ceph_osd_client *osdc)
{
struct ceph_osd *osd, *nosd;
dout("__remove_old_osds %p\n", osdc);
mutex_lock(&osdc->request_mutex);
list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
if (time_before(jiffies, osd->lru_ttl))
break;
__remove_osd(osdc, osd);
}
mutex_unlock(&osdc->request_mutex);
}
/*
* reset osd connect
*/
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
struct ceph_osd_request *req;
int ret = 0;
dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
if (list_empty(&osd->o_requests) &&
list_empty(&osd->o_linger_requests)) {
__remove_osd(osdc, osd);
} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
&osd->o_con.peer_addr,
sizeof(osd->o_con.peer_addr)) == 0 &&
!ceph_con_opened(&osd->o_con)) {
dout(" osd addr hasn't changed and connection never opened,"
" letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
list_for_each_entry(req, &osd->o_requests, r_osd_item)
req->r_stamp = jiffies;
ret = -EAGAIN;
} else {
ceph_con_close(&osd->o_con);
ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
osd->o_incarnation++;
}
return ret;
}
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
struct rb_node **p = &osdc->osds.rb_node;
struct rb_node *parent = NULL;
struct ceph_osd *osd = NULL;
dout("__insert_osd %p osd%d\n", new, new->o_osd);
while (*p) {
parent = *p;
osd = rb_entry(parent, struct ceph_osd, o_node);
if (new->o_osd < osd->o_osd)
p = &(*p)->rb_left;
else if (new->o_osd > osd->o_osd)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->o_node, parent, p);
rb_insert_color(&new->o_node, &osdc->osds);
}
static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
struct ceph_osd *osd;
struct rb_node *n = osdc->osds.rb_node;
while (n) {
osd = rb_entry(n, struct ceph_osd, o_node);
if (o < osd->o_osd)
n = n->rb_left;
else if (o > osd->o_osd)
n = n->rb_right;
else
return osd;
}
return NULL;
}
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
schedule_delayed_work(&osdc->timeout_work,
osdc->client->options->osd_keepalive_timeout * HZ);
}
static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
cancel_delayed_work(&osdc->timeout_work);
}
/*
* Register request, assign tid. If this is the first request, set up
* the timeout event.
*/
static void __register_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
req->r_tid = ++osdc->last_tid;
req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
dout("__register_request %p tid %lld\n", req, req->r_tid);
__insert_request(osdc, req);
ceph_osdc_get_request(req);
osdc->num_requests++;
if (osdc->num_requests == 1) {
dout(" first request, scheduling timeout\n");
__schedule_osd_timeout(osdc);
}
}
static void register_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
mutex_lock(&osdc->request_mutex);
__register_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
}
/*
* called under osdc->request_mutex
*/
static void __unregister_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
rb_erase(&req->r_node, &osdc->requests);
osdc->num_requests--;
if (req->r_osd) {
/* make sure the original request isn't in flight. */
ceph_con_revoke(&req->r_osd->o_con, req->r_request);
list_del_init(&req->r_osd_item);
if (list_empty(&req->r_osd->o_requests) &&
list_empty(&req->r_osd->o_linger_requests)) {
dout("moving osd to %p lru\n", req->r_osd);
__move_osd_to_lru(osdc, req->r_osd);
}
if (list_empty(&req->r_linger_item))
req->r_osd = NULL;
}
ceph_osdc_put_request(req);
list_del_init(&req->r_req_lru_item);
if (osdc->num_requests == 0) {
dout(" no requests, canceling timeout\n");
__cancel_osd_timeout(osdc);
}
}
/*
* Cancel a previously queued request message
*/
static void __cancel_request(struct ceph_osd_request *req)
{
if (req->r_sent && req->r_osd) {
ceph_con_revoke(&req->r_osd->o_con, req->r_request);
req->r_sent = 0;
}
}
static void __register_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
dout("__register_linger_request %p\n", req);
list_add_tail(&req->r_linger_item, &osdc->req_linger);
list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests);
}
static void __unregister_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
dout("__unregister_linger_request %p\n", req);
if (req->r_osd) {
list_del_init(&req->r_linger_item);
list_del_init(&req->r_linger_osd);
if (list_empty(&req->r_osd->o_requests) &&
list_empty(&req->r_osd->o_linger_requests)) {
dout("moving osd to %p lru\n", req->r_osd);
__move_osd_to_lru(osdc, req->r_osd);
}
if (list_empty(&req->r_osd_item))
req->r_osd = NULL;
}
}
void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
mutex_lock(&osdc->request_mutex);
if (req->r_linger) {
__unregister_linger_request(osdc, req);
ceph_osdc_put_request(req);
}
mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
if (!req->r_linger) {
dout("set_request_linger %p\n", req);
req->r_linger = 1;
/*
* caller is now responsible for calling
* unregister_linger_request
*/
ceph_osdc_get_request(req);
}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
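/*
 * Example (illustrative sketch, not from this file): typical linger
 * lifecycle for a watch request.  Lingering takes an extra reference on
 * the request, which the unregister call drops.
 *
 *	ceph_osdc_set_request_linger(osdc, req);
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	...
 *	ceph_osdc_unregister_linger_request(osdc, req);
 */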
/*
* Pick an osd (the first 'up' osd in the pg), allocate the osd struct
* (as needed), and set the request r_osd appropriately. If there is
* no up osd, set r_osd to NULL. Move the request to the appropriate list
 * (unsent, homeless) or leave it on the in-flight lru.
*
* Return 0 if unchanged, 1 if changed, or negative on error.
*
* Caller should hold map_sem for read and request_mutex.
*/
static int __map_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req, int force_resend)
{
struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
struct ceph_pg pgid;
int acting[CEPH_PG_MAX_SIZE];
int o = -1, num = 0;
int err;
dout("map_request %p tid %lld\n", req, req->r_tid);
err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
&req->r_file_layout, osdc->osdmap);
if (err) {
list_move(&req->r_req_lru_item, &osdc->req_notarget);
return err;
}
pgid = reqhead->layout.ol_pgid;
req->r_pgid = pgid;
err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
if (err > 0) {
o = acting[0];
num = err;
}
if ((!force_resend &&
req->r_osd && req->r_osd->o_osd == o &&
req->r_sent >= req->r_osd->o_incarnation &&
req->r_num_pg_osds == num &&
memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
(req->r_osd == NULL && o == -1))
return 0; /* no change */
dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
req->r_osd ? req->r_osd->o_osd : -1);
/* record full pg acting set */
memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
req->r_num_pg_osds = num;
if (req->r_osd) {
__cancel_request(req);
list_del_init(&req->r_osd_item);
req->r_osd = NULL;
}
req->r_osd = __lookup_osd(osdc, o);
if (!req->r_osd && o >= 0) {
err = -ENOMEM;
req->r_osd = create_osd(osdc);
if (!req->r_osd) {
list_move(&req->r_req_lru_item, &osdc->req_notarget);
goto out;
}
dout("map_request osd %p is osd%d\n", req->r_osd, o);
req->r_osd->o_osd = o;
req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
__insert_osd(osdc, req->r_osd);
ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
}
if (req->r_osd) {
__remove_osd_from_lru(req->r_osd);
list_add(&req->r_osd_item, &req->r_osd->o_requests);
list_move(&req->r_req_lru_item, &osdc->req_unsent);
} else {
list_move(&req->r_req_lru_item, &osdc->req_notarget);
}
err = 1; /* osd or pg changed */
out:
return err;
}
/*
* caller should hold map_sem (for read) and request_mutex
*/
static void __send_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
struct ceph_osd_request_head *reqhead;
dout("send_request %p tid %llu to osd%d flags %d\n",
req, req->r_tid, req->r_osd->o_osd, req->r_flags);
reqhead = req->r_request->front.iov_base;
reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
reqhead->reassert_version = req->r_reassert_version;
req->r_stamp = jiffies;
list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
ceph_msg_get(req->r_request); /* send consumes a ref */
ceph_con_send(&req->r_osd->o_con, req->r_request);
req->r_sent = req->r_osd->o_incarnation;
}
/*
* Send any requests in the queue (req_unsent).
*/
static void send_queued(struct ceph_osd_client *osdc)
{
struct ceph_osd_request *req, *tmp;
dout("send_queued\n");
mutex_lock(&osdc->request_mutex);
list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
__send_request(osdc, req);
}
mutex_unlock(&osdc->request_mutex);
}
/*
* Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds. When this
* happens, we ping all OSDs with requests who have timed out to
* ensure any communications channel reset is detected. Reset the
* request timeouts another N seconds in the future as we go.
* Reschedule the timeout event another N seconds in future (unless
* there are no open requests).
*/
static void handle_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client, timeout_work.work);
struct ceph_osd_request *req, *last_req = NULL;
struct ceph_osd *osd;
unsigned long timeout = osdc->client->options->osd_timeout * HZ;
unsigned long keepalive =
osdc->client->options->osd_keepalive_timeout * HZ;
unsigned long last_stamp = 0;
struct list_head slow_osds;
dout("timeout\n");
down_read(&osdc->map_sem);
ceph_monc_request_next_osdmap(&osdc->client->monc);
mutex_lock(&osdc->request_mutex);
/*
* reset osds that appear to be _really_ unresponsive. this
 * is a failsafe measure.  we really shouldn't be getting to
* this point if the system is working properly. the monitors
* should mark the osd as failed and we should find out about
* it from an updated osd map.
*/
while (timeout && !list_empty(&osdc->req_lru)) {
req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
r_req_lru_item);
/* hasn't been long enough since we sent it? */
if (time_before(jiffies, req->r_stamp + timeout))
break;
/* hasn't been long enough since it was acked? */
if (req->r_request->ack_stamp == 0 ||
time_before(jiffies, req->r_request->ack_stamp + timeout))
break;
BUG_ON(req == last_req && req->r_stamp == last_stamp);
last_req = req;
last_stamp = req->r_stamp;
osd = req->r_osd;
BUG_ON(!osd);
pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
req->r_tid, osd->o_osd);
__kick_osd_requests(osdc, osd);
}
/*
* ping osds that are a bit slow. this ensures that if there
* is a break in the TCP connection we will notice, and reopen
* a connection with that osd (from the fault callback).
*/
INIT_LIST_HEAD(&slow_osds);
list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
if (time_before(jiffies, req->r_stamp + keepalive))
break;
osd = req->r_osd;
BUG_ON(!osd);
dout(" tid %llu is slow, will send keepalive on osd%d\n",
req->r_tid, osd->o_osd);
list_move_tail(&osd->o_keepalive_item, &slow_osds);
}
while (!list_empty(&slow_osds)) {
osd = list_entry(slow_osds.next, struct ceph_osd,
o_keepalive_item);
list_del_init(&osd->o_keepalive_item);
ceph_con_keepalive(&osd->o_con);
}
__schedule_osd_timeout(osdc);
mutex_unlock(&osdc->request_mutex);
send_queued(osdc);
up_read(&osdc->map_sem);
}
static void handle_osds_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client,
osds_timeout_work.work);
unsigned long delay =
osdc->client->options->osd_idle_ttl * HZ >> 2;
dout("osds timeout\n");
down_read(&osdc->map_sem);
remove_old_osds(osdc);
up_read(&osdc->map_sem);
schedule_delayed_work(&osdc->osds_timeout_work,
round_jiffies_relative(delay));
}
static void complete_request(struct ceph_osd_request *req)
{
if (req->r_safe_callback)
req->r_safe_callback(req, NULL);
complete_all(&req->r_safe_completion); /* fsync waiter */
}
/*
* handle osd op reply. either call the callback if it is specified,
* or do the completion to wake up the waiting thread.
*/
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
struct ceph_connection *con)
{
struct ceph_osd_reply_head *rhead = msg->front.iov_base;
struct ceph_osd_request *req;
u64 tid;
int numops, object_len, flags;
s32 result;
tid = le64_to_cpu(msg->hdr.tid);
if (msg->front.iov_len < sizeof(*rhead))
goto bad;
numops = le32_to_cpu(rhead->num_ops);
object_len = le32_to_cpu(rhead->object_len);
result = le32_to_cpu(rhead->result);
if (msg->front.iov_len != sizeof(*rhead) + object_len +
numops * sizeof(struct ceph_osd_op))
goto bad;
dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
/* lookup */
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (req == NULL) {
dout("handle_reply tid %llu dne\n", tid);
mutex_unlock(&osdc->request_mutex);
return;
}
ceph_osdc_get_request(req);
flags = le32_to_cpu(rhead->flags);
/*
* if this connection filled our message, drop our reference now, to
* avoid a (safe but slower) revoke later.
*/
if (req->r_con_filling_msg == con && req->r_reply == msg) {
dout(" dropping con_filling_msg ref %p\n", con);
req->r_con_filling_msg = NULL;
ceph_con_put(con);
}
if (!req->r_got_reply) {
unsigned bytes;
req->r_result = le32_to_cpu(rhead->result);
bytes = le32_to_cpu(msg->hdr.data_len);
dout("handle_reply result %d bytes %d\n", req->r_result,
bytes);
if (req->r_result == 0)
req->r_result = bytes;
/* in case this is a write and we need to replay, */
req->r_reassert_version = rhead->reassert_version;
req->r_got_reply = 1;
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
dout("handle_reply tid %llu dup ack\n", tid);
mutex_unlock(&osdc->request_mutex);
goto done;
}
dout("handle_reply tid %llu flags %d\n", tid, flags);
if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
__register_linger_request(osdc, req);
/* either this is a read, or we got the safe response */
if (result < 0 ||
(flags & CEPH_OSD_FLAG_ONDISK) ||
((flags & CEPH_OSD_FLAG_WRITE) == 0))
__unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
if (req->r_callback)
req->r_callback(req, msg);
else
complete_all(&req->r_completion);
if (flags & CEPH_OSD_FLAG_ONDISK)
complete_request(req);
done:
dout("req=%p req->r_linger=%d\n", req, req->r_linger);
ceph_osdc_put_request(req);
return;
bad:
pr_err("corrupt osd_op_reply got %d %d expected %d\n",
(int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
(int)sizeof(*rhead));
ceph_msg_dump(msg);
}
static void reset_changed_osds(struct ceph_osd_client *osdc)
{
struct rb_node *p, *n;
for (p = rb_first(&osdc->osds); p; p = n) {
struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
n = rb_next(p);
if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
memcmp(&osd->o_con.peer_addr,
ceph_osd_addr(osdc->osdmap,
osd->o_osd),
sizeof(struct ceph_entity_addr)) != 0)
__reset_osd(osdc, osd);
}
}
/*
* Requeue requests whose mapping to an OSD has changed. If requests map to
* no osd, request a new map.
*
* Caller should hold map_sem for read and request_mutex.
*/
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
struct ceph_osd_request *req, *nreq;
struct rb_node *p;
int needmap = 0;
int err;
dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
req = rb_entry(p, struct ceph_osd_request, r_node);
err = __map_request(osdc, req, force_resend);
if (err < 0)
continue; /* error */
if (req->r_osd == NULL) {
dout("%p tid %llu maps to no osd\n", req, req->r_tid);
needmap++; /* request a newer map */
} else if (err > 0) {
dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1);
if (!req->r_linger)
req->r_flags |= CEPH_OSD_FLAG_RETRY;
}
}
list_for_each_entry_safe(req, nreq, &osdc->req_linger,
r_linger_item) {
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
err = __map_request(osdc, req, force_resend);
if (err == 0)
continue; /* no change and no osd was specified */
if (err < 0)
continue; /* hrm! */
if (req->r_osd == NULL) {
dout("tid %llu maps to no valid osd\n", req->r_tid);
needmap++; /* request a newer map */
continue;
}
dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1);
__unregister_linger_request(osdc, req);
__register_request(osdc, req);
}
mutex_unlock(&osdc->request_mutex);
if (needmap) {
dout("%d requests for down osds, need new map\n", needmap);
ceph_monc_request_next_osdmap(&osdc->client->monc);
}
}
/*
* Process updated osd map.
*
* The message contains any number of incremental and full maps, normally
* indicating some sort of topology change in the cluster. Kick requests
* off to different OSDs as needed.
*/
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
void *p, *end, *next;
u32 nr_maps, maplen;
u32 epoch;
struct ceph_osdmap *newmap = NULL, *oldmap;
int err;
struct ceph_fsid fsid;
dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
p = msg->front.iov_base;
end = p + msg->front.iov_len;
/* verify fsid */
ceph_decode_need(&p, end, sizeof(fsid), bad);
ceph_decode_copy(&p, &fsid, sizeof(fsid));
if (ceph_check_fsid(osdc->client, &fsid) < 0)
return;
down_write(&osdc->map_sem);
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(" %d inc maps\n", nr_maps);
while (nr_maps > 0) {
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
ceph_decode_need(&p, end, maplen, bad);
next = p + maplen;
if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
dout("applying incremental map %u len %d\n",
epoch, maplen);
newmap = osdmap_apply_incremental(&p, next,
osdc->osdmap,
osdc->client->msgr);
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad;
}
BUG_ON(!newmap);
if (newmap != osdc->osdmap) {
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = newmap;
}
kick_requests(osdc, 0);
reset_changed_osds(osdc);
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
}
p = next;
nr_maps--;
}
if (newmap)
goto done;
/* full maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(" %d full maps\n", nr_maps);
while (nr_maps) {
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
ceph_decode_need(&p, end, maplen, bad);
if (nr_maps > 1) {
dout("skipping non-latest full map %u len %d\n",
epoch, maplen);
} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
dout("skipping full map %u len %d, "
"older than our %u\n", epoch, maplen,
osdc->osdmap->epoch);
} else {
int skipped_map = 0;
dout("taking full map %u len %d\n", epoch, maplen);
newmap = osdmap_decode(&p, p+maplen);
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad;
}
BUG_ON(!newmap);
oldmap = osdc->osdmap;
osdc->osdmap = newmap;
if (oldmap) {
if (oldmap->epoch + 1 < newmap->epoch)
skipped_map = 1;
ceph_osdmap_destroy(oldmap);
}
kick_requests(osdc, skipped_map);
}
p += maplen;
nr_maps--;
}
done:
downgrade_write(&osdc->map_sem);
ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
/*
* subscribe to subsequent osdmap updates if full to ensure
* we find out when we are no longer full and stop returning
* ENOSPC.
*/
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
ceph_monc_request_next_osdmap(&osdc->client->monc);
send_queued(osdc);
up_read(&osdc->map_sem);
wake_up_all(&osdc->client->auth_wq);
return;
bad:
pr_err("osdc handle_map corrupt msg\n");
ceph_msg_dump(msg);
up_write(&osdc->map_sem);
return;
}
/*
* watch/notify callback event infrastructure
*
* These callbacks are used both for watch and notify operations.
*/
static void __release_event(struct kref *kref)
{
struct ceph_osd_event *event =
container_of(kref, struct ceph_osd_event, kref);
dout("__release_event %p\n", event);
kfree(event);
}
static void get_event(struct ceph_osd_event *event)
{
kref_get(&event->kref);
}
void ceph_osdc_put_event(struct ceph_osd_event *event)
{
kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);
static void __insert_event(struct ceph_osd_client *osdc,
struct ceph_osd_event *new)
{
struct rb_node **p = &osdc->event_tree.rb_node;
struct rb_node *parent = NULL;
struct ceph_osd_event *event = NULL;
while (*p) {
parent = *p;
event = rb_entry(parent, struct ceph_osd_event, node);
if (new->cookie < event->cookie)
p = &(*p)->rb_left;
else if (new->cookie > event->cookie)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new->node, parent, p);
rb_insert_color(&new->node, &osdc->event_tree);
}
static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
u64 cookie)
{
struct rb_node **p = &osdc->event_tree.rb_node;
struct rb_node *parent = NULL;
struct ceph_osd_event *event = NULL;
while (*p) {
parent = *p;
event = rb_entry(parent, struct ceph_osd_event, node);
if (cookie < event->cookie)
p = &(*p)->rb_left;
else if (cookie > event->cookie)
p = &(*p)->rb_right;
else
return event;
}
return NULL;
}
static void __remove_event(struct ceph_osd_event *event)
{
struct ceph_osd_client *osdc = event->osdc;
if (!RB_EMPTY_NODE(&event->node)) {
dout("__remove_event removed %p\n", event);
rb_erase(&event->node, &osdc->event_tree);
ceph_osdc_put_event(event);
} else {
dout("__remove_event didn't remove %p\n", event);
}
}
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
void (*event_cb)(u64, u64, u8, void *),
int one_shot, void *data,
struct ceph_osd_event **pevent)
{
struct ceph_osd_event *event;
event = kmalloc(sizeof(*event), GFP_NOIO);
if (!event)
return -ENOMEM;
dout("create_event %p\n", event);
event->cb = event_cb;
event->one_shot = one_shot;
event->data = data;
event->osdc = osdc;
INIT_LIST_HEAD(&event->osd_node);
kref_init(&event->kref); /* one ref for us */
kref_get(&event->kref); /* one ref for the caller */
init_completion(&event->completion);
spin_lock(&osdc->event_lock);
event->cookie = ++osdc->event_count;
__insert_event(osdc, event);
spin_unlock(&osdc->event_lock);
*pevent = event;
return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
struct ceph_osd_client *osdc = event->osdc;
dout("cancel_event %p\n", event);
spin_lock(&osdc->event_lock);
__remove_event(event);
spin_unlock(&osdc->event_lock);
ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);
static void do_event_work(struct work_struct *work)
{
struct ceph_osd_event_work *event_work =
container_of(work, struct ceph_osd_event_work, work);
struct ceph_osd_event *event = event_work->event;
u64 ver = event_work->ver;
u64 notify_id = event_work->notify_id;
u8 opcode = event_work->opcode;
dout("do_event_work completing %p\n", event);
event->cb(ver, notify_id, opcode, event->data);
complete(&event->completion);
dout("do_event_work completed %p\n", event);
ceph_osdc_put_event(event);
kfree(event_work);
}
/*
* Process osd watch notifications
*/
void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
void *p, *end;
u8 proto_ver;
u64 cookie, ver, notify_id;
u8 opcode;
struct ceph_osd_event *event;
struct ceph_osd_event_work *event_work;
p = msg->front.iov_base;
end = p + msg->front.iov_len;
ceph_decode_8_safe(&p, end, proto_ver, bad);
ceph_decode_8_safe(&p, end, opcode, bad);
ceph_decode_64_safe(&p, end, cookie, bad);
ceph_decode_64_safe(&p, end, ver, bad);
ceph_decode_64_safe(&p, end, notify_id, bad);
spin_lock(&osdc->event_lock);
event = __find_event(osdc, cookie);
if (event) {
get_event(event);
if (event->one_shot)
__remove_event(event);
}
spin_unlock(&osdc->event_lock);
dout("handle_watch_notify cookie %lld ver %lld event %p\n",
cookie, ver, event);
if (event) {
event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
if (!event_work) {
dout("ERROR: could not allocate event_work\n");
goto done_err;
}
INIT_WORK(&event_work->work, do_event_work);
event_work->event = event;
event_work->ver = ver;
event_work->notify_id = notify_id;
event_work->opcode = opcode;
if (!queue_work(osdc->notify_wq, &event_work->work)) {
dout("WARNING: failed to queue notify event work\n");
goto done_err;
}
}
return;
done_err:
complete(&event->completion);
ceph_osdc_put_event(event);
return;
bad:
pr_err("osdc handle_watch_notify corrupt msg\n");
return;
}
int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
{
int err;
dout("wait_event %p\n", event);
err = wait_for_completion_interruptible_timeout(&event->completion,
timeout * HZ);
ceph_osdc_put_event(event);
if (err > 0)
err = 0;
dout("wait_event %p returns %d\n", event, err);
return err;
}
EXPORT_SYMBOL(ceph_osdc_wait_event);
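/*
 * Example (illustrative sketch, not from this file): pairing the event
 * API with a notify operation.  my_notify_cb is a hypothetical callback
 * with the void (*)(u64, u64, u8, void *) signature expected by
 * ceph_osdc_create_event().
 *
 *	struct ceph_osd_event *event;
 *	int ret;
 *
 *	ret = ceph_osdc_create_event(osdc, my_notify_cb, 1, data, &event);
 *	if (ret)
 *		return ret;
 *	...submit a request carrying CEPH_OSD_OP_NOTIFY...
 *	ret = ceph_osdc_wait_event(event, 10);
 *
 * The timeout is in seconds, and note that ceph_osdc_wait_event() drops
 * the caller's event reference before returning.
 */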
/*
* Register request, send initial attempt.
*/
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req,
bool nofail)
{
int rc = 0;
req->r_request->pages = req->r_pages;
req->r_request->nr_pages = req->r_num_pages;
#ifdef CONFIG_BLOCK
req->r_request->bio = req->r_bio;
#endif
req->r_request->trail = req->r_trail;
register_request(osdc, req);
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
/*
* a racing kick_requests() may have sent the message for us
* while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
*/
if (req->r_sent == 0) {
rc = __map_request(osdc, req, 0);
if (rc < 0) {
if (nofail) {
dout("osdc_start_request failed map, "
" will retry %lld\n", req->r_tid);
rc = 0;
}
goto out_unlock;
}
if (req->r_osd == NULL) {
dout("send_request %p no up osds in pg\n", req);
ceph_monc_request_next_osdmap(&osdc->client->monc);
} else {
__send_request(osdc, req);
}
rc = 0;
}
out_unlock:
mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
/*
* wait for a request to complete
*/
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
int rc;
rc = wait_for_completion_interruptible(&req->r_completion);
if (rc < 0) {
mutex_lock(&osdc->request_mutex);
__cancel_request(req);
__unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
complete_request(req);
dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
return rc;
}
dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
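/*
 * Example (illustrative sketch, not from this file): the common
 * synchronous submission pattern, used verbatim by ceph_osdc_readpages()
 * and ceph_osdc_writepages() below.
 *
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */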
/*
* sync - wait for all in-flight requests to flush. avoid starvation.
*/
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
struct ceph_osd_request *req;
u64 last_tid, next_tid = 0;
mutex_lock(&osdc->request_mutex);
last_tid = osdc->last_tid;
while (1) {
req = __lookup_request_ge(osdc, next_tid);
if (!req)
break;
if (req->r_tid > last_tid)
break;
next_tid = req->r_tid + 1;
if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
continue;
ceph_osdc_get_request(req);
mutex_unlock(&osdc->request_mutex);
dout("sync waiting on tid %llu (last is %llu)\n",
req->r_tid, last_tid);
wait_for_completion(&req->r_safe_completion);
mutex_lock(&osdc->request_mutex);
ceph_osdc_put_request(req);
}
mutex_unlock(&osdc->request_mutex);
dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
/*
* init, shutdown
*/
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
int err;
dout("init\n");
osdc->client = client;
osdc->osdmap = NULL;
init_rwsem(&osdc->map_sem);
init_completion(&osdc->map_waiters);
osdc->last_requested_map = 0;
mutex_init(&osdc->request_mutex);
osdc->last_tid = 0;
osdc->osds = RB_ROOT;
INIT_LIST_HEAD(&osdc->osd_lru);
osdc->requests = RB_ROOT;
INIT_LIST_HEAD(&osdc->req_lru);
INIT_LIST_HEAD(&osdc->req_unsent);
INIT_LIST_HEAD(&osdc->req_notarget);
INIT_LIST_HEAD(&osdc->req_linger);
osdc->num_requests = 0;
INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
spin_lock_init(&osdc->event_lock);
osdc->event_tree = RB_ROOT;
osdc->event_count = 0;
schedule_delayed_work(&osdc->osds_timeout_work,
round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
err = -ENOMEM;
osdc->req_mempool = mempool_create_kmalloc_pool(10,
sizeof(struct ceph_osd_request));
if (!osdc->req_mempool)
goto out;
err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
"osd_op");
if (err < 0)
goto out_mempool;
err = ceph_msgpool_init(&osdc->msgpool_op_reply,
OSD_OPREPLY_FRONT_LEN, 10, true,
"osd_op_reply");
if (err < 0)
goto out_msgpool;
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq) {
		/* create_singlethread_workqueue() returns NULL on
		 * failure, not an ERR_PTR */
		err = -ENOMEM;
		goto out_msgpool;
	}
return 0;
out_msgpool:
ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
mempool_destroy(osdc->req_mempool);
out:
return err;
}
EXPORT_SYMBOL(ceph_osdc_init);
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
flush_workqueue(osdc->notify_wq);
destroy_workqueue(osdc->notify_wq);
cancel_delayed_work_sync(&osdc->timeout_work);
cancel_delayed_work_sync(&osdc->osds_timeout_work);
if (osdc->osdmap) {
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = NULL;
}
remove_all_osds(osdc);
mempool_destroy(osdc->req_mempool);
ceph_msgpool_destroy(&osdc->msgpool_op);
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
EXPORT_SYMBOL(ceph_osdc_stop);
/*
* Read some contiguous pages. If we cross a stripe boundary, shorten
* *plen. Return number of bytes read, or error.
*/
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino, struct ceph_file_layout *layout,
u64 off, u64 *plen,
u32 truncate_seq, u64 truncate_size,
struct page **pages, int num_pages, int page_align)
{
struct ceph_osd_request *req;
int rc = 0;
dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
vino.snap, off, *plen);
req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
NULL, 0, truncate_seq, truncate_size, NULL,
false, 1, page_align);
if (!req)
return -ENOMEM;
/* it may be a short read due to an object boundary */
req->r_pages = pages;
dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
off, *plen, req->r_num_pages, page_align);
rc = ceph_osdc_start_request(osdc, req, false);
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
ceph_osdc_put_request(req);
dout("readpages result %d\n", rc);
return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
/*
* do a synchronous write on N pages
*/
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
struct ceph_file_layout *layout,
struct ceph_snap_context *snapc,
u64 off, u64 len,
u32 truncate_seq, u64 truncate_size,
struct timespec *mtime,
struct page **pages, int num_pages,
int flags, int do_sync, bool nofail)
{
struct ceph_osd_request *req;
int rc = 0;
int page_align = off & ~PAGE_MASK;
BUG_ON(vino.snap != CEPH_NOSNAP);
req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
CEPH_OSD_OP_WRITE,
flags | CEPH_OSD_FLAG_ONDISK |
CEPH_OSD_FLAG_WRITE,
snapc, do_sync,
truncate_seq, truncate_size, mtime,
nofail, 1, page_align);
if (!req)
return -ENOMEM;
/* it may be a short write due to an object boundary */
req->r_pages = pages;
dout("writepages %llu~%llu (%d pages)\n", off, len,
req->r_num_pages);
rc = ceph_osdc_start_request(osdc, req, nofail);
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
ceph_osdc_put_request(req);
if (rc == 0)
rc = len;
dout("writepages result %d\n", rc);
return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
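/*
 * Note: page_align above is the write's offset within its first page.
 * With 4 KiB pages, off = 5000 gives page_align = 5000 & ~PAGE_MASK =
 * 904, so the first page carries 4096 - 904 = 3192 bytes of the write.
 */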
/*
* handle incoming message
*/
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc;
int type = le16_to_cpu(msg->hdr.type);
if (!osd)
goto out;
osdc = osd->o_osdc;
switch (type) {
case CEPH_MSG_OSD_MAP:
ceph_osdc_handle_map(osdc, msg);
break;
case CEPH_MSG_OSD_OPREPLY:
handle_reply(osdc, msg, con);
break;
case CEPH_MSG_WATCH_NOTIFY:
handle_watch_notify(osdc, msg);
break;
default:
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
out:
ceph_msg_put(msg);
}
/*
* lookup and return message for incoming reply. set up reply message
* pages.
*/
static struct ceph_msg *get_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
struct ceph_msg *m;
struct ceph_osd_request *req;
int front = le32_to_cpu(hdr->front_len);
int data_len = le32_to_cpu(hdr->data_len);
u64 tid;
tid = le64_to_cpu(hdr->tid);
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (!req) {
*skip = 1;
m = NULL;
pr_info("get_reply unknown tid %llu from osd%d\n", tid,
osd->o_osd);
goto out;
}
if (req->r_con_filling_msg) {
dout("get_reply revoking msg %p from old con %p\n",
req->r_reply, req->r_con_filling_msg);
ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
ceph_con_put(req->r_con_filling_msg);
req->r_con_filling_msg = NULL;
}
if (front > req->r_reply->front.iov_len) {
pr_warning("get_reply front %d > preallocated %d\n",
front, (int)req->r_reply->front.iov_len);
m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
if (!m)
goto out;
ceph_msg_put(req->r_reply);
req->r_reply = m;
}
m = ceph_msg_get(req->r_reply);
if (data_len > 0) {
int want = calc_pages_for(req->r_page_alignment, data_len);
if (unlikely(req->r_num_pages < want)) {
pr_warning("tid %lld reply has %d bytes %d pages, we"
" had only %d pages ready\n", tid, data_len,
want, req->r_num_pages);
*skip = 1;
ceph_msg_put(m);
m = NULL;
goto out;
}
m->pages = req->r_pages;
m->nr_pages = req->r_num_pages;
m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
m->bio = req->r_bio;
#endif
}
*skip = 0;
req->r_con_filling_msg = ceph_con_get(con);
dout("get_reply tid %lld %p\n", tid, m);
out:
mutex_unlock(&osdc->request_mutex);
return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
{
struct ceph_osd *osd = con->private;
int type = le16_to_cpu(hdr->type);
int front = le32_to_cpu(hdr->front_len);
switch (type) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_WATCH_NOTIFY:
return ceph_msg_new(type, front, GFP_NOFS, false);
case CEPH_MSG_OSD_OPREPLY:
return get_reply(con, hdr, skip);
default:
pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
osd->o_osd);
*skip = 1;
return NULL;
}
}
/*
 * Wrappers to refcount the containing ceph_osd struct
*/
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
if (get_osd(osd))
return con;
return NULL;
}
static void put_osd_con(struct ceph_connection *con)
{
struct ceph_osd *osd = con->private;
put_osd(osd);
}
/*
* authentication
*/
static int get_authorizer(struct ceph_connection *con,
void **buf, int *len, int *proto,
void **reply_buf, int *reply_len, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
int ret = 0;
if (force_new && o->o_authorizer) {
ac->ops->destroy_authorizer(ac, o->o_authorizer);
o->o_authorizer = NULL;
}
if (o->o_authorizer == NULL) {
ret = ac->ops->create_authorizer(
ac, CEPH_ENTITY_TYPE_OSD,
&o->o_authorizer,
&o->o_authorizer_buf,
&o->o_authorizer_buf_len,
&o->o_authorizer_reply_buf,
&o->o_authorizer_reply_buf_len);
if (ret)
return ret;
}
*proto = ac->protocol;
*buf = o->o_authorizer_buf;
*len = o->o_authorizer_buf_len;
*reply_buf = o->o_authorizer_reply_buf;
*reply_len = o->o_authorizer_reply_buf_len;
return 0;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
if (ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
return ceph_monc_validate_auth(&osdc->client->monc);
}
static const struct ceph_connection_operations osd_con_ops = {
.get = get_osd_con,
.put = put_osd_con,
.dispatch = dispatch,
.get_authorizer = get_authorizer,
.verify_authorizer_reply = verify_authorizer_reply,
.invalidate_authorizer = invalidate_authorizer,
.alloc_msg = alloc_msg,
.fault = osd_reset,
};
| gpl-2.0 |
varigit/VAR-SOM-AM33-SDK7-Kernel | arch/mips/jazz/irq.c | 3615 | 4294 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
*/
#include <linux/clockchips.h>
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>
static DEFINE_RAW_SPINLOCK(r4030_lock);
static void enable_r4030_irq(struct irq_data *d)
{
unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START);
unsigned long flags;
raw_spin_lock_irqsave(&r4030_lock, flags);
mask |= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
raw_spin_unlock_irqrestore(&r4030_lock, flags);
}
void disable_r4030_irq(struct irq_data *d)
{
unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START));
unsigned long flags;
raw_spin_lock_irqsave(&r4030_lock, flags);
mask &= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
raw_spin_unlock_irqrestore(&r4030_lock, flags);
}
static struct irq_chip r4030_irq_type = {
.name = "R4030",
.irq_mask = disable_r4030_irq,
.irq_unmask = enable_r4030_irq,
};
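/*
 * Note: the mask handling above dedicates one bit per interrupt.  For
 * d->irq = JAZZ_IRQ_START + 3, for example, the mask is 1 << 3 = 0x8,
 * which is OR-ed into (enable) or cleared from (disable) the
 * JAZZ_IO_IRQ_ENABLE register under r4030_lock.
 */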
void __init init_r4030_ints(void)
{
int i;
for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); /* clear error bits */
}
/*
 * On systems with i8259-style interrupt controllers we assume, for
 * driver compatibility reasons, that interrupts 0 - 15 are the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
void __init arch_init_irq(void)
{
/*
	 * this is a hack to restore the still-needed wired mapping
	 * that init_mm() killed
*/
/* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
/* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
/* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
init_i8259_irqs(); /* Integrated i8259 */
mips_cpu_irq_init();
init_r4030_ints();
change_c0_status(ST0_IM, IE_IRQ2 | IE_IRQ1);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_cause() & read_c0_status();
unsigned int irq;
if (pending & IE_IRQ4) {
r4030_read_reg32(JAZZ_TIMER_REGISTER);
do_IRQ(JAZZ_TIMER_IRQ);
} else if (pending & IE_IRQ2) {
irq = *(volatile u8 *)JAZZ_EISA_IRQ_ACK;
do_IRQ(irq);
} else if (pending & IE_IRQ1) {
irq = *(volatile u8 *)JAZZ_IO_IRQ_SOURCE >> 2;
if (likely(irq > 0))
do_IRQ(irq + JAZZ_IRQ_START - 1);
else
panic("Unimplemented loc_no_irq handler");
}
}
static void r4030_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
/* Nothing to do ... */
}
struct clock_event_device r4030_clockevent = {
.name = "r4030",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.irq = JAZZ_TIMER_IRQ,
.set_mode = r4030_set_mode,
};
static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction r4030_timer_irqaction = {
.handler = r4030_timer_interrupt,
.flags = IRQF_TIMER,
.name = "R4030 timer",
};
void __init plat_time_init(void)
{
struct clock_event_device *cd = &r4030_clockevent;
struct irqaction *action = &r4030_timer_irqaction;
unsigned int cpu = smp_processor_id();
BUG_ON(HZ != 100);
cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
action->dev_id = cd;
setup_irq(JAZZ_TIMER_IRQ, action);
/*
* Set clock to 100Hz.
*
	 * The R4030 timer receives an input clock of 1kHz which is divided by
* a programmable 4-bit divider. This makes it fairly inflexible.
*/
r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
setup_pit_timer();
}
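/*
 * Note: the interval register appears to hold (divider - 1), so the
 * value 9 written above divides the 1 kHz input clock by ten:
 * 1000 Hz / (9 + 1) = 100 Hz, matching the BUG_ON(HZ != 100) check.
 */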
| gpl-2.0 |
danonbrown/trltetmo-kernel | drivers/scsi/isci/remote_node_context.c | 4639 | 25675 | /*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"
#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
return strings[state];
}
#undef C
/**
 *
 * @sci_rnc: The remote node context object to check.
 *
 * This method will return true if the remote node context is in a READY
 * state, and false otherwise.
 */
bool sci_remote_node_context_is_ready(
struct sci_remote_node_context *sci_rnc)
{
u32 current_state = sci_rnc->sm.current_state_id;
if (current_state == SCI_RNC_READY) {
return true;
}
return false;
}
bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
{
u32 current_state = sci_rnc->sm.current_state_id;
if (current_state == SCI_RNC_TX_RX_SUSPENDED)
return true;
return false;
}
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
if (id < ihost->remote_node_entries &&
ihost->device_table[id])
return &ihost->remote_node_context_table[id];
return NULL;
}
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
int rni = sci_rnc->remote_node_index;
union scu_remote_node_context *rnc;
struct isci_host *ihost;
__le64 sas_addr;
ihost = idev->owning_port->owning_controller;
rnc = sci_rnc_by_id(ihost, rni);
memset(rnc, 0, sizeof(union scu_remote_node_context)
* sci_remote_device_node_count(idev));
rnc->ssp.remote_node_index = rni;
rnc->ssp.remote_node_port_width = idev->device_port_width;
rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
/* sas address is __be64, context ram format is __le64 */
sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
rnc->ssp.nexus_loss_timer_enable = true;
rnc->ssp.check_bit = false;
rnc->ssp.is_valid = false;
rnc->ssp.is_remote_node_context = true;
rnc->ssp.function_number = 0;
rnc->ssp.arbitration_wait_time = 0;
if (dev_is_sata(dev)) {
rnc->ssp.connection_occupancy_timeout =
ihost->user_parameters.stp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
ihost->user_parameters.stp_inactivity_timeout;
} else {
rnc->ssp.connection_occupancy_timeout =
ihost->user_parameters.ssp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
ihost->user_parameters.ssp_inactivity_timeout;
}
rnc->ssp.initial_arbitration_wait_time = 0;
/* Open Address Frame Parameters */
rnc->ssp.oaf_connection_rate = idev->connection_rate;
rnc->ssp.oaf_features = 0;
rnc->ssp.oaf_source_zone_group = 0;
rnc->ssp.oaf_more_compatibility_features = 0;
}
/**
 *
 * @sci_rnc: The remote node context to set up.
 * @callback: User callback to invoke once the transition completes.
 * @callback_parameter: Cookie passed back to @callback.
 * @dest_param: The destination state to transition toward.
 *
 * This method will set up the remote node context object so it will
 * transition to its ready state.  If the remote node context is already
 * set up to transition to its final state then this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter,
enum sci_remote_node_context_destination_state dest_param)
{
if (sci_rnc->destination_state != RNC_DEST_FINAL) {
sci_rnc->destination_state = dest_param;
if (callback != NULL) {
sci_rnc->user_callback = callback;
sci_rnc->user_cookie = callback_parameter;
}
}
}
static void sci_remote_node_context_setup_to_destroy(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
sci_rnc->destination_state = RNC_DEST_FINAL;
sci_rnc->user_callback = callback;
sci_rnc->user_cookie = callback_parameter;
wake_up(&ihost->eventq);
}
/**
 *
 * This method just calls the user callback function and then resets the
 * callback.
 */
static void sci_remote_node_context_notify_user(
struct sci_remote_node_context *rnc)
{
if (rnc->user_callback != NULL) {
(*rnc->user_callback)(rnc->user_cookie);
rnc->user_callback = NULL;
rnc->user_cookie = NULL;
}
}
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
switch (rnc->destination_state) {
case RNC_DEST_READY:
case RNC_DEST_SUSPENDED_RESUME:
rnc->destination_state = RNC_DEST_READY;
/* Fall through... */
case RNC_DEST_FINAL:
sci_remote_node_context_resume(rnc, rnc->user_callback,
rnc->user_cookie);
break;
default:
rnc->destination_state = RNC_DEST_UNSPECIFIED;
break;
}
}
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
union scu_remote_node_context *rnc_buffer;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
struct isci_host *ihost = idev->owning_port->owning_controller;
rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
rnc_buffer->ssp.is_valid = true;
if (dev_is_sata(dev) && dev->parent) {
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
} else {
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
if (!dev->parent)
sci_port_setup_transports(idev->owning_port,
sci_rnc->remote_node_index);
}
}
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
union scu_remote_node_context *rnc_buffer;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct isci_host *ihost = idev->owning_port->owning_controller;
rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
rnc_buffer->ssp.is_valid = false;
sci_remote_device_post_request(rnc_to_dev(sci_rnc),
SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}
static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct isci_remote_device *idev = rnc_to_dev(rnc);
struct isci_host *ihost = idev->owning_port->owning_controller;
/* Check to see if we have gotten back to the initial state because
* someone requested to destroy the remote node context object.
*/
if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
rnc->destination_state = RNC_DEST_UNSPECIFIED;
sci_remote_node_context_notify_user(rnc);
smp_wmb();
wake_up(&ihost->eventq);
}
}
static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
sci_remote_node_context_validate_context_buffer(sci_rnc);
}
static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
/* Terminate all outstanding requests. */
sci_remote_device_terminate_requests(rnc_to_dev(rnc));
sci_remote_node_context_invalidate_context_buffer(rnc);
}
static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct isci_remote_device *idev;
struct domain_device *dev;
idev = rnc_to_dev(rnc);
dev = idev->domain_dev;
/*
* For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy and, in cases where we
	 * resume because of a target reset, also need to update
* the STPTLDARNI register with the RNi of the device
*/
if (dev_is_sata(dev) && !dev->parent)
sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
enum sci_remote_node_context_destination_state dest_select;
int tell_user = 1;
dest_select = rnc->destination_state;
rnc->destination_state = RNC_DEST_UNSPECIFIED;
if ((dest_select == RNC_DEST_SUSPENDED) ||
(dest_select == RNC_DEST_SUSPENDED_RESUME)) {
sci_remote_node_context_suspend(
rnc, rnc->suspend_reason,
SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
if (dest_select == RNC_DEST_SUSPENDED_RESUME)
tell_user = 0; /* Wait until ready again. */
}
if (tell_user)
sci_remote_node_context_notify_user(rnc);
}
static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
sci_remote_node_context_continue_state_transitions(rnc);
}
static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct isci_remote_device *idev = rnc_to_dev(rnc);
struct isci_host *ihost = idev->owning_port->owning_controller;
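/* suspend_count appears to act as an epoch counter for waiters on
* eventq; skip 0 on wraparound so a zero count is never mistaken for
* "never suspended".
*/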
u32 new_count = rnc->suspend_count + 1;
if (new_count == 0)
rnc->suspend_count = 1;
else
rnc->suspend_count = new_count;
smp_wmb();
/* Terminate outstanding requests pending abort. */
sci_remote_device_abort_requests_pending_abort(idev);
wake_up(&ihost->eventq);
sci_remote_node_context_continue_state_transitions(rnc);
}
static void sci_remote_node_context_await_suspend_state_exit(
struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc
= container_of(sm, typeof(*rnc), sm);
struct isci_remote_device *idev = rnc_to_dev(rnc);
if (dev_is_sata(idev->domain_dev))
isci_dev_set_hang_detection_timeout(idev, 0);
}
static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_INITIAL] = {
.enter_state = sci_remote_node_context_initial_state_enter,
},
[SCI_RNC_POSTING] = {
.enter_state = sci_remote_node_context_posting_state_enter,
},
[SCI_RNC_INVALIDATING] = {
.enter_state = sci_remote_node_context_invalidating_state_enter,
},
[SCI_RNC_RESUMING] = {
.enter_state = sci_remote_node_context_resuming_state_enter,
},
[SCI_RNC_READY] = {
.enter_state = sci_remote_node_context_ready_state_enter,
},
[SCI_RNC_TX_SUSPENDED] = {
.enter_state = sci_remote_node_context_tx_suspended_state_enter,
},
[SCI_RNC_TX_RX_SUSPENDED] = {
.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
},
[SCI_RNC_AWAIT_SUSPENSION] = {
.exit_state = sci_remote_node_context_await_suspend_state_exit,
},
};
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
u16 remote_node_index)
{
memset(rnc, 0, sizeof(struct sci_remote_node_context));
rnc->remote_node_index = remote_node_index;
rnc->destination_state = RNC_DEST_UNSPECIFIED;
sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code)
{
enum scis_sds_remote_node_context_states state;
u32 next_state;
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_POSTING:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_POST_RNC_COMPLETE:
sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
break;
default:
goto out;
}
break;
case SCI_RNC_INVALIDATING:
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
if (sci_rnc->destination_state == RNC_DEST_FINAL)
next_state = SCI_RNC_INITIAL;
else
next_state = SCI_RNC_POSTING;
sci_change_state(&sci_rnc->sm, next_state);
} else {
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
/* We really don't care if the hardware is going to suspend
* the device since it's being invalidated anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
"suspended by hardware while being "
"invalidated.\n", __func__, sci_rnc);
break;
default:
goto out;
}
}
break;
case SCI_RNC_RESUMING:
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
} else {
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
/* We really don't care if the hardware is going to suspend
* the device since it's being resumed anyway */
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
"suspended by hardware while being resumed.\n",
__func__, sci_rnc);
break;
default:
goto out;
}
}
break;
case SCI_RNC_READY:
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
sci_rnc->suspend_type = scu_get_event_type(event_code);
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
sci_rnc->suspend_type = scu_get_event_type(event_code);
break;
default:
goto out;
}
break;
case SCI_RNC_AWAIT_SUSPENSION:
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
next_state = SCI_RNC_TX_SUSPENDED;
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
next_state = SCI_RNC_TX_RX_SUSPENDED;
break;
default:
goto out;
}
if (sci_rnc->suspend_type == scu_get_event_type(event_code))
sci_change_state(&sci_rnc->sm, next_state);
break;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state: %s\n", __func__,
rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
return SCI_SUCCESS;
out:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: code: %#x state: %s\n", __func__, event_code,
rnc_state_name(state));
return SCI_FAILURE;
}
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
enum scis_sds_remote_node_context_states state;
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_INVALIDATING:
sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_READY:
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
return SCI_SUCCESS;
case SCI_RNC_AWAIT_SUSPENSION:
sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
case SCI_RNC_INITIAL:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state: %s\n", __func__,
rnc_state_name(state));
/* We have decided that the destruct request on the remote node context
* cannot fail since it is either already in the initial/destroyed
* state or can be destroyed.
*/
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %s\n", __func__,
rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
enum sci_status sci_remote_node_context_suspend(
struct sci_remote_node_context *sci_rnc,
enum sci_remote_node_suspension_reasons suspend_reason,
u32 suspend_type)
{
enum scis_sds_remote_node_context_states state
= sci_rnc->sm.current_state_id;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
enum sci_status status = SCI_FAILURE_INVALID_STATE;
enum sci_remote_node_context_destination_state dest_param =
RNC_DEST_UNSPECIFIED;
dev_dbg(scirdev_to_dev(idev),
"%s: current state %s, current suspend_type %x dest state %d,"
" arg suspend_reason %d, arg suspend_type %x",
__func__, rnc_state_name(state), sci_rnc->suspend_type,
sci_rnc->destination_state, suspend_reason,
suspend_type);
/* Disable automatic state continuations if explicitly suspending. */
if ((suspend_reason == SCI_HW_SUSPEND) ||
(sci_rnc->destination_state == RNC_DEST_FINAL))
dest_param = sci_rnc->destination_state;
switch (state) {
case SCI_RNC_READY:
break;
case SCI_RNC_INVALIDATING:
if (sci_rnc->destination_state == RNC_DEST_FINAL) {
dev_warn(scirdev_to_dev(idev),
"%s: already destroying %p\n",
__func__, sci_rnc);
return SCI_FAILURE_INVALID_STATE;
}
/* Fall through and handle like SCI_RNC_POSTING */
case SCI_RNC_RESUMING:
/* Fall through and handle like SCI_RNC_POSTING */
case SCI_RNC_POSTING:
/* Set the destination state to SUSPENDED - this signals the
* entry into the SCI_RNC_READY state that a suspension
* needs to be done immediately.
*/
if (sci_rnc->destination_state != RNC_DEST_FINAL)
sci_rnc->destination_state = RNC_DEST_SUSPENDED;
sci_rnc->suspend_type = suspend_type;
sci_rnc->suspend_reason = suspend_reason;
return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
status = SCI_SUCCESS;
break;
case SCI_RNC_TX_RX_SUSPENDED:
if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
status = SCI_SUCCESS;
break;
case SCI_RNC_AWAIT_SUSPENSION:
if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
|| (suspend_type == sci_rnc->suspend_type))
return SCI_SUCCESS;
break;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %s\n", __func__,
rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
sci_rnc->destination_state = dest_param;
sci_rnc->suspend_type = suspend_type;
sci_rnc->suspend_reason = suspend_reason;
if (status == SCI_SUCCESS) { /* Already in the destination state? */
struct isci_host *ihost = idev->owning_port->owning_controller;
wake_up_all(&ihost->eventq); /* Let observers look. */
return SCI_SUCCESS;
}
if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
(suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
isci_dev_set_hang_detection_timeout(idev, 0x00000001);
sci_remote_device_post_request(
idev, SCI_SOFTWARE_SUSPEND_CMD);
}
if (state != SCI_RNC_AWAIT_SUSPENSION)
sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
return SCI_SUCCESS;
}
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
enum scis_sds_remote_node_context_states state;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
state = sci_rnc->sm.current_state_id;
dev_dbg(scirdev_to_dev(idev),
"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
"dev resume path %s\n",
__func__, rnc_state_name(state), cb_fn, cb_p,
sci_rnc->destination_state,
test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
? "<abort active>" : "<normal>");
switch (state) {
case SCI_RNC_INITIAL:
if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_STATE;
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
RNC_DEST_READY);
if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
sci_remote_node_context_construct_buffer(sci_rnc);
sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
}
return SCI_SUCCESS;
case SCI_RNC_POSTING:
case SCI_RNC_INVALIDATING:
case SCI_RNC_RESUMING:
/* We are still waiting to post when a resume was
* requested.
*/
switch (sci_rnc->destination_state) {
case RNC_DEST_SUSPENDED:
case RNC_DEST_SUSPENDED_RESUME:
/* Previously waiting to suspend after posting.
* Now continue onto resumption.
*/
sci_remote_node_context_setup_to_resume(
sci_rnc, cb_fn, cb_p,
RNC_DEST_SUSPENDED_RESUME);
break;
default:
sci_remote_node_context_setup_to_resume(
sci_rnc, cb_fn, cb_p,
RNC_DEST_READY);
break;
}
return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
{
struct domain_device *dev = idev->domain_dev;
/* If this is an expander attached SATA device we must
* invalidate and repost the RNC since this is the only
* way to clear the TCi to NCQ tag mapping table for
* the RNi. All other device types we can just resume.
*/
sci_remote_node_context_setup_to_resume(
sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
if ((dev_is_sata(dev) && dev->parent) ||
(sci_rnc->destination_state == RNC_DEST_FINAL))
sci_change_state(&sci_rnc->sm,
SCI_RNC_INVALIDATING);
else
sci_change_state(&sci_rnc->sm,
SCI_RNC_RESUMING);
}
}
return SCI_SUCCESS;
case SCI_RNC_AWAIT_SUSPENSION:
sci_remote_node_context_setup_to_resume(
sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %s\n", __func__,
rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_READY:
return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
case SCI_RNC_AWAIT_SUSPENSION:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %s\n", __func__,
rnc_state_name(state));
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
default:
dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %s\n", __func__,
rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
enum sci_status sci_remote_node_context_start_task(
struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
enum sci_status status = sci_remote_node_context_resume(sci_rnc,
cb_fn, cb_p);
if (status != SCI_SUCCESS)
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: resume failed: %d\n", __func__, status);
return status;
}
int sci_remote_node_context_is_safe_to_abort(
struct sci_remote_node_context *sci_rnc)
{
enum scis_sds_remote_node_context_states state;
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_INVALIDATING:
case SCI_RNC_TX_RX_SUSPENDED:
return 1;
case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_READY:
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_AWAIT_SUSPENSION:
case SCI_RNC_INITIAL:
return 0;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %d\n", __func__, state);
return 0;
}
}
| gpl-2.0 |
IOKP/kernel_samsung_jf | drivers/staging/comedi/drivers/contec_pci_dio.c | 4895 | 6847 | /*
comedi/drivers/contec_pci_dio.c
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: contec_pci_dio
Description: Contec PIO1616L digital I/O board
Devices: [Contec] PIO1616L (contec_pci_dio)
Author: Stefano Rivoir <s.rivoir@gts.it>
Updated: Wed, 27 Jun 2007 13:00:06 +0100
Status: works
Configuration Options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
If bus/slot is not specified, the first supported
PCI device found will be used.
*/
#include "../comedidev.h"
#include "comedi_pci.h"
enum contec_model {
PIO1616L = 0,
};
struct contec_board {
const char *name;
int model;
int in_ports;
int out_ports;
int in_offs;
int out_offs;
int out_boffs;
};
static const struct contec_board contec_boards[] = {
{"PIO1616L", PIO1616L, 16, 16, 0, 2, 10},
};
#define PCI_DEVICE_ID_PIO1616L 0x8172
static DEFINE_PCI_DEVICE_TABLE(contec_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
.driver_data = PIO1616L },
{0}
};
MODULE_DEVICE_TABLE(pci, contec_pci_table);
#define thisboard ((const struct contec_board *)dev->board_ptr)
struct contec_private {
int data;
struct pci_dev *pci_dev;
};
#define devpriv ((struct contec_private *)dev->private)
static int contec_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int contec_detach(struct comedi_device *dev);
static struct comedi_driver driver_contec = {
.driver_name = "contec_pci_dio",
.module = THIS_MODULE,
.attach = contec_attach,
.detach = contec_detach,
};
/* Classic digital IO */
static int contec_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int contec_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
#if 0
static int contec_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd);
static int contec_ns_to_timer(unsigned int *ns, int round);
#endif
static int contec_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
printk("comedi%d: contec: ", dev->minor);
dev->board_name = thisboard->name;
if (alloc_private(dev, sizeof(struct contec_private)) < 0)
return -ENOMEM;
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_CONTEC &&
pcidev->device == PCI_DEVICE_ID_PIO1616L) {
if (it->options[0] || it->options[1]) {
/* Check bus and slot. */
if (it->options[0] != pcidev->bus->number ||
it->options[1] != PCI_SLOT(pcidev->devfn)) {
continue;
}
}
devpriv->pci_dev = pcidev;
if (comedi_pci_enable(pcidev, "contec_pci_dio")) {
printk
("error enabling PCI device and requesting regions!\n");
return -EIO;
}
dev->iobase = pci_resource_start(pcidev, 0);
printk(" base addr %lx ", dev->iobase);
dev->board_ptr = contec_boards + 0;
s = dev->subdevices + 0;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = contec_di_insn_bits;
s = dev->subdevices + 1;
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = contec_do_insn_bits;
printk("attached\n");
return 1;
}
}
printk("card not present!\n");
return -EIO;
}
static int contec_detach(struct comedi_device *dev)
{
printk("comedi%d: contec: remove\n", dev->minor);
if (devpriv && devpriv->pci_dev) {
if (dev->iobase)
comedi_pci_disable(devpriv->pci_dev);
pci_dev_put(devpriv->pci_dev);
}
return 0;
}
#if 0
static int contec_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
printk("contec_cmdtest called\n");
return 0;
}
static int contec_ns_to_timer(unsigned int *ns, int round)
{
return *ns;
}
#endif
static int contec_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
dev_dbg(dev->hw_dev, "contec_do_insn_bits called\n");
dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
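/* comedi insn_bits convention: data[0] is the mask of channels to
* update, data[1] holds their new values; channels outside the mask
* keep their current state.
*/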
if (data[0]) {
s->state &= ~data[0];
s->state |= data[0] & data[1];
dev_dbg(dev->hw_dev, "out: %d on %lx\n", s->state,
dev->iobase + thisboard->out_offs);
outw(s->state, dev->iobase + thisboard->out_offs);
}
return 2;
}
static int contec_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
dev_dbg(dev->hw_dev, "contec_di_insn_bits called\n");
dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
data[1] = inw(dev->iobase + thisboard->in_offs);
return 2;
}
static int __devinit driver_contec_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
return comedi_pci_auto_config(dev, driver_contec.driver_name);
}
static void __devexit driver_contec_pci_remove(struct pci_dev *dev)
{
comedi_pci_auto_unconfig(dev);
}
static struct pci_driver driver_contec_pci_driver = {
.id_table = contec_pci_table,
.probe = &driver_contec_pci_probe,
.remove = __devexit_p(&driver_contec_pci_remove)
};
static int __init driver_contec_init_module(void)
{
int retval;
retval = comedi_driver_register(&driver_contec);
if (retval < 0)
return retval;
driver_contec_pci_driver.name = (char *)driver_contec.driver_name;
return pci_register_driver(&driver_contec_pci_driver);
}
static void __exit driver_contec_cleanup_module(void)
{
pci_unregister_driver(&driver_contec_pci_driver);
comedi_driver_unregister(&driver_contec);
}
module_init(driver_contec_init_module);
module_exit(driver_contec_cleanup_module);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CarbonROM/android_kernel_htc_msm8974 | tools/virtio/virtio_test.c | 4895 | 6192 | #define _GNU_SOURCE
#include <getopt.h>
#include <string.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <linux/vhost.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include "../../drivers/vhost/test.h"
struct vq_info {
int kick;
int call;
int num;
int idx;
void *ring;
/* copy used for control */
struct vring vring;
struct virtqueue *vq;
};
struct vdev_info {
struct virtio_device vdev;
int control;
struct pollfd fds[1];
struct vq_info vqs[1];
int nvqs;
void *buf;
size_t buf_size;
struct vhost_memory *mem;
};
void vq_notify(struct virtqueue *vq)
{
struct vq_info *info = vq->priv;
unsigned long long v = 1;
int r;
r = write(info->kick, &v, sizeof v);
assert(r == sizeof v);
}
void vq_callback(struct virtqueue *vq)
{
}
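/* Hand the ring layout and the kick/call eventfds to the vhost-test
* kernel module: the kick fd is written by this process to notify the
* "device", and the call fd is signalled by vhost to emulate an
* interrupt back to us.
*/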
void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
{
struct vhost_vring_state state = { .index = info->idx };
struct vhost_vring_file file = { .index = info->idx };
unsigned long long features = dev->vdev.features[0];
struct vhost_vring_addr addr = {
.index = info->idx,
.desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
.avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
.used_user_addr = (uint64_t)(unsigned long)info->vring.used,
};
int r;
r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
assert(r >= 0);
state.num = info->vring.num;
r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
assert(r >= 0);
state.num = 0;
r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
assert(r >= 0);
r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
assert(r >= 0);
file.fd = info->kick;
r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
assert(r >= 0);
file.fd = info->call;
r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
assert(r >= 0);
}
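/* Allocate a page-aligned ring, wrap it in a virtqueue, register it
* with vhost, and add its call eventfd to dev->fds so the main loop
* can poll all queues for "interrupts" in one place.
*/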
static void vq_info_add(struct vdev_info *dev, int num)
{
struct vq_info *info = &dev->vqs[dev->nvqs];
int r;
info->idx = dev->nvqs;
info->kick = eventfd(0, EFD_NONBLOCK);
info->call = eventfd(0, EFD_NONBLOCK);
r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
assert(r >= 0);
memset(info->ring, 0, vring_size(num, 4096));
vring_init(&info->vring, num, info->ring, 4096);
info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev,
true, info->ring,
vq_notify, vq_callback, "test");
assert(info->vq);
info->vq->priv = info;
vhost_vq_setup(dev, info);
dev->fds[info->idx].fd = info->call;
dev->fds[info->idx].events = POLLIN;
dev->nvqs++;
}
static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
{
int r;
memset(dev, 0, sizeof *dev);
dev->vdev.features[0] = features;
dev->vdev.features[1] = features >> 32;
dev->buf_size = 1024;
dev->buf = malloc(dev->buf_size);
assert(dev->buf);
dev->control = open("/dev/vhost-test", O_RDWR);
assert(dev->control >= 0);
r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
assert(r >= 0);
dev->mem = malloc(offsetof(struct vhost_memory, regions) +
sizeof dev->mem->regions[0]);
assert(dev->mem);
memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
sizeof dev->mem->regions[0]);
dev->mem->nregions = 1;
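/* Identity-map the test buffer: the guest "physical" addresses are
* simply this process's virtual addresses, so the vhost worker can
* translate descriptor addresses with a single region lookup.
*/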
dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
dev->mem->regions[0].userspace_addr = (long)dev->buf;
dev->mem->regions[0].memory_size = dev->buf_size;
r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
assert(r >= 0);
}
/* TODO: this is pretty bad: we get a cache line bounce
* for the wait queue on poll and another one on read,
* plus the read which is there just to clear the
* current state. */
static void wait_for_interrupt(struct vdev_info *dev)
{
int i;
unsigned long long val;
poll(dev->fds, dev->nvqs, -1);
for (i = 0; i < dev->nvqs; ++i)
if (dev->fds[i].revents & POLLIN) {
read(dev->fds[i].fd, &val, sizeof val);
}
}
static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
{
struct scatterlist sl;
long started = 0, completed = 0;
long completed_before;
int r, test = 1;
unsigned len;
long long spurious = 0;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
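/* Classic virtio producer loop: suppress callbacks, add and kick
* buffers until the ring is full, reap completions, then sleep only
* if virtqueue_enable_cb() reports no pending work (closing the race
* between re-enabling callbacks and a missed notification).
*/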
for (;;) {
virtqueue_disable_cb(vq->vq);
completed_before = completed;
do {
if (started < bufs) {
sg_init_one(&sl, dev->buf, dev->buf_size);
r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
dev->buf + started,
GFP_ATOMIC);
if (likely(r >= 0)) {
++started;
virtqueue_kick(vq->vq);
}
} else
r = -1;
/* Flush out completed bufs if any */
if (virtqueue_get_buf(vq->vq, &len)) {
++completed;
r = 0;
}
} while (r >= 0);
if (completed == completed_before)
++spurious;
assert(completed <= bufs);
assert(started <= bufs);
if (completed == bufs)
break;
if (virtqueue_enable_cb(vq->vq)) {
wait_for_interrupt(dev);
}
}
test = 0;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
}
const char optstring[] = "h";
const struct option longopts[] = {
{
.name = "help",
.val = 'h',
},
{
.name = "event-idx",
.val = 'E',
},
{
.name = "no-event-idx",
.val = 'e',
},
{
.name = "indirect",
.val = 'I',
},
{
.name = "no-indirect",
.val = 'i',
},
{
}
};
static void help(void)
{
fprintf(stderr, "Usage: virtio_test [--help]"
" [--no-indirect]"
" [--no-event-idx]"
"\n");
}
int main(int argc, char **argv)
{
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX);
int o;
for (;;) {
o = getopt_long(argc, argv, optstring, longopts, NULL);
switch (o) {
case -1:
goto done;
case '?':
help();
exit(2);
case 'e':
features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);
break;
case 'h':
help();
goto done;
case 'i':
features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
break;
default:
assert(0);
break;
}
}
done:
vdev_info_init(&dev, features);
vq_info_add(&dev, 256);
run_test(&dev, &dev.vqs[0], 0x100000);
return 0;
}
| gpl-2.0 |
varunchitre15/android_kernel_sony_tianchi | drivers/staging/comedi/drivers/pcl816.c | 4895 | 36542 | /*
comedi/drivers/pcl816.c
Author: Juan Grigera <juan@grigera.com.ar>
based on pcl818 by Michal Dobes <dobes@tesnet.cz> and bits of pcl812
hardware driver for Advantech cards:
card: PCL-816, PCL814B
driver: pcl816
*/
/*
Driver: pcl816
Description: Advantech PCL-816 cards, PCL-814
Author: Juan Grigera <juan@grigera.com.ar>
Devices: [Advantech] PCL-816 (pcl816), PCL-814B (pcl814b)
Status: works
Updated: Tue, 2 Apr 2002 23:15:21 -0800
PCL 816 and 814B have 16 SE/DIFF ADCs, 16 DACs, 16 DI and 16 DO.
Differences are at resolution (16 vs 12 bits).
The driver support AI command mode, other subdevices not written.
Analog output and digital input and output are not supported.
Configuration Options:
[0] - IO Base
[1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
[2] - DMA (0=disable, 1, 3)
[3] - 0, 10=10MHz clock for 8254
1= 1MHz clock for 8254
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <asm/dma.h>
#include "8253.h"
#define DEBUG(x) x
/* boards constants */
/* IO space len */
#define PCLx1x_RANGE 16
/* #define outb(x,y) printk("OUTB(%x, 200+%d)\n", x,y-0x200); outb(x,y) */
/* INTEL 8254 counters */
#define PCL816_CTR0 4
#define PCL816_CTR1 5
#define PCL816_CTR2 6
/* R: counter read-back register W: counter control */
#define PCL816_CTRCTL 7
/* R: A/D high byte W: A/D range control */
#define PCL816_RANGE 9
/* W: clear INT request */
#define PCL816_CLRINT 10
/* R: next mux scan channel W: mux scan channel & range control pointer */
#define PCL816_MUX 11
/* R/W: operation control register */
#define PCL816_CONTROL 12
/* R: return status byte W: set DMA/IRQ */
#define PCL816_STATUS 13
#define PCL816_STATUS_DRDY_MASK 0x80
/* R: low byte of A/D W: soft A/D trigger */
#define PCL816_AD_LO 8
/* R: high byte of A/D W: A/D range control */
#define PCL816_AD_HI 9
/* type of interrupt handler */
#define INT_TYPE_AI1_INT 1
#define INT_TYPE_AI1_DMA 2
#define INT_TYPE_AI3_INT 4
#define INT_TYPE_AI3_DMA 5
#ifdef unused
#define INT_TYPE_AI1_DMA_RTC 9
#define INT_TYPE_AI3_DMA_RTC 10
/* RTC stuff... */
#define RTC_IRQ 8
#define RTC_IO_EXTENT 0x10
#endif
#define MAGIC_DMA_WORD 0x5a5a
static const struct comedi_lrange range_pcl816 = { 8, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25),
}
};
struct pcl816_board {
const char *name; /* board name */
int n_ranges; /* len of range list */
int n_aichan; /* num of A/D chans in differential mode */
unsigned int ai_ns_min; /* minimal allowed delay between samples (in ns) */
int n_aochan; /* num of D/A chans */
int n_dichan; /* num of DI chans */
int n_dochan; /* num of DO chans */
const struct comedi_lrange *ai_range_type; /* default A/D rangelist */
const struct comedi_lrange *ao_range_type; /* default D/A rangelist */
unsigned int io_range; /* len of IO space */
unsigned int IRQbits; /* allowed interrupts */
unsigned int DMAbits; /* allowed DMA chans */
int ai_maxdata; /* maxdata for A/D */
int ao_maxdata; /* maxdata for D/A */
int ai_chanlist; /* allowed len of channel list A/D */
int ao_chanlist; /* allowed len of channel list D/A */
int i8254_osc_base; /* period of the on-board oscillator in ns (1/frequency) */
};
static const struct pcl816_board boardtypes[] = {
{"pcl816", 8, 16, 10000, 1, 16, 16, &range_pcl816,
&range_pcl816, PCLx1x_RANGE,
0x00fc, /* IRQ mask */
0x0a, /* DMA mask */
0xffff, /* 16-bit card */
0xffff, /* D/A maxdata */
1024,
1, /* ao chan list */
100},
{"pcl814b", 8, 16, 10000, 1, 16, 16, &range_pcl816,
&range_pcl816, PCLx1x_RANGE,
0x00fc,
0x0a,
0x3fff, /* 14 bit card */
0x3fff,
1024,
1,
100},
};
#define n_boardtypes (sizeof(boardtypes)/sizeof(struct pcl816_board))
#define devpriv ((struct pcl816_private *)dev->private)
#define this_board ((const struct pcl816_board *)dev->board_ptr)
static int pcl816_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pcl816_detach(struct comedi_device *dev);
#ifdef unused
static int RTC_lock; /* RTC lock */
static int RTC_timer_lock; /* RTC int lock */
#endif
static struct comedi_driver driver_pcl816 = {
.driver_name = "pcl816",
.module = THIS_MODULE,
.attach = pcl816_attach,
.detach = pcl816_detach,
.board_name = &boardtypes[0].name,
.num_names = n_boardtypes,
.offset = sizeof(struct pcl816_board),
};
static int __init driver_pcl816_init_module(void)
{
return comedi_driver_register(&driver_pcl816);
}
static void __exit driver_pcl816_cleanup_module(void)
{
comedi_driver_unregister(&driver_pcl816);
}
module_init(driver_pcl816_init_module);
module_exit(driver_pcl816_cleanup_module);
struct pcl816_private {
unsigned int dma; /* used DMA, 0=don't use DMA */
int dma_rtc; /* 1=RTC used with DMA, 0=no RTC alloc */
#ifdef unused
unsigned long rtc_iobase; /* RTC port region */
unsigned int rtc_iosize;
unsigned int rtc_irq;
#endif
unsigned long dmabuf[2]; /* pointers to begin of DMA buffers */
unsigned int dmapages[2]; /* len of DMA buffers in PAGE_SIZEs */
unsigned int hwdmaptr[2]; /* hardware address of DMA buffers */
unsigned int hwdmasize[2]; /* len of DMA buffers in Bytes */
unsigned int dmasamplsize; /* size in samples hwdmasize[0]/2 */
unsigned int last_top_dma; /* DMA pointer in last RTC int */
int next_dma_buf; /* which DMA buffer will be used next round */
long dma_runs_to_end; /* how many DMA transfers we must perform to reach the end of the record */
unsigned long last_dma_run; /* how many bytes we must transfer on last DMA page */
unsigned int ai_scans; /* len of scanlist */
unsigned char ai_neverending; /* if=1, then we do neverending record (you must use cancel()) */
int irq_free; /* 1=have allocated IRQ */
int irq_blocked; /* 1=IRQ now uses any subdev */
#ifdef unused
int rtc_irq_blocked; /* 1=we now do AI with DMA&RTC */
#endif
int irq_was_now_closed; /* stores the int816_mode of the interrupt that just finished */
int int816_mode; /* who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
struct comedi_subdevice *last_int_sub; /* ptr to subdevice which now finish */
int ai_act_scan; /* how many scans we finished */
unsigned int ai_act_chanlist[16]; /* MUX setting for actual AI operations */
unsigned int ai_act_chanlist_len; /* how long is actual MUX list */
unsigned int ai_act_chanlist_pos; /* actual position in MUX list */
unsigned int ai_n_chan; /* how many channels per scan */
unsigned int ai_poll_ptr; /* how many sampes transfer poll */
struct comedi_subdevice *sub_ai; /* ptr to AI subdevice */
#ifdef unused
struct timer_list rtc_irq_timer; /* timer for RTC sanity check */
unsigned long rtc_freq; /* RTC int freq */
#endif
};
/*
==============================================================================
*/
static int check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int chanlen);
static void setup_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int seglen);
static int pcl816_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void start_pacer(struct comedi_device *dev, int mode,
unsigned int divisor1, unsigned int divisor2);
#ifdef unused
static int set_rtc_irq_bit(unsigned char bit);
#endif
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd);
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
/*
==============================================================================
ANALOG INPUT MODE0, 816 cards, slow version
*/
static int pcl816_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
int timeout;
DPRINTK("mode 0 analog input\n");
/* software trigger, DMA and INT off */
outb(0, dev->iobase + PCL816_CONTROL);
/* clear INT (conversion end) flag */
outb(0, dev->iobase + PCL816_CLRINT);
/* Set the input channel */
outb(CR_CHAN(insn->chanspec) & 0xf, dev->iobase + PCL816_MUX);
/* select gain */
outb(CR_RANGE(insn->chanspec), dev->iobase + PCL816_RANGE);
for (n = 0; n < insn->n; n++) {
outb(0, dev->iobase + PCL816_AD_LO); /* start conversion */
timeout = 100;
while (timeout--) {
if (!(inb(dev->iobase + PCL816_STATUS) &
PCL816_STATUS_DRDY_MASK)) {
/* return read value */
data[n] =
((inb(dev->iobase +
PCL816_AD_HI) << 8) |
(inb(dev->iobase + PCL816_AD_LO)));
/* clear INT (conversion end) flag */
outb(0, dev->iobase + PCL816_CLRINT);
break;
}
udelay(1);
}
/* Return timeout error */
if (!timeout) {
comedi_error(dev, "A/D insn timeout\n");
data[0] = 0;
/* clear INT (conversion end) flag */
outb(0, dev->iobase + PCL816_CLRINT);
return -EIO;
}
}
return n;
}
/*
==============================================================================
analog input interrupt mode 1 & 3, 816 cards
one sample per interrupt version
*/
static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->subdevices + 0;
int low, hi;
int timeout = 50; /* wait max 50us */
while (timeout--) {
if (!(inb(dev->iobase + PCL816_STATUS) &
PCL816_STATUS_DRDY_MASK))
break;
udelay(1);
}
if (!timeout) { /* timeout, bail error */
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
pcl816_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
return IRQ_HANDLED;
}
/* get the sample */
low = inb(dev->iobase + PCL816_AD_LO);
hi = inb(dev->iobase + PCL816_AD_HI);
comedi_buf_put(s->async, (hi << 8) | low);
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
if (++devpriv->ai_act_chanlist_pos >= devpriv->ai_act_chanlist_len)
devpriv->ai_act_chanlist_pos = 0;
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
}
if (!devpriv->ai_neverending)
/* all data sampled */
if (devpriv->ai_act_scan >= devpriv->ai_scans) {
/* all data sampled */
pcl816_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
}
comedi_event(dev, s);
return IRQ_HANDLED;
}
/*
==============================================================================
analog input dma mode 1 & 3, 816 cards
*/
static void transfer_from_dma_buf(struct comedi_device *dev,
struct comedi_subdevice *s, short *ptr,
unsigned int bufptr, unsigned int len)
{
int i;
s->async->events = 0;
for (i = 0; i < len; i++) {
comedi_buf_put(s->async, ptr[bufptr++]);
if (++devpriv->ai_act_chanlist_pos >=
devpriv->ai_act_chanlist_len) {
devpriv->ai_act_chanlist_pos = 0;
}
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
}
if (!devpriv->ai_neverending)
/* all data sampled */
if (devpriv->ai_act_scan >= devpriv->ai_scans) {
pcl816_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_BLOCK;
break;
}
}
comedi_event(dev, s);
}
static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->subdevices + 0;
int len, bufptr, this_dma_buf;
unsigned long dma_flags;
short *ptr;
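/* Ping-pong double buffering: while we drain the buffer that just
* completed, the DMA controller is re-pointed at the other one so
* acquisition continues without a gap.
*/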
disable_dma(devpriv->dma);
this_dma_buf = devpriv->next_dma_buf;
/* switch dma bufs */
if ((devpriv->dma_runs_to_end > -1) || devpriv->ai_neverending) {
devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
set_dma_mode(devpriv->dma, DMA_MODE_READ);
dma_flags = claim_dma_lock();
/* clear_dma_ff (devpriv->dma); */
set_dma_addr(devpriv->dma,
devpriv->hwdmaptr[devpriv->next_dma_buf]);
if (devpriv->dma_runs_to_end) {
set_dma_count(devpriv->dma,
devpriv->hwdmasize[devpriv->
next_dma_buf]);
} else {
set_dma_count(devpriv->dma, devpriv->last_dma_run);
}
release_dma_lock(dma_flags);
enable_dma(devpriv->dma);
}
devpriv->dma_runs_to_end--;
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
ptr = (short *)devpriv->dmabuf[this_dma_buf];
len = (devpriv->hwdmasize[0] >> 1) - devpriv->ai_poll_ptr;
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
return IRQ_HANDLED;
}
/*
==============================================================================
INT procedure
*/
static irqreturn_t interrupt_pcl816(int irq, void *d)
{
struct comedi_device *dev = d;
DPRINTK("<I>");
if (!dev->attached) {
comedi_error(dev, "premature interrupt");
return IRQ_HANDLED;
}
switch (devpriv->int816_mode) {
case INT_TYPE_AI1_DMA:
case INT_TYPE_AI3_DMA:
return interrupt_pcl816_ai_mode13_dma(irq, d);
case INT_TYPE_AI1_INT:
case INT_TYPE_AI3_INT:
return interrupt_pcl816_ai_mode13_int(irq, d);
}
outb(0, dev->iobase + PCL816_CLRINT); /* clear INT request */
if (!dev->irq || !devpriv->irq_free || !devpriv->irq_blocked ||
!devpriv->int816_mode) {
if (devpriv->irq_was_now_closed) {
devpriv->irq_was_now_closed = 0;
/* comedi_error(dev,"last IRQ.."); */
return IRQ_HANDLED;
}
comedi_error(dev, "bad IRQ!");
return IRQ_NONE;
}
comedi_error(dev, "IRQ from unknown source!");
return IRQ_NONE;
}
/*
==============================================================================
COMMAND MODE
*/
static void pcl816_cmdtest_out(int e, struct comedi_cmd *cmd)
{
printk(KERN_INFO "pcl816 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
printk(KERN_INFO "pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
printk(KERN_INFO "pcl816 e=%d stopsrc=%x scanend=%x\n", e,
cmd->stop_src, cmd->scan_end_src);
printk(KERN_INFO "pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n",
e, cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
}
/*
==============================================================================
*/
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
int tmp, divisor1 = 0, divisor2 = 0;
DEBUG(printk(KERN_INFO "pcl816 pcl812_ai_cmdtest\n");
pcl816_cmdtest_out(-1, cmd);
);
/* step 1: make sure trigger sources are trivially valid */
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
cmd->scan_begin_src &= TRIG_FOLLOW;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_EXT | TRIG_TIMER;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/*
* step 2: make sure trigger sources
* are unique and mutually compatible
*/
if (cmd->start_src != TRIG_NOW) {
cmd->start_src = TRIG_NOW;
err++;
}
if (cmd->scan_begin_src != TRIG_FOLLOW) {
cmd->scan_begin_src = TRIG_FOLLOW;
err++;
}
if (cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_TIMER) {
cmd->convert_src = TRIG_TIMER;
err++;
}
if (cmd->scan_end_src != TRIG_COUNT) {
cmd->scan_end_src = TRIG_COUNT;
err++;
}
if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
err++;
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
err++;
}
if (cmd->convert_src == TRIG_TIMER) {
if (cmd->convert_arg < this_board->ai_ns_min) {
cmd->convert_arg = this_board->ai_ns_min;
err++;
}
} else { /* TRIG_EXT */
if (cmd->convert_arg != 0) {
cmd->convert_arg = 0;
err++;
}
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_COUNT) {
if (!cmd->stop_arg) {
cmd->stop_arg = 1;
err++;
}
} else { /* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
/* step 4: fix up any arguments */
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
i8253_cascade_ns_to_timer(this_board->i8254_osc_base,
&divisor1, &divisor2,
&cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
if (cmd->convert_arg < this_board->ai_ns_min)
cmd->convert_arg = this_board->ai_ns_min;
if (tmp != cmd->convert_arg)
err++;
}
if (err)
return 4;
/* step 5: complain about special chanlist considerations */
if (cmd->chanlist) {
if (!check_channel_list(dev, s, cmd->chanlist,
cmd->chanlist_len))
return 5; /* incorrect channels list */
}
return 0;
}
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
unsigned int divisor1 = 0, divisor2 = 0, dma_flags, bytes, dmairq;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int seglen;
if (cmd->start_src != TRIG_NOW)
return -EINVAL;
if (cmd->scan_begin_src != TRIG_FOLLOW)
return -EINVAL;
if (cmd->scan_end_src != TRIG_COUNT)
return -EINVAL;
if (cmd->scan_end_arg != cmd->chanlist_len)
return -EINVAL;
/* if(cmd->chanlist_len>MAX_CHANLIST_LEN) return -EINVAL; */
if (devpriv->irq_blocked)
return -EBUSY;
if (cmd->convert_src == TRIG_TIMER) {
if (cmd->convert_arg < this_board->ai_ns_min)
cmd->convert_arg = this_board->ai_ns_min;
i8253_cascade_ns_to_timer(this_board->i8254_osc_base, &divisor1,
&divisor2, &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
/* PCL816 crash if any divisor is set to 1 */
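/* bump the offending divisor to 2 and halve the other one so the
* product divisor1*divisor2 (and hence the pacer rate) is preserved */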
if (divisor1 == 1) {
divisor1 = 2;
divisor2 /= 2;
}
if (divisor2 == 1) {
divisor2 = 2;
divisor1 /= 2;
}
}
start_pacer(dev, -1, 0, 0); /* stop pacer */
seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
if (seglen < 1)
return -EINVAL;
setup_channel_list(dev, s, cmd->chanlist, seglen);
udelay(1);
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_act_scan = 0;
s->async->cur_chan = 0;
devpriv->irq_blocked = 1;
devpriv->ai_poll_ptr = 0;
devpriv->irq_was_now_closed = 0;
if (cmd->stop_src == TRIG_COUNT) {
devpriv->ai_scans = cmd->stop_arg;
devpriv->ai_neverending = 0;
} else {
devpriv->ai_scans = 0;
devpriv->ai_neverending = 1;
}
/* does the user want a wakeup at every scan? */
if ((cmd->flags & TRIG_WAKE_EOS)) {
printk(KERN_INFO
"pcl816: You want WAKE_EOS but I don't handle it");
/* devpriv->ai_eos=1; */
/* if (devpriv->ai_n_chan==1) */
/* devpriv->dma=0; // DMA is useless for this situation */
}
if (devpriv->dma) {
bytes = devpriv->hwdmasize[0];
if (!devpriv->ai_neverending) {
/* how many bytes for the whole acquisition:
* samples per scan * number of scans */
bytes = devpriv->ai_n_chan *
devpriv->ai_scans *
sizeof(short);
/* how many DMA pages we must fill */
devpriv->dma_runs_to_end = bytes /
devpriv->hwdmasize[0];
/* on last dma transfer must be moved */
devpriv->last_dma_run = bytes % devpriv->hwdmasize[0];
devpriv->dma_runs_to_end--;
if (devpriv->dma_runs_to_end >= 0)
bytes = devpriv->hwdmasize[0];
} else
devpriv->dma_runs_to_end = -1;
devpriv->next_dma_buf = 0;
set_dma_mode(devpriv->dma, DMA_MODE_READ);
dma_flags = claim_dma_lock();
clear_dma_ff(devpriv->dma);
set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
set_dma_count(devpriv->dma, bytes);
release_dma_lock(dma_flags);
enable_dma(devpriv->dma);
}
start_pacer(dev, 1, divisor1, divisor2);
dmairq = ((devpriv->dma & 0x3) << 4) | (dev->irq & 0x7);
switch (cmd->convert_src) {
case TRIG_TIMER:
devpriv->int816_mode = INT_TYPE_AI1_DMA;
/* Pacer+IRQ+DMA */
outb(0x32, dev->iobase + PCL816_CONTROL);
/* write irq and DMA to card */
outb(dmairq, dev->iobase + PCL816_STATUS);
break;
default:
devpriv->int816_mode = INT_TYPE_AI3_DMA;
/* Ext trig+IRQ+DMA */
outb(0x34, dev->iobase + PCL816_CONTROL);
/* write irq to card */
outb(dmairq, dev->iobase + PCL816_STATUS);
break;
}
DPRINTK("pcl816 END: pcl812_ai_cmd()\n");
return 0;
}
static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
unsigned long flags;
unsigned int top1, top2, i;
if (!devpriv->dma)
return 0; /* poll is valid only for DMA transfer */
spin_lock_irqsave(&dev->spinlock, flags);
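/* Read the DMA residue twice until two reads agree: the 8237 counter
* can be caught mid-update, so only a stable snapshot is trusted.
*/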
for (i = 0; i < 20; i++) {
top1 = get_dma_residue(devpriv->dma); /* where is now DMA */
top2 = get_dma_residue(devpriv->dma);
if (top1 == top2)
break;
}
if (top1 != top2) {
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
/* where is now DMA in buffer */
top1 = devpriv->hwdmasize[0] - top1;
top1 >>= 1; /* sample position */
top2 = top1 - devpriv->ai_poll_ptr;
if (top2 < 1) { /* no new samples */
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
transfer_from_dma_buf(dev, s,
(short *)devpriv->dmabuf[devpriv->next_dma_buf],
devpriv->ai_poll_ptr, top2);
devpriv->ai_poll_ptr = top1; /* new buffer position */
spin_unlock_irqrestore(&dev->spinlock, flags);
return s->async->buf_write_count - s->async->buf_read_count;
}
/*
==============================================================================
cancel any mode 1-4 AI
*/
static int pcl816_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
/* DEBUG(printk("pcl816_ai_cancel()\n");) */
if (devpriv->irq_blocked > 0) {
switch (devpriv->int816_mode) {
#ifdef unused
case INT_TYPE_AI1_DMA_RTC:
case INT_TYPE_AI3_DMA_RTC:
set_rtc_irq_bit(0); /* stop RTC */
del_timer(&devpriv->rtc_irq_timer);
#endif
case INT_TYPE_AI1_DMA:
case INT_TYPE_AI3_DMA:
disable_dma(devpriv->dma);
case INT_TYPE_AI1_INT:
case INT_TYPE_AI3_INT:
outb(inb(dev->iobase + PCL816_CONTROL) & 0x73,
dev->iobase + PCL816_CONTROL); /* Stop A/D */
udelay(1);
outb(0, dev->iobase + PCL816_CONTROL); /* Stop A/D */
/* Stop pacer */
outb(0xb0, dev->iobase + PCL816_CTRCTL);
outb(0x70, dev->iobase + PCL816_CTRCTL);
outb(0, dev->iobase + PCL816_AD_LO);
inb(dev->iobase + PCL816_AD_LO);
inb(dev->iobase + PCL816_AD_HI);
/* clear INT request */
outb(0, dev->iobase + PCL816_CLRINT);
/* Stop A/D */
outb(0, dev->iobase + PCL816_CONTROL);
devpriv->irq_blocked = 0;
devpriv->irq_was_now_closed = devpriv->int816_mode;
devpriv->int816_mode = 0;
devpriv->last_int_sub = s;
/* s->busy = 0; */
break;
}
}
DEBUG(printk("comedi: pcl816_ai_cancel() successful\n");)
return 0;
}
/*
==============================================================================
check for PCL816
*/
static int pcl816_check(unsigned long iobase)
{
outb(0x00, iobase + PCL816_MUX);
udelay(1);
if (inb(iobase + PCL816_MUX) != 0x00)
return 1; /* no card present */
outb(0x55, iobase + PCL816_MUX);
udelay(1);
if (inb(iobase + PCL816_MUX) != 0x55)
return 1; /* no card present */
outb(0x00, iobase + PCL816_MUX);
udelay(1);
outb(0x18, iobase + PCL816_CONTROL);
udelay(1);
if (inb(iobase + PCL816_CONTROL) != 0x18)
return 1; /* no card present */
return 0; /* OK, card exists */
}
/*
==============================================================================
reset the whole PCL-816 card
*/
static void pcl816_reset(struct comedi_device *dev)
{
/* outb (0, dev->iobase + PCL818_DA_LO); DAC=0V */
/* outb (0, dev->iobase + PCL818_DA_HI); */
/* udelay (1); */
/* outb (0, dev->iobase + PCL818_DO_HI); DO=$0000 */
/* outb (0, dev->iobase + PCL818_DO_LO); */
/* udelay (1); */
outb(0, dev->iobase + PCL816_CONTROL);
outb(0, dev->iobase + PCL816_MUX);
outb(0, dev->iobase + PCL816_CLRINT);
outb(0xb0, dev->iobase + PCL816_CTRCTL); /* Stop pacer */
outb(0x70, dev->iobase + PCL816_CTRCTL);
outb(0x30, dev->iobase + PCL816_CTRCTL);
outb(0, dev->iobase + PCL816_RANGE);
}
/*
==============================================================================
Start/stop the on-board pacer
*/
static void
start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
unsigned int divisor2)
{
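/* 8254 control words below (standard 8254 layout): bits 7-6 select
* the counter, bits 5-4 the read/write mode (0x30 = LSB then MSB),
* bits 3-1 the counter mode, bit 0 binary/BCD.
*/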
outb(0x32, dev->iobase + PCL816_CTRCTL);
outb(0xff, dev->iobase + PCL816_CTR0);
outb(0x00, dev->iobase + PCL816_CTR0);
udelay(1);
/* set counter 2 to mode 2 (rate generator) */
outb(0xb4, dev->iobase + PCL816_CTRCTL);
/* set counter 1 to mode 2 (rate generator) */
outb(0x74, dev->iobase + PCL816_CTRCTL);
udelay(1);
if (mode == 1) {
DPRINTK("mode %d, divisor1 %d, divisor2 %d\n", mode, divisor1,
divisor2);
outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2);
outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2);
outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1);
outb((divisor1 >> 8) & 0xff, dev->iobase + PCL816_CTR1);
}
/* clear pending interrupts (just in case) */
/* outb(0, dev->iobase + PCL816_CLRINT); */
}
/*
==============================================================================
Check if the channel list from the user is built correctly.
If it is OK, return the non-zero length of the repeated segment of the channel list.
*/
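/* Worked example (hypothetical values): the list {ch2, ch3, ch4,
* ch2, ch3, ch4} repeats a continuous 3-channel segment, so seglen 3
* is returned and only that segment needs to be programmed into the
* MUX scan logic.
*/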
static int
check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int *chanlist,
unsigned int chanlen)
{
unsigned int chansegment[16];
unsigned int i, nowmustbechan, seglen, segpos;
/* channel and range number validity is checked by comedi/range.c */
if (chanlen < 1) {
comedi_error(dev, "range/channel list is empty!");
return 0;
}
if (chanlen > 1) {
/* the first channel is always OK */
chansegment[0] = chanlist[0];
for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
/* build part of chanlist */
DEBUG(printk(KERN_INFO "%d. %d %d\n", i,
CR_CHAN(chanlist[i]),
CR_RANGE(chanlist[i]));)
/* we detected the start of a repeat; the segment must end here */
if (chanlist[0] == chanlist[i])
break;
nowmustbechan =
(CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
if (nowmustbechan != CR_CHAN(chanlist[i])) {
/* channel list isn't continuous :-( */
printk(KERN_WARNING
"comedi%d: pcl816: channel list must "
"be continuous! chanlist[%i]=%d but "
"must be %d or %d!\n", dev->minor,
i, CR_CHAN(chanlist[i]), nowmustbechan,
CR_CHAN(chanlist[0]));
return 0;
}
/* well, this is next correct channel in list */
chansegment[i] = chanlist[i];
}
/* check whole chanlist */
for (i = 0, segpos = 0; i < chanlen; i++) {
DEBUG(printk("%d %d=%d %d\n",
CR_CHAN(chansegment[i % seglen]),
CR_RANGE(chansegment[i % seglen]),
CR_CHAN(chanlist[i]),
CR_RANGE(chanlist[i]));)
if (chanlist[i] != chansegment[i % seglen]) {
printk(KERN_WARNING
"comedi%d: pcl816: bad channel or range"
" number! chanlist[%i]=%d,%d,%d and not"
" %d,%d,%d!\n", dev->minor, i,
CR_CHAN(chansegment[i]),
CR_RANGE(chansegment[i]),
CR_AREF(chansegment[i]),
CR_CHAN(chanlist[i % seglen]),
CR_RANGE(chanlist[i % seglen]),
CR_AREF(chansegment[i % seglen]));
return 0; /* chan/gain list is strange */
}
}
} else {
seglen = 1;
}
return seglen; /* we can serve this with MUX logic */
}
/*
==============================================================================
Program scan/gain logic with channel list.
*/
static void
setup_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int *chanlist,
unsigned int seglen)
{
unsigned int i;
devpriv->ai_act_chanlist_len = seglen;
devpriv->ai_act_chanlist_pos = 0;
for (i = 0; i < seglen; i++) { /* store range list to card */
devpriv->ai_act_chanlist[i] = CR_CHAN(chanlist[i]);
/* select the channel, then program its gain */
outb(CR_CHAN(chanlist[i]) & 0xf, dev->iobase + PCL816_MUX);
outb(CR_RANGE(chanlist[i]), dev->iobase + PCL816_RANGE);
}
udelay(1);
/* select channel interval to scan */
outb(devpriv->ai_act_chanlist[0] |
(devpriv->ai_act_chanlist[seglen - 1] << 4),
dev->iobase + PCL816_MUX);
}
#ifdef unused
/*
==============================================================================
Enable(1)/disable(0) periodic interrupts from RTC
*/
static int set_rtc_irq_bit(unsigned char bit)
{
unsigned char val;
unsigned long flags;
if (bit == 1) {
RTC_timer_lock++;
if (RTC_timer_lock > 1)
return 0;
} else {
RTC_timer_lock--;
if (RTC_timer_lock < 0)
RTC_timer_lock = 0;
if (RTC_timer_lock > 0)
return 0;
}
save_flags(flags);
cli();
val = CMOS_READ(RTC_CONTROL);
if (bit)
val |= RTC_PIE;
else
val &= ~RTC_PIE;
CMOS_WRITE(val, RTC_CONTROL);
CMOS_READ(RTC_INTR_FLAGS);
restore_flags(flags);
return 0;
}
#endif
/*
==============================================================================
Free any resources that we have claimed
*/
static void free_resources(struct comedi_device *dev)
{
/* printk("free_resource()\n"); */
if (dev->private) {
pcl816_ai_cancel(dev, devpriv->sub_ai);
pcl816_reset(dev);
if (devpriv->dma)
free_dma(devpriv->dma);
if (devpriv->dmabuf[0])
free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
if (devpriv->dmabuf[1])
free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
#ifdef unused
if (devpriv->rtc_irq)
free_irq(devpriv->rtc_irq, dev);
if ((devpriv->dma_rtc) && (RTC_lock == 1)) {
if (devpriv->rtc_iobase)
release_region(devpriv->rtc_iobase,
devpriv->rtc_iosize);
}
#endif
}
if (dev->irq)
free_irq(dev->irq, dev);
if (dev->iobase)
release_region(dev->iobase, this_board->io_range);
/* printk("free_resource() end\n"); */
}
/*
==============================================================================
Initialization
*/
static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
unsigned long iobase;
unsigned int irq, dma;
unsigned long pages;
/* int i; */
struct comedi_subdevice *s;
/* claim our I/O space */
iobase = it->options[0];
printk("comedi%d: pcl816: board=%s, ioport=0x%03lx", dev->minor,
this_board->name, iobase);
if (!request_region(iobase, this_board->io_range, "pcl816")) {
printk("I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
if (pcl816_check(iobase)) {
printk(KERN_ERR ", I cann't detect board. FAIL!\n");
return -EIO;
}
ret = alloc_private(dev, sizeof(struct pcl816_private));
if (ret < 0)
return ret; /* Can't alloc mem */
/* set up some name stuff */
dev->board_name = this_board->name;
/* grab our IRQ */
irq = 0;
if (this_board->IRQbits != 0) { /* board supports IRQ */
irq = it->options[1];
if (irq) { /* we want to use IRQ */
if (((1 << irq) & this_board->IRQbits) == 0) {
printk
(", IRQ %u is out of allowed range, "
"DISABLING IT", irq);
irq = 0; /* Bad IRQ */
} else {
if (request_irq
(irq, interrupt_pcl816, 0, "pcl816", dev)) {
printk
(", unable to allocate IRQ %u, "
"DISABLING IT", irq);
irq = 0; /* Can't use IRQ */
} else {
printk(KERN_INFO ", irq=%u", irq);
}
}
}
}
dev->irq = irq;
if (irq) /* 1=we have allocated irq */
devpriv->irq_free = 1;
else
devpriv->irq_free = 0;
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->int816_mode = 0; /* mode of irq */
#ifdef unused
/* grab RTC for DMA operations */
devpriv->dma_rtc = 0;
if (it->options[2] > 0) { /* we want to use DMA */
if (RTC_lock == 0) {
if (!request_region(RTC_PORT(0), RTC_IO_EXTENT,
"pcl816 (RTC)"))
goto no_rtc;
}
devpriv->rtc_iobase = RTC_PORT(0);
devpriv->rtc_iosize = RTC_IO_EXTENT;
RTC_lock++;
#ifdef UNTESTED_CODE
if (!request_irq(RTC_IRQ, interrupt_pcl816_ai_mode13_dma_rtc, 0,
"pcl816 DMA (RTC)", dev)) {
devpriv->dma_rtc = 1;
devpriv->rtc_irq = RTC_IRQ;
printk(", dma_irq=%u", devpriv->rtc_irq);
} else {
RTC_lock--;
if (RTC_lock == 0) {
if (devpriv->rtc_iobase)
release_region(devpriv->rtc_iobase,
devpriv->rtc_iosize);
}
devpriv->rtc_iobase = 0;
devpriv->rtc_iosize = 0;
}
#else
printk("pcl816: RTC code missing");
#endif
}
no_rtc:
#endif
/* grab our DMA */
dma = 0;
devpriv->dma = dma;
if ((devpriv->irq_free == 0) && (devpriv->dma_rtc == 0))
goto no_dma; /* without an IRQ we can't use DMA */
if (this_board->DMAbits != 0) { /* board supports DMA */
dma = it->options[2];
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & this_board->DMAbits) == 0) {
printk(", DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, "pcl816");
if (ret) {
printk(KERN_ERR
", unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
printk(KERN_INFO ", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
printk(", unable to allocate DMA buffer, FAIL!\n");
/*
* maybe experiment with try_to_free_pages()
* will help ....
*/
return -EBUSY; /* no buffer :-( */
}
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
/* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
if (devpriv->dma_rtc == 0) { /* we must do double buffering :-( */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
printk(KERN_ERR
", unable to allocate DMA buffer, "
"FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
devpriv->hwdmaptr[1] =
virt_to_bus((void *)devpriv->dmabuf[1]);
devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE;
}
}
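/*
 * Sizing note (annotation, not part of the original driver):
 * __get_dma_pages() takes a page *order*, so pages = 2 requests
 * 2^2 = 4 pages; with a typical 4 KiB PAGE_SIZE that is the 16 KiB
 * per buffer recorded in hwdmasize[] by (1 << pages) * PAGE_SIZE above.
 */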
no_dma:
/* if (this_board->n_aochan > 0)
subdevs[1] = COMEDI_SUBD_AO;
if (this_board->n_dichan > 0)
subdevs[2] = COMEDI_SUBD_DI;
if (this_board->n_dochan > 0)
subdevs[3] = COMEDI_SUBD_DO;
*/
ret = alloc_subdevices(dev, 1);
if (ret < 0)
return ret;
s = dev->subdevices + 0;
if (this_board->n_aichan > 0) {
s->type = COMEDI_SUBD_AI;
devpriv->sub_ai = s;
dev->read_subdev = s;
s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = this_board->n_aichan;
s->subdev_flags |= SDF_DIFF;
/* printk (", %dchans DIFF DAC - %d", s->n_chan, i); */
s->maxdata = this_board->ai_maxdata;
s->len_chanlist = this_board->ai_chanlist;
s->range_table = this_board->ai_range_type;
s->cancel = pcl816_ai_cancel;
s->do_cmdtest = pcl816_ai_cmdtest;
s->do_cmd = pcl816_ai_cmd;
s->poll = pcl816_ai_poll;
s->insn_read = pcl816_ai_insn_read;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
#if 0
case COMEDI_SUBD_AO:
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = this_board->n_aochan;
s->maxdata = this_board->ao_maxdata;
s->len_chanlist = this_board->ao_chanlist;
s->range_table = this_board->ao_range_type;
break;
case COMEDI_SUBD_DI:
s->subdev_flags = SDF_READABLE;
s->n_chan = this_board->n_dichan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dichan;
s->range_table = &range_digital;
break;
case COMEDI_SUBD_DO:
s->subdev_flags = SDF_WRITABLE;
s->n_chan = this_board->n_dochan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dochan;
s->range_table = &range_digital;
break;
#endif
pcl816_reset(dev);
printk("\n");
return 0;
}
/*
==============================================================================
Removes device
*/
static int pcl816_detach(struct comedi_device *dev)
{
DEBUG(printk(KERN_INFO "comedi%d: pcl816: remove\n", dev->minor);)
free_resources(dev);
#ifdef unused
if (devpriv->dma_rtc)
RTC_lock--;
#endif
return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
brymaster5000/m7wlv_4.3 | security/selinux/netlink.c | 5151 | 2479 | /*
* Netlink event notifications for SELinux.
*
* Author: James Morris <jmorris@redhat.com>
*
* Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
#include "security.h"
static struct sock *selnl;
static int selnl_msglen(int msgtype)
{
int ret = 0;
switch (msgtype) {
case SELNL_MSG_SETENFORCE:
ret = sizeof(struct selnl_msg_setenforce);
break;
case SELNL_MSG_POLICYLOAD:
ret = sizeof(struct selnl_msg_policyload);
break;
default:
BUG();
}
return ret;
}
static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *data)
{
switch (msgtype) {
case SELNL_MSG_SETENFORCE: {
struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh);
memset(msg, 0, len);
msg->val = *((int *)data);
break;
}
case SELNL_MSG_POLICYLOAD: {
struct selnl_msg_policyload *msg = NLMSG_DATA(nlh);
memset(msg, 0, len);
msg->seqno = *((u32 *)data);
break;
}
default:
BUG();
}
}
static void selnl_notify(int msgtype, void *data)
{
int len;
sk_buff_data_t tmp;
struct sk_buff *skb;
struct nlmsghdr *nlh;
len = selnl_msglen(msgtype);
skb = alloc_skb(NLMSG_SPACE(len), GFP_USER);
if (!skb)
goto oom;
tmp = skb->tail;
nlh = NLMSG_PUT(skb, 0, 0, msgtype, len);
selnl_add_payload(nlh, len, msgtype, data);
nlh->nlmsg_len = skb->tail - tmp;
NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
netlink_broadcast(selnl, skb, 0, SELNLGRP_AVC, GFP_USER);
out:
return;
nlmsg_failure:
kfree_skb(skb);
oom:
printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
goto out;
}
void selnl_notify_setenforce(int val)
{
selnl_notify(SELNL_MSG_SETENFORCE, &val);
}
void selnl_notify_policyload(u32 seqno)
{
selnl_notify(SELNL_MSG_POLICYLOAD, &seqno);
}
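/*
 * Usage sketch (annotation): other SELinux code is expected to call
 * e.g. selnl_notify_setenforce(new_value) after the enforcing mode is
 * toggled, so userspace listeners on the SELNLGRP_AVC multicast group
 * see the event; the exact call sites vary by kernel version.
 */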
static int __init selnl_init(void)
{
selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
SELNLGRP_MAX, NULL, NULL, THIS_MODULE);
if (selnl == NULL)
panic("SELinux: Cannot create netlink socket.");
netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
return 0;
}
__initcall(selnl_init);
| gpl-2.0 |
tako0910/android_kernel_htc_msm8960 | drivers/target/iscsi/iscsi_target_device.c | 5151 | 2307 | /*******************************************************************************
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic related functions.
*
* © Copyright 2007-2011 RisingTide Systems LLC.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
{
struct se_node_acl *se_nacl;
/*
* This is a discovery session, the single queue slot was already
* assigned in iscsi_login_zero_tsih(). Since only Logout and
* Text Opcodes are allowed during discovery we do not have to worry
* about the HBA's queue depth here.
*/
if (sess->sess_ops->SessionType)
return;
se_nacl = sess->se_sess->se_node_acl;
/*
* This is a normal session, set the Session's CmdSN window to the
* struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
* has already been validated as a legal value in
* core_set_queue_depth_for_node().
*/
sess->cmdsn_window = se_nacl->queue_depth;
sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
}
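/*
 * Worked example (annotation, hypothetical queue depth): with
 * se_nacl->queue_depth = 32 and max_cmd_sn currently at N, the CmdSN
 * window becomes 32 commands and max_cmd_sn advances to N + 31, i.e.
 * CmdSNs N..N+31 are acceptable from the initiator.
 */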
void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
{
if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
return;
cmd->maxcmdsn_inc = 1;
mutex_lock(&sess->cmdsn_mutex);
sess->max_cmd_sn += 1;
pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
mutex_unlock(&sess->cmdsn_mutex);
}
| gpl-2.0 |
ChaOSChriS/android_kernel_google_msm | drivers/isdn/act2000/module.c | 7711 | 19496 | /* $Id: module.c,v 1.14.6.4 2001/09/23 22:24:32 kai Exp $
*
* ISDN lowlevel-module for the IBM ISDN-S0 Active 2000.
*
* Author Fritz Elfert
* Copyright by Fritz Elfert <fritz@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Friedemann Baitinger and IBM Germany
*
*/
#include "act2000.h"
#include "act2000_isa.h"
#include "capi.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
static unsigned short act2000_isa_ports[] =
{
0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380,
0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60,
};
static act2000_card *cards = (act2000_card *) NULL;
/* Parameters to be set by insmod */
static int act_bus = 0;
static int act_port = -1; /* -1 = Autoprobe */
static int act_irq = -1;
static char *act_id = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
MODULE_DESCRIPTION("ISDN4Linux: Driver for IBM Active 2000 ISDN card");
MODULE_AUTHOR("Fritz Elfert");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(act_bus, "BusType of first card, 1=ISA, 2=MCA, 3=PCMCIA, currently only ISA");
MODULE_PARM_DESC(act_port, "Base port address of first card");
MODULE_PARM_DESC(act_irq, "IRQ of first card");
MODULE_PARM_DESC(act_id, "ID-String of first card");
module_param(act_bus, int, 0);
module_param(act_port, int, 0);
module_param(act_irq, int, 0);
module_param(act_id, charp, 0);
static int act2000_addcard(int, int, int, char *);
static act2000_chan *
find_channel(act2000_card *card, int channel)
{
if ((channel >= 0) && (channel < ACT2000_BCH))
return &(card->bch[channel]);
printk(KERN_WARNING "act2000: Invalid channel %d\n", channel);
return NULL;
}
/*
* Free MSN list
*/
static void
act2000_clear_msn(act2000_card *card)
{
struct msn_entry *p = card->msn_list;
struct msn_entry *q;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
card->msn_list = NULL;
spin_unlock_irqrestore(&card->lock, flags);
while (p) {
q = p->next;
kfree(p);
p = q;
}
}
/*
* Find an MSN entry in the list.
* If ia5 != 0, return IA5-encoded EAZ, else
* return a bitmask with corresponding bit set.
*/
static __u16
act2000_find_msn(act2000_card *card, char *msn, int ia5)
{
struct msn_entry *p = card->msn_list;
__u8 eaz = '0';
while (p) {
if (!strcmp(p->msn, msn)) {
eaz = p->eaz;
break;
}
p = p->next;
}
if (!ia5)
return (1 << (eaz - '0'));
else
return eaz;
}
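/*
 * Example (annotation, hypothetical values): if the list maps EAZ '3'
 * to MSN "12345678", act2000_find_msn(card, "12345678", 1) returns the
 * IA5 character '3', while ia5 == 0 yields the bitmask
 * 1 << ('3' - '0') = 0x08. An unknown MSN falls back to EAZ '0'.
 */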
/*
* Find an EAZ entry in the list.
* return a string with corresponding msn.
*/
char *
act2000_find_eaz(act2000_card *card, char eaz)
{
struct msn_entry *p = card->msn_list;
while (p) {
if (p->eaz == eaz)
return (p->msn);
p = p->next;
}
return ("\0");
}
/*
* Add or delete an MSN to the MSN list
*
* First character of msneaz is EAZ, rest is MSN.
* If length of eazmsn is 1, delete that entry.
*/
static int
act2000_set_msn(act2000_card *card, char *eazmsn)
{
struct msn_entry *p = card->msn_list;
struct msn_entry *q = NULL;
unsigned long flags;
int i;
if (!strlen(eazmsn))
return 0;
if (strlen(eazmsn) > 16)
return -EINVAL;
for (i = 0; i < strlen(eazmsn); i++)
if (!isdigit(eazmsn[i]))
return -EINVAL;
if (strlen(eazmsn) == 1) {
/* Delete a single MSN */
while (p) {
if (p->eaz == eazmsn[0]) {
spin_lock_irqsave(&card->lock, flags);
if (q)
q->next = p->next;
else
card->msn_list = p->next;
spin_unlock_irqrestore(&card->lock, flags);
kfree(p);
printk(KERN_DEBUG
"Mapping for EAZ %c deleted\n",
eazmsn[0]);
return 0;
}
q = p;
p = p->next;
}
return 0;
}
/* Add a single MSN */
while (p) {
/* Found in list, replace MSN */
if (p->eaz == eazmsn[0]) {
spin_lock_irqsave(&card->lock, flags);
strcpy(p->msn, &eazmsn[1]);
spin_unlock_irqrestore(&card->lock, flags);
printk(KERN_DEBUG
"Mapping for EAZ %c changed to %s\n",
eazmsn[0],
&eazmsn[1]);
return 0;
}
p = p->next;
}
/* Not found in list, add new entry */
p = kmalloc(sizeof(msn_entry), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->eaz = eazmsn[0];
strcpy(p->msn, &eazmsn[1]);
p->next = card->msn_list;
spin_lock_irqsave(&card->lock, flags);
card->msn_list = p;
spin_unlock_irqrestore(&card->lock, flags);
printk(KERN_DEBUG
"Mapping %c -> %s added\n",
eazmsn[0],
&eazmsn[1]);
return 0;
}
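/*
 * Example (annotation, hypothetical values): act2000_set_msn(card,
 * "312345678") maps EAZ '3' to MSN "12345678", replacing an existing
 * entry for '3' if present, while the single-character string "3"
 * deletes that mapping.
 */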
static void
act2000_transmit(struct work_struct *work)
{
struct act2000_card *card =
container_of(work, struct act2000_card, snd_tq);
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_send(card);
break;
case ACT2000_BUS_PCMCIA:
case ACT2000_BUS_MCA:
default:
printk(KERN_WARNING
"act2000_transmit: Illegal bustype %d\n", card->bus);
}
}
static void
act2000_receive(struct work_struct *work)
{
struct act2000_card *card =
container_of(work, struct act2000_card, poll_tq);
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_receive(card);
break;
case ACT2000_BUS_PCMCIA:
case ACT2000_BUS_MCA:
default:
printk(KERN_WARNING
"act2000_receive: Illegal bustype %d\n", card->bus);
}
}
static void
act2000_poll(unsigned long data)
{
act2000_card *card = (act2000_card *)data;
unsigned long flags;
act2000_receive(&card->poll_tq);
spin_lock_irqsave(&card->lock, flags);
mod_timer(&card->ptimer, jiffies + 3);
spin_unlock_irqrestore(&card->lock, flags);
}
static int
act2000_command(act2000_card *card, isdn_ctrl *c)
{
ulong a;
act2000_chan *chan;
act2000_cdef cdef;
isdn_ctrl cmd;
char tmp[17];
int ret;
unsigned long flags;
void __user *arg;
switch (c->command) {
case ISDN_CMD_IOCTL:
memcpy(&a, c->parm.num, sizeof(ulong));
arg = (void __user *)a;
switch (c->arg) {
case ACT2000_IOCTL_LOADBOOT:
switch (card->bus) {
case ACT2000_BUS_ISA:
ret = act2000_isa_download(card,
arg);
if (!ret) {
card->flags |= ACT2000_FLAGS_LOADED;
if (!(card->flags & ACT2000_FLAGS_IVALID)) {
card->ptimer.expires = jiffies + 3;
card->ptimer.function = act2000_poll;
card->ptimer.data = (unsigned long)card;
add_timer(&card->ptimer);
}
actcapi_manufacturer_req_errh(card);
}
break;
default:
printk(KERN_WARNING
"act2000: Illegal BUS type %d\n",
card->bus);
ret = -EIO;
}
return ret;
case ACT2000_IOCTL_SETPROTO:
card->ptype = a ? ISDN_PTYPE_EURO : ISDN_PTYPE_1TR6;
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return 0;
actcapi_manufacturer_req_net(card);
return 0;
case ACT2000_IOCTL_SETMSN:
if (copy_from_user(tmp, arg,
sizeof(tmp)))
return -EFAULT;
if ((ret = act2000_set_msn(card, tmp)))
return ret;
if (card->flags & ACT2000_FLAGS_RUNNING)
return (actcapi_manufacturer_req_msn(card));
return 0;
case ACT2000_IOCTL_ADDCARD:
if (copy_from_user(&cdef, arg,
sizeof(cdef)))
return -EFAULT;
if (act2000_addcard(cdef.bus, cdef.port, cdef.irq, cdef.id))
return -EIO;
return 0;
case ACT2000_IOCTL_TEST:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return 0;
default:
return -EINVAL;
}
break;
case ISDN_CMD_DIAL:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
spin_lock_irqsave(&card->lock, flags);
if (chan->fsm_state != ACT2000_STATE_NULL) {
spin_unlock_irqrestore(&card->lock, flags);
printk(KERN_WARNING "Dial on channel with state %d\n",
chan->fsm_state);
return -EBUSY;
}
if (card->ptype == ISDN_PTYPE_EURO)
tmp[0] = act2000_find_msn(card, c->parm.setup.eazmsn, 1);
else
tmp[0] = c->parm.setup.eazmsn[0];
chan->fsm_state = ACT2000_STATE_OCALL;
chan->callref = 0xffff;
spin_unlock_irqrestore(&card->lock, flags);
ret = actcapi_connect_req(card, chan, c->parm.setup.phone,
tmp[0], c->parm.setup.si1,
c->parm.setup.si2);
if (ret) {
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg &= 0x0f;
card->interface.statcallb(&cmd);
}
return ret;
case ISDN_CMD_ACCEPTD:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
if (chan->fsm_state == ACT2000_STATE_ICALL)
actcapi_select_b2_protocol_req(card, chan);
return 0;
case ISDN_CMD_ACCEPTB:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return 0;
case ISDN_CMD_HANGUP:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
switch (chan->fsm_state) {
case ACT2000_STATE_ICALL:
case ACT2000_STATE_BSETUP:
actcapi_connect_resp(card, chan, 0x15);
break;
case ACT2000_STATE_ACTIVE:
actcapi_disconnect_b3_req(card, chan);
break;
}
return 0;
case ISDN_CMD_SETEAZ:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
if (strlen(c->parm.num)) {
if (card->ptype == ISDN_PTYPE_EURO) {
chan->eazmask = act2000_find_msn(card, c->parm.num, 0);
}
if (card->ptype == ISDN_PTYPE_1TR6) {
int i;
chan->eazmask = 0;
for (i = 0; i < strlen(c->parm.num); i++)
if (isdigit(c->parm.num[i]))
chan->eazmask |= (1 << (c->parm.num[i] - '0'));
}
} else
chan->eazmask = 0x3ff;
actcapi_listen_req(card);
return 0;
case ISDN_CMD_CLREAZ:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->eazmask = 0;
actcapi_listen_req(card);
return 0;
case ISDN_CMD_SETL2:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->l2prot = (c->arg >> 8);
return 0;
case ISDN_CMD_SETL3:
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg >> 8) != ISDN_PROTO_L3_TRANS) {
printk(KERN_WARNING "L3 protocol unknown\n");
return -1;
}
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->l3prot = (c->arg >> 8);
return 0;
}
return -EINVAL;
}
static int
act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb)
{
struct sk_buff *xmit_skb;
int len;
act2000_chan *chan;
actcapi_msg *msg;
if (!(chan = find_channel(card, channel)))
return -1;
if (chan->fsm_state != ACT2000_STATE_ACTIVE)
return -1;
len = skb->len;
if ((chan->queued + len) >= ACT2000_MAX_QUEUED)
return 0;
if (!len)
return 0;
if (skb_headroom(skb) < 19) {
printk(KERN_WARNING "act2000_sendbuf: Headroom only %d\n",
skb_headroom(skb));
xmit_skb = alloc_skb(len + 19, GFP_ATOMIC);
if (!xmit_skb) {
printk(KERN_WARNING "act2000_sendbuf: Out of memory\n");
return 0;
}
skb_reserve(xmit_skb, 19);
skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len);
} else {
xmit_skb = skb_clone(skb, GFP_ATOMIC);
if (!xmit_skb) {
printk(KERN_WARNING "act2000_sendbuf: Out of memory\n");
return 0;
}
}
dev_kfree_skb(skb);
msg = (actcapi_msg *)skb_push(xmit_skb, 19);
msg->hdr.len = 19 + len;
msg->hdr.applicationID = 1;
msg->hdr.cmd.cmd = 0x86;
msg->hdr.cmd.subcmd = 0x00;
msg->hdr.msgnum = actcapi_nextsmsg(card);
msg->msg.data_b3_req.datalen = len;
msg->msg.data_b3_req.blocknr = (msg->hdr.msgnum & 0xff);
msg->msg.data_b3_req.fakencci = MAKE_NCCI(chan->plci, 0, chan->ncci);
msg->msg.data_b3_req.flags = ack; /* Will be set to 0 on actual sending */
actcapi_debug_msg(xmit_skb, 1);
chan->queued += len;
skb_queue_tail(&card->sndq, xmit_skb);
act2000_schedule_tx(card);
return len;
}
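/*
 * Note (annotation): the 19 bytes pushed above carry the CAPI
 * DATA_B3_REQ header built in 'msg'; when the upper layer left fewer
 * than 19 bytes of headroom, the payload is copied into a freshly
 * allocated skb instead of the original being cloned.
 */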
/* Read the Status-replies from the Interface */
static int
act2000_readstatus(u_char __user *buf, int len, act2000_card *card)
{
int count;
u_char __user *p;
for (p = buf, count = 0; count < len; p++, count++) {
if (card->status_buf_read == card->status_buf_write)
return count;
put_user(*card->status_buf_read++, p);
if (card->status_buf_read > card->status_buf_end)
card->status_buf_read = card->status_buf;
}
return count;
}
/*
* Find card with given driverId
*/
static inline act2000_card *
act2000_findcard(int driverid)
{
act2000_card *p = cards;
while (p) {
if (p->myid == driverid)
return p;
p = p->next;
}
return (act2000_card *) 0;
}
/*
* Wrapper functions for interface to linklevel
*/
static int
if_command(isdn_ctrl *c)
{
act2000_card *card = act2000_findcard(c->driver);
if (card)
return (act2000_command(card, c));
printk(KERN_ERR
"act2000: if_command %d called with invalid driverId %d!\n",
c->command, c->driver);
return -ENODEV;
}
static int
if_writecmd(const u_char __user *buf, int len, int id, int channel)
{
act2000_card *card = act2000_findcard(id);
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (len);
}
printk(KERN_ERR
"act2000: if_writecmd called with invalid driverId!\n");
return -ENODEV;
}
static int
if_readstatus(u_char __user *buf, int len, int id, int channel)
{
act2000_card *card = act2000_findcard(id);
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_readstatus(buf, len, card));
}
printk(KERN_ERR
"act2000: if_readstatus called with invalid driverId!\n");
return -ENODEV;
}
static int
if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
{
act2000_card *card = act2000_findcard(id);
if (card) {
if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_sendbuf(card, channel, ack, skb));
}
printk(KERN_ERR
"act2000: if_sendbuf called with invalid driverId!\n");
return -ENODEV;
}
/*
* Allocate a new card-struct, initialize it
* link it into cards-list.
*/
static void
act2000_alloccard(int bus, int port, int irq, char *id)
{
int i;
act2000_card *card;
if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) {
printk(KERN_WARNING
"act2000: (%s) Could not allocate card-struct.\n", id);
return;
}
spin_lock_init(&card->lock);
spin_lock_init(&card->mnlock);
skb_queue_head_init(&card->sndq);
skb_queue_head_init(&card->rcvq);
skb_queue_head_init(&card->ackq);
INIT_WORK(&card->snd_tq, act2000_transmit);
INIT_WORK(&card->rcv_tq, actcapi_dispatch);
INIT_WORK(&card->poll_tq, act2000_receive);
init_timer(&card->ptimer);
card->interface.owner = THIS_MODULE;
card->interface.channels = ACT2000_BCH;
card->interface.maxbufsize = 4000;
card->interface.command = if_command;
card->interface.writebuf_skb = if_sendbuf;
card->interface.writecmd = if_writecmd;
card->interface.readstat = if_readstatus;
card->interface.features =
ISDN_FEATURE_L2_X75I |
ISDN_FEATURE_L2_HDLC |
ISDN_FEATURE_L3_TRANS |
ISDN_FEATURE_P_UNKNOWN;
card->interface.hl_hdrlen = 20;
card->ptype = ISDN_PTYPE_EURO;
strlcpy(card->interface.id, id, sizeof(card->interface.id));
for (i = 0; i < ACT2000_BCH; i++) {
card->bch[i].plci = 0x8000;
card->bch[i].ncci = 0x8000;
card->bch[i].l2prot = ISDN_PROTO_L2_X75I;
card->bch[i].l3prot = ISDN_PROTO_L3_TRANS;
}
card->myid = -1;
card->bus = bus;
card->port = port;
card->irq = irq;
card->next = cards;
cards = card;
}
/*
* register card at linklevel
*/
static int
act2000_registercard(act2000_card *card)
{
switch (card->bus) {
case ACT2000_BUS_ISA:
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: Illegal BUS type %d\n",
card->bus);
return -1;
}
if (!register_isdn(&card->interface)) {
printk(KERN_WARNING
"act2000: Unable to register %s\n",
card->interface.id);
return -1;
}
card->myid = card->interface.channels;
sprintf(card->regname, "act2000-isdn (%s)", card->interface.id);
return 0;
}
static void
unregister_card(act2000_card *card)
{
isdn_ctrl cmd;
cmd.command = ISDN_STAT_UNLOAD;
cmd.driver = card->myid;
card->interface.statcallb(&cmd);
switch (card->bus) {
case ACT2000_BUS_ISA:
act2000_isa_release(card);
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: Invalid BUS type %d\n",
card->bus);
break;
}
}
static int
act2000_addcard(int bus, int port, int irq, char *id)
{
act2000_card *p;
act2000_card *q = NULL;
int initialized;
int added = 0;
int failed = 0;
int i;
if (!bus)
bus = ACT2000_BUS_ISA;
if (port != -1) {
/* Port defined, do fixed setup */
act2000_alloccard(bus, port, irq, id);
} else {
/* No port defined, perform autoprobing.
* This may result in more than one card detected.
*/
switch (bus) {
case ACT2000_BUS_ISA:
for (i = 0; i < ARRAY_SIZE(act2000_isa_ports); i++)
if (act2000_isa_detect(act2000_isa_ports[i])) {
printk(KERN_INFO "act2000: Detected "
"ISA card at port 0x%x\n",
act2000_isa_ports[i]);
act2000_alloccard(bus,
act2000_isa_ports[i], irq, id);
}
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: addcard: Invalid BUS type %d\n", bus);
}
}
if (!cards)
return 1;
p = cards;
while (p) {
initialized = 0;
if (!p->interface.statcallb) {
/* Not yet registered.
* Try to register and activate it.
*/
added++;
switch (p->bus) {
case ACT2000_BUS_ISA:
if (act2000_isa_detect(p->port)) {
if (act2000_registercard(p))
break;
if (act2000_isa_config_port(p, p->port)) {
printk(KERN_WARNING
"act2000: Could not request port 0x%04x\n",
p->port);
unregister_card(p);
p->interface.statcallb = NULL;
break;
}
if (act2000_isa_config_irq(p, p->irq)) {
printk(KERN_INFO
"act2000: No IRQ available, fallback to polling\n");
/* Fall back to polled operation */
p->irq = 0;
}
printk(KERN_INFO
"act2000: ISA"
"-type card at port "
"0x%04x ",
p->port);
if (p->irq)
printk("irq %d\n", p->irq);
else
printk("polled\n");
initialized = 1;
}
break;
case ACT2000_BUS_MCA:
case ACT2000_BUS_PCMCIA:
default:
printk(KERN_WARNING
"act2000: addcard: Invalid BUS type %d\n",
p->bus);
}
} else
/* Card already initialized */
initialized = 1;
if (initialized) {
/* Init OK, next card ... */
q = p;
p = p->next;
} else {
/* Init failed, remove card from list, free memory */
printk(KERN_WARNING
"act2000: Initialization of %s failed\n",
p->interface.id);
if (q) {
q->next = p->next;
kfree(p);
p = q->next;
} else {
cards = p->next;
kfree(p);
p = cards;
}
failed++;
}
}
return (added - failed);
}
#define DRIVERNAME "IBM Active 2000 ISDN driver"
static int __init act2000_init(void)
{
printk(KERN_INFO "%s\n", DRIVERNAME);
if (!cards)
act2000_addcard(act_bus, act_port, act_irq, act_id);
if (!cards)
printk(KERN_INFO "act2000: No cards defined yet\n");
return 0;
}
static void __exit act2000_exit(void)
{
act2000_card *card = cards;
act2000_card *last;
while (card) {
unregister_card(card);
del_timer(&card->ptimer);
card = card->next;
}
card = cards;
while (card) {
last = card;
card = card->next;
act2000_clear_msn(last);
kfree(last);
}
printk(KERN_INFO "%s unloaded\n", DRIVERNAME);
}
module_init(act2000_init);
module_exit(act2000_exit);
| gpl-2.0 |
LeeDroid-/Pyramid-2.6.35 | arch/mips/pci/fixup-lemote2f.c | 8479 | 4879 | /*
* Copyright (C) 2008 Lemote Technology
* Copyright (C) 2004 ICT CAS
* Author: Li xiaoyu, lixy@ict.ac.cn
*
* Copyright (C) 2007 Lemote, Inc.
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <loongson.h>
#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>
/* PCI interrupt pins
*
* These should not be changed, or you should consider loongson2f interrupt
* register and your pci card dispatch
*/
#define PCIA 4
#define PCIB 5
#define PCIC 6
#define PCID 7
/* all the pci device has the PCIA pin, check the datasheet. */
static char irq_tab[][5] __initdata = {
/* INTA INTB INTC INTD */
{0, 0, 0, 0, 0}, /* 11: Unused */
{0, 0, 0, 0, 0}, /* 12: Unused */
{0, 0, 0, 0, 0}, /* 13: Unused */
{0, 0, 0, 0, 0}, /* 14: Unused */
{0, 0, 0, 0, 0}, /* 15: Unused */
{0, 0, 0, 0, 0}, /* 16: Unused */
{0, PCIA, 0, 0, 0}, /* 17: RTL8110-0 */
{0, PCIB, 0, 0, 0}, /* 18: RTL8110-1 */
{0, PCIC, 0, 0, 0}, /* 19: SiI3114 */
{0, PCID, 0, 0, 0}, /* 20: 3-ports nec usb */
{0, PCIA, PCIB, PCIC, PCID}, /* 21: PCI-SLOT */
{0, 0, 0, 0, 0}, /* 22: Unused */
{0, 0, 0, 0, 0}, /* 23: Unused */
{0, 0, 0, 0, 0}, /* 24: Unused */
{0, 0, 0, 0, 0}, /* 25: Unused */
{0, 0, 0, 0, 0}, /* 26: Unused */
{0, 0, 0, 0, 0}, /* 27: Unused */
};
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int virq;
if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536)
&& (PCI_SLOT(dev->devfn) < 32)) {
virq = irq_tab[slot][pin];
printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin,
virq + LOONGSON_IRQ_BASE);
if (virq != 0)
return LOONGSON_IRQ_BASE + virq;
else
return 0;
} else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) { /* cs5536 */
switch (PCI_FUNC(dev->devfn)) {
case 2:
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_IDE_INTR);
return CS5536_IDE_INTR; /* for IDE */
case 3:
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_ACC_INTR);
return CS5536_ACC_INTR; /* for AUDIO */
case 4: /* for OHCI */
case 5: /* for EHCI */
case 6: /* for UDC */
case 7: /* for OTG */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_USB_INTR);
return CS5536_USB_INTR;
}
return dev->irq;
} else {
printk(KERN_INFO " strange pci slot number.\n");
return 0;
}
}
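/*
 * Routing summary (annotation): for the on-chip CS5536
 * (PCI_IDSEL_CS5536) the IRQ is chosen by PCI function number --
 * function 2 gets CS5536_IDE_INTR, function 3 CS5536_ACC_INTR, and
 * functions 4-7 (OHCI, EHCI, UDC, OTG) all share CS5536_USB_INTR;
 * every other slot is looked up in irq_tab[] and, when nonzero,
 * offset by LOONGSON_IRQ_BASE.
 */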
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
return 0;
}
/* CS5536 SPEC. fixup */
static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
{
/* the uart1 and uart2 interrupt in PIC is enabled as default */
pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
}
static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
{
/* setting the mux pin to the IDE function */
pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
CS5536_IDE_FLASH_SIGNATURE);
}
static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
{
/* enable the AUDIO interrupt in PIC */
pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
}
static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
{
/* enable the OHCI interrupt in PIC */
/* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
}
static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
{
u32 hi, lo;
/* Serial short detect enable */
_rdmsr(USB_MSR_REG(USB_CONFIG), &hi, &lo);
_wrmsr(USB_MSR_REG(USB_CONFIG), (1 << 1) | (1 << 3), lo);
/* setting the USB2.0 micro frame length */
pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
}
static void __init loongson_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
pci_read_config_dword(pdev, 0xe0, &val);
/* Only 2 port be used */
pci_write_config_dword(pdev, 0xe0, (val & ~3) | 0x2);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
loongson_cs5536_isa_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_OHC,
loongson_cs5536_ohci_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_EHC,
loongson_cs5536_ehci_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO,
loongson_cs5536_acc_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE,
loongson_cs5536_ide_fixup);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
loongson_nec_fixup);
| gpl-2.0 |
MaxiCM-Private/android_kernel_lge_jagnm | arch/mips/txx9/rbtx4938/setup.c | 8735 | 10511 | /*
* Setup pointers to hardware-dependent routines.
* Copyright (C) 2000-2001 Toshiba Corporation
*
* 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
* terms of the GNU General Public License version 2. This program is
* licensed "as is" without any warranty of any kind, whether express
* or implied.
*
* Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mtd/physmap.h>
#include <asm/reboot.h>
#include <asm/io.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4938.h>
#include <linux/spi/spi.h>
#include <asm/txx9/spi.h>
#include <asm/txx9pio.h>
static void rbtx4938_machine_restart(char *command)
{
local_irq_disable();
writeb(1, rbtx4938_softresetlock_addr);
writeb(1, rbtx4938_sfvol_addr);
writeb(1, rbtx4938_softreset_addr);
/* fallback */
(*_machine_halt)();
}
static void __init rbtx4938_pci_setup(void)
{
#ifdef CONFIG_PCI
int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
struct pci_controller *c = &txx9_primary_pcic;
register_pci_controller(c);
if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
txx9_pci_option =
(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
TXX9_PCI_OPT_CLK_66; /* already configured */
/* Reset PCI Bus */
writeb(0, rbtx4938_pcireset_addr);
/* Reset PCIC */
txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
TXX9_PCI_OPT_CLK_66)
tx4938_pciclk66_setup();
mdelay(10);
/* clear PCIC reset */
txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
writeb(1, rbtx4938_pcireset_addr);
iob();
tx4938_report_pciclk();
tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
TXX9_PCI_OPT_CLK_AUTO &&
txx9_pci66_check(c, 0, 0)) {
/* Reset PCI Bus */
writeb(0, rbtx4938_pcireset_addr);
/* Reset PCIC */
txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
tx4938_pciclk66_setup();
mdelay(10);
/* clear PCIC reset */
txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
writeb(1, rbtx4938_pcireset_addr);
iob();
/* Reinitialize PCIC */
tx4938_report_pciclk();
tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
}
if (__raw_readq(&tx4938_ccfgptr->pcfg) &
(TX4938_PCFG_ETH0_SEL|TX4938_PCFG_ETH1_SEL)) {
/* Reset PCIC1 */
txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
/* PCI1DMD==0 => PCI1CLK==GBUSCLK/2 => PCI66 */
if (!(__raw_readq(&tx4938_ccfgptr->ccfg)
& TX4938_CCFG_PCI1DMD))
tx4938_ccfg_set(TX4938_CCFG_PCI1_66);
mdelay(10);
/* clear PCIC1 reset */
txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
tx4938_report_pci1clk();
/* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
register_pci_controller(c);
tx4927_pcic_setup(tx4938_pcic1ptr, c, 0);
}
tx4938_setup_pcierr_irq();
#endif /* CONFIG_PCI */
}
/* SPI support */
/* chip select for SPI devices */
#define SEEPROM1_CS 7 /* PIO7 */
#define SEEPROM2_CS 0 /* IOC */
#define SEEPROM3_CS 1 /* IOC */
#define SRTC_CS 2 /* IOC */
#define SPI_BUSNO 0
static int __init rbtx4938_ethaddr_init(void)
{
#ifdef CONFIG_PCI
unsigned char dat[17];
unsigned char sum;
int i;
/* 0-3: "MAC\0", 4-9:eth0, 10-15:eth1, 16:sum */
if (spi_eeprom_read(SPI_BUSNO, SEEPROM1_CS, 0, dat, sizeof(dat))) {
printk(KERN_ERR "seeprom: read error.\n");
return -ENODEV;
} else {
if (strcmp(dat, "MAC") != 0)
printk(KERN_WARNING "seeprom: bad signature.\n");
for (i = 0, sum = 0; i < sizeof(dat); i++)
sum += dat[i];
if (sum)
printk(KERN_WARNING "seeprom: bad checksum.\n");
}
tx4938_ethaddr_init(&dat[4], &dat[4 + 6]);
#endif /* CONFIG_PCI */
return 0;
}
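/*
 * EEPROM layout (annotation, inferred from the code above): bytes 0-3
 * hold the "MAC\0" signature, 4-9 the eth0 address, 10-15 the eth1
 * address, and byte 16 is chosen so the byte-wise sum of all 17 bytes
 * is 0 modulo 256 -- a nonzero sum triggers the checksum warning.
 */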
static void __init rbtx4938_spi_setup(void)
{
/* set SPI_SEL */
txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_SPI_SEL);
}
static struct resource rbtx4938_fpga_resource;
static void __init rbtx4938_time_init(void)
{
tx4938_time_init(0);
}
static void __init rbtx4938_mem_setup(void)
{
unsigned long long pcfg;
if (txx9_master_clock == 0)
txx9_master_clock = 25000000; /* 25MHz */
tx4938_setup();
#ifdef CONFIG_PCI
txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
txx9_board_pcibios_setup = tx4927_pcibios_setup;
#else
set_io_port_base(RBTX4938_ETHER_BASE);
#endif
tx4938_sio_init(7372800, 0);
#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61
pr_info("PIOSEL: disabling both ATA and NAND selection\n");
txx9_clear64(&tx4938_ccfgptr->pcfg,
TX4938_PCFG_NDF_SEL | TX4938_PCFG_ATA_SEL);
#endif
#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND
pr_info("PIOSEL: enabling NAND selection\n");
txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
#endif
#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_ATA
pr_info("PIOSEL: enabling ATA selection\n");
txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
#endif
#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP
pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
pr_info("PIOSEL: NAND %s, ATA %s\n",
(pcfg & TX4938_PCFG_NDF_SEL) ? "enabled" : "disabled",
(pcfg & TX4938_PCFG_ATA_SEL) ? "enabled" : "disabled");
#endif
rbtx4938_spi_setup();
pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg); /* updated */
/* fixup piosel */
if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
TX4938_PCFG_ATA_SEL)
writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x04,
rbtx4938_piosel_addr);
else if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
TX4938_PCFG_NDF_SEL)
writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x08,
rbtx4938_piosel_addr);
else
writeb(readb(rbtx4938_piosel_addr) & ~(0x08 | 0x04),
rbtx4938_piosel_addr);
rbtx4938_fpga_resource.name = "FPGA Registers";
rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR);
rbtx4938_fpga_resource.end = CPHYSADDR(RBTX4938_FPGA_REG_ADDR) + 0xffff;
rbtx4938_fpga_resource.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&txx9_ce_res[2], &rbtx4938_fpga_resource))
printk(KERN_ERR "request resource for fpga failed\n");
_machine_restart = rbtx4938_machine_restart;
writeb(0xff, rbtx4938_led_addr);
printk(KERN_INFO "RBTX4938 --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
readb(rbtx4938_fpga_rev_addr),
readb(rbtx4938_dipsw_addr), readb(rbtx4938_bdipsw_addr));
}
static void __init rbtx4938_ne_init(void)
{
struct resource res[] = {
{
.start = RBTX4938_RTL_8019_BASE,
.end = RBTX4938_RTL_8019_BASE + 0x20 - 1,
.flags = IORESOURCE_IO,
}, {
.start = RBTX4938_RTL_8019_IRQ,
.flags = IORESOURCE_IRQ,
}
};
platform_device_register_simple("ne", -1, res, ARRAY_SIZE(res));
}
static DEFINE_SPINLOCK(rbtx4938_spi_gpio_lock);
static void rbtx4938_spi_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
u8 val;
unsigned long flags;
spin_lock_irqsave(&rbtx4938_spi_gpio_lock, flags);
val = readb(rbtx4938_spics_addr);
if (value)
val |= 1 << offset;
else
val &= ~(1 << offset);
writeb(val, rbtx4938_spics_addr);
mmiowb();
spin_unlock_irqrestore(&rbtx4938_spi_gpio_lock, flags);
}
static int rbtx4938_spi_gpio_dir_out(struct gpio_chip *chip,
unsigned int offset, int value)
{
rbtx4938_spi_gpio_set(chip, offset, value);
return 0;
}
static struct gpio_chip rbtx4938_spi_gpio_chip = {
.set = rbtx4938_spi_gpio_set,
.direction_output = rbtx4938_spi_gpio_dir_out,
.label = "RBTX4938-SPICS",
.base = 16,
.ngpio = 3,
};
static int __init rbtx4938_spi_init(void)
{
struct spi_board_info srtc_info = {
.modalias = "rtc-rs5c348",
.max_speed_hz = 1000000, /* 1.0Mbps @ Vdd 2.0V */
.bus_num = 0,
.chip_select = 16 + SRTC_CS,
/* Mode 1 (High-Active, Shift-Then-Sample), High Active CS */
.mode = SPI_MODE_1 | SPI_CS_HIGH,
};
spi_register_board_info(&srtc_info, 1);
spi_eeprom_register(SPI_BUSNO, SEEPROM1_CS, 128);
spi_eeprom_register(SPI_BUSNO, 16 + SEEPROM2_CS, 128);
spi_eeprom_register(SPI_BUSNO, 16 + SEEPROM3_CS, 128);
gpio_request(16 + SRTC_CS, "rtc-rs5c348");
gpio_direction_output(16 + SRTC_CS, 0);
gpio_request(SEEPROM1_CS, "seeprom1");
gpio_direction_output(SEEPROM1_CS, 1);
gpio_request(16 + SEEPROM2_CS, "seeprom2");
gpio_direction_output(16 + SEEPROM2_CS, 1);
gpio_request(16 + SEEPROM3_CS, "seeprom3");
gpio_direction_output(16 + SEEPROM3_CS, 1);
tx4938_spi_init(SPI_BUSNO);
return 0;
}
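/*
 * Chip-select note (annotation): chip_select values of the form 16 + n
 * refer to the FPGA SPICS lines driven through rbtx4938_spi_gpio_chip
 * (gpio base 16, 3 lines) defined above, while plain SEEPROM1_CS (7)
 * is a TX4938 PIO pin -- hence the 16 + offset form for the RTC and
 * seeprom2/3.
 */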
static void __init rbtx4938_mtd_init(void)
{
struct physmap_flash_data pdata = {
.width = 4,
};
switch (readb(rbtx4938_bdipsw_addr) & 7) {
case 0:
/* Boot */
txx9_physmap_flash_init(0, 0x1fc00000, 0x400000, &pdata);
/* System */
txx9_physmap_flash_init(1, 0x1e000000, 0x1000000, &pdata);
break;
case 1:
/* System */
txx9_physmap_flash_init(0, 0x1f000000, 0x1000000, &pdata);
/* Boot */
txx9_physmap_flash_init(1, 0x1ec00000, 0x400000, &pdata);
break;
case 2:
/* Ext */
txx9_physmap_flash_init(0, 0x1f000000, 0x1000000, &pdata);
/* System */
txx9_physmap_flash_init(1, 0x1e000000, 0x1000000, &pdata);
/* Boot */
txx9_physmap_flash_init(2, 0x1dc00000, 0x400000, &pdata);
break;
case 3:
/* Boot */
txx9_physmap_flash_init(1, 0x1bc00000, 0x400000, &pdata);
/* System */
txx9_physmap_flash_init(2, 0x1a000000, 0x1000000, &pdata);
break;
}
}
static void __init rbtx4938_arch_init(void)
{
gpiochip_add(&rbtx4938_spi_gpio_chip);
rbtx4938_pci_setup();
rbtx4938_spi_init();
}
static void __init rbtx4938_device_init(void)
{
rbtx4938_ethaddr_init();
rbtx4938_ne_init();
tx4938_wdt_init();
rbtx4938_mtd_init();
/* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */
tx4938_ndfmc_init(10, 35);
tx4938_ata_init(RBTX4938_IRQ_IOC_ATA, 0, 1);
tx4938_dmac_init(0, 2);
tx4938_aclc_init();
platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
tx4938_sramc_init();
txx9_iocled_init(RBTX4938_LED_ADDR - IO_BASE, -1, 8, 1, "green", NULL);
}
struct txx9_board_vec rbtx4938_vec __initdata = {
.system = "Toshiba RBTX4938",
.prom_init = rbtx4938_prom_init,
.mem_setup = rbtx4938_mem_setup,
.irq_setup = rbtx4938_irq_setup,
.time_init = rbtx4938_time_init,
.device_init = rbtx4938_device_init,
.arch_init = rbtx4938_arch_init,
#ifdef CONFIG_PCI
.pci_map_irq = rbtx4938_pci_map_irq,
#endif
};
| gpl-2.0 |
NoelMacwan/android_kernel_sony_msm8226 | net/ipv6/xfrm6_input.c | 11807 | 3071 | /*
* xfrm6_input.c: based on net/ipv4/xfrm4_input.c
*
* Authors:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* YOSHIFUJI Hideaki @USAGI
* IPv6 support
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
{
return xfrm6_extract_header(skb);
}
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
return xfrm_input(skb, nexthdr, spi, 0);
}
EXPORT_SYMBOL(xfrm6_rcv_spi);
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
skb_network_header(skb)[IP6CB(skb)->nhoff] =
XFRM_MODE_SKB_CB(skb)->protocol;
#ifndef CONFIG_NETFILTER
if (!async)
return 1;
#endif
ipv6_hdr(skb)->payload_len = htons(skb->len);
__skb_push(skb, skb->data - skb_network_header(skb));
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
ip6_rcv_finish);
return -1;
}
int xfrm6_rcv(struct sk_buff *skb)
{
return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
0);
}
EXPORT_SYMBOL(xfrm6_rcv);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto)
{
struct net *net = dev_net(skb->dev);
struct xfrm_state *x = NULL;
int i = 0;
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
struct sec_path *sp;
sp = secpath_dup(skb->sp);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
goto drop;
}
if (skb->sp)
secpath_put(skb->sp);
skb->sp = sp;
}
if (1 + skb->sp->len == XFRM_MAX_DEPTH) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
for (i = 0; i < 3; i++) {
xfrm_address_t *dst, *src;
switch (i) {
case 0:
dst = daddr;
src = saddr;
break;
case 1:
/* lookup state with wild-card source address */
dst = daddr;
src = (xfrm_address_t *)&in6addr_any;
break;
default:
/* lookup state with wild-card addresses */
dst = (xfrm_address_t *)&in6addr_any;
src = (xfrm_address_t *)&in6addr_any;
break;
}
x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
if (!x)
continue;
spin_lock(&x->lock);
if ((!i || (x->props.flags & XFRM_STATE_WILDRECV)) &&
likely(x->km.state == XFRM_STATE_VALID) &&
!xfrm_state_check_expire(x)) {
spin_unlock(&x->lock);
if (x->type->input(x, skb) > 0) {
/* found a valid state */
break;
}
} else
spin_unlock(&x->lock);
xfrm_state_put(x);
x = NULL;
}
if (!x) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
xfrm_audit_state_notfound_simple(skb, AF_INET6);
goto drop;
}
skb->sp->xvec[skb->sp->len++] = x;
spin_lock(&x->lock);
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock(&x->lock);
return 1;
drop:
return -1;
}
EXPORT_SYMBOL(xfrm6_input_addr);
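/*
 * Lookup strategy (annotation): the loop in xfrm6_input_addr() tries up
 * to three state lookups in order -- exact (daddr, saddr), wildcard
 * source (daddr, any), and fully wildcard (any, any); wildcard matches
 * additionally require XFRM_STATE_WILDRECV, and the first valid,
 * unexpired state whose type->input() accepts the packet wins.
 */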
| gpl-2.0 |
hwlzc/2.6.34 | drivers/input/mouse/bcm5974.c | 32 | 24394 | /*
* Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
*
* Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
*
* The USB initialization and package decoding was made by
* Scott Shawcroft as part of the touchd user-space driver project:
* Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com)
*
* The BCM5974 driver is based on the appletouch driver:
* Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net)
* Copyright (C) 2005 Stelian Pop (stelian@popies.net)
* Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de)
* Copyright (C) 2005 Peter Osterlund (petero2@telia.com)
* Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch)
* Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb/input.h>
#include <linux/hid.h>
#include <linux/mutex.h>
#define USB_VENDOR_ID_APPLE 0x05ac
/* MacbookAir, aka wellspring */
#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
/* MacbookProPenryn, aka wellspring2 */
#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
/* Macbook5,1 (unibody), aka wellspring3 */
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_INT_CLASS | \
USB_DEVICE_ID_MATCH_INT_PROTOCOL), \
.idVendor = USB_VENDOR_ID_APPLE, \
.idProduct = (prod), \
.bInterfaceClass = USB_INTERFACE_CLASS_HID, \
.bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \
}
/* table of devices that work with this driver */
static const struct usb_device_id bcm5974_table[] = {
/* MacbookAir1.1 */
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
/* MacbookProPenryn */
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
/* Macbook5,1 */
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
/* Terminating entry */
{}
};
MODULE_DEVICE_TABLE(usb, bcm5974_table);
MODULE_AUTHOR("Henrik Rydberg");
MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver");
MODULE_LICENSE("GPL");
#define dprintk(level, format, a...)\
{ if (debug >= level) printk(KERN_DEBUG format, ##a); }
static int debug = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activate debugging output");
/* button data structure */
struct bt_data {
u8 unknown1; /* constant */
u8 button; /* left button */
u8 rel_x; /* relative x coordinate */
u8 rel_y; /* relative y coordinate */
};
/* trackpad header types */
enum tp_type {
TYPE1, /* plain trackpad */
TYPE2 /* button integrated in trackpad */
};
/* trackpad finger data offsets, le16-aligned */
#define FINGER_TYPE1 (13 * sizeof(__le16))
#define FINGER_TYPE2 (15 * sizeof(__le16))
/* trackpad button data offsets */
#define BUTTON_TYPE2 15
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
/* trackpad finger structure, le16-aligned */
struct tp_finger {
__le16 origin; /* zero when switching track finger */
__le16 abs_x; /* absolute x coordinate */
__le16 abs_y; /* absolute y coordinate */
__le16 rel_x; /* relative x coordinate */
__le16 rel_y; /* relative y coordinate */
__le16 size_major; /* finger size, major axis? */
__le16 size_minor; /* finger size, minor axis? */
__le16 orientation; /* 16384 when point, else 15 bit angle */
__le16 force_major; /* trackpad force, major axis? */
__le16 force_minor; /* trackpad force, minor axis? */
__le16 unused[3]; /* zeros */
__le16 multi; /* one finger: varies, more fingers: constant */
} __attribute__((packed,aligned(2)));
/* trackpad finger data size, empirically at least ten fingers */
#define SIZEOF_FINGER sizeof(struct tp_finger)
#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER)
#define MAX_FINGER_ORIENTATION 16384
/* device-specific parameters */
struct bcm5974_param {
int dim; /* logical dimension */
int fuzz; /* logical noise value */
int devmin; /* device minimum reading */
int devmax; /* device maximum reading */
};
/* device-specific configuration */
struct bcm5974_config {
int ansi, iso, jis; /* the product id of this device */
int caps; /* device capability bitmask */
int bt_ep; /* the endpoint of the button interface */
int bt_datalen; /* data length of the button interface */
int tp_ep; /* the endpoint of the trackpad interface */
enum tp_type tp_type; /* type of trackpad interface */
int tp_offset; /* offset to trackpad finger data */
int tp_datalen; /* data length of the trackpad interface */
struct bcm5974_param p; /* finger pressure limits */
struct bcm5974_param w; /* finger width limits */
struct bcm5974_param x; /* horizontal limits */
struct bcm5974_param y; /* vertical limits */
};
/* logical device structure */
struct bcm5974 {
char phys[64];
struct usb_device *udev; /* usb device */
struct usb_interface *intf; /* our interface */
struct input_dev *input; /* input dev */
struct bcm5974_config cfg; /* device configuration */
struct mutex pm_mutex; /* serialize access to open/suspend */
int opened; /* 1: opened, 0: closed */
struct urb *bt_urb; /* button usb request block */
struct bt_data *bt_data; /* button transferred data */
struct urb *tp_urb; /* trackpad usb request block */
u8 *tp_data; /* trackpad transferred data */
int fingers; /* number of fingers on trackpad */
};
/* logical dimensions */
#define DIM_PRESSURE 256 /* maximum finger pressure */
#define DIM_WIDTH 16 /* maximum finger width */
#define DIM_X 1280 /* maximum trackpad x value */
#define DIM_Y 800 /* maximum trackpad y value */
/* logical signal quality */
#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
#define SN_WIDTH 100 /* width signal-to-noise ratio */
#define SN_COORD 250 /* coordinate signal-to-noise ratio */
/* pressure thresholds */
#define PRESSURE_LOW (2 * DIM_PRESSURE / SN_PRESSURE)
#define PRESSURE_HIGH (3 * PRESSURE_LOW)
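/*
 * Numeric note (annotation): with DIM_PRESSURE = 256 and
 * SN_PRESSURE = 45, integer division gives PRESSURE_LOW = 512 / 45 = 11
 * and PRESSURE_HIGH = 33 on the logical 0..255 pressure scale.
 */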
/* device constants */
static const struct bcm5974_config bcm5974_config_table[] = {
{
USB_DEVICE_ID_APPLE_WELLSPRING_ANSI,
USB_DEVICE_ID_APPLE_WELLSPRING_ISO,
USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
0,
0x84, sizeof(struct bt_data),
0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
{ DIM_X, DIM_X / SN_COORD, -4824, 5342 },
{ DIM_Y, DIM_Y / SN_COORD, -172, 5820 }
},
{
USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI,
USB_DEVICE_ID_APPLE_WELLSPRING2_ISO,
USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
0,
0x84, sizeof(struct bt_data),
0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
{ DIM_X, DIM_X / SN_COORD, -4824, 4824 },
{ DIM_Y, DIM_Y / SN_COORD, -172, 4290 }
},
{
USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI,
USB_DEVICE_ID_APPLE_WELLSPRING3_ISO,
USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
HAS_INTEGRATED_BUTTON,
0x84, sizeof(struct bt_data),
0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
},
{}
};
/* return the device-specific configuration by device */
static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev)
{
u16 id = le16_to_cpu(udev->descriptor.idProduct);
const struct bcm5974_config *cfg;
for (cfg = bcm5974_config_table; cfg->ansi; ++cfg)
if (cfg->ansi == id || cfg->iso == id || cfg->jis == id)
return cfg;
return bcm5974_config_table;
}
/* convert 16-bit little endian to signed integer */
static inline int raw2int(__le16 x)
{
return (signed short)le16_to_cpu(x);
}
/* scale device data to logical dimensions (asserts devmin < devmax) */
static inline int int2scale(const struct bcm5974_param *p, int x)
{
return x * p->dim / (p->devmax - p->devmin);
}
/* all logical value ranges are [0,dim). */
static inline int int2bound(const struct bcm5974_param *p, int x)
{
int s = int2scale(p, x);
return clamp_val(s, 0, p->dim - 1);
}
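/*
 * Worked example (annotation, using the wellspring x limits above):
 * with dim = 1280, devmin = -4824 and devmax = 5342 the device span is
 * 10166 units, so int2scale() maps a raw reading r to r * 1280 / 10166
 * and int2bound() clamps the result into 0..1279.
 */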
/* setup which logical events to report */
static void setup_events_to_report(struct input_dev *input_dev,
const struct bcm5974_config *cfg)
{
__set_bit(EV_ABS, input_dev->evbit);
input_set_abs_params(input_dev, ABS_PRESSURE,
0, cfg->p.dim, cfg->p.fuzz, 0);
input_set_abs_params(input_dev, ABS_TOOL_WIDTH,
0, cfg->w.dim, cfg->w.fuzz, 0);
input_set_abs_params(input_dev, ABS_X,
0, cfg->x.dim, cfg->x.fuzz, 0);
input_set_abs_params(input_dev, ABS_Y,
0, cfg->y.dim, cfg->y.fuzz, 0);
/* finger touch area */
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
cfg->w.devmin, cfg->w.devmax, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
cfg->w.devmin, cfg->w.devmax, 0, 0);
/* finger approach area */
input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR,
cfg->w.devmin, cfg->w.devmax, 0, 0);
input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR,
cfg->w.devmin, cfg->w.devmax, 0, 0);
/* finger orientation */
input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
-MAX_FINGER_ORIENTATION,
MAX_FINGER_ORIENTATION, 0, 0);
/* finger position */
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
cfg->x.devmin, cfg->x.devmax, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
cfg->y.devmin, cfg->y.devmax, 0, 0);
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
__set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
__set_bit(BTN_LEFT, input_dev->keybit);
}
/* report button data as logical button state */
static int report_bt_state(struct bcm5974 *dev, int size)
{
if (size != sizeof(struct bt_data))
return -EIO;
dprintk(7,
"bcm5974: button data: %x %x %x %x\n",
dev->bt_data->unknown1, dev->bt_data->button,
dev->bt_data->rel_x, dev->bt_data->rel_y);
input_report_key(dev->input, BTN_LEFT, dev->bt_data->button);
input_sync(dev->input);
return 0;
}
static void report_finger_data(struct input_dev *input,
const struct bcm5974_config *cfg,
const struct tp_finger *f)
{
input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
input_report_abs(input, ABS_MT_ORIENTATION,
MAX_FINGER_ORIENTATION - raw2int(f->orientation));
input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
input_report_abs(input, ABS_MT_POSITION_Y,
cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y));
input_mt_sync(input);
}
/* report trackpad data as logical trackpad state */
static int report_tp_state(struct bcm5974 *dev, int size)
{
const struct bcm5974_config *c = &dev->cfg;
const struct tp_finger *f;
struct input_dev *input = dev->input;
int raw_p, raw_w, raw_x, raw_y, raw_n, i;
int ptest, origin, ibt = 0, nmin = 0, nmax = 0;
int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0;
if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0)
return -EIO;
/* finger data, le16-aligned */
f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
/* always track the first finger; when detached, start over */
if (raw_n) {
/* report raw trackpad data */
for (i = 0; i < raw_n; i++)
report_finger_data(input, c, &f[i]);
raw_p = raw2int(f->force_major);
raw_w = raw2int(f->size_major);
raw_x = raw2int(f->abs_x);
raw_y = raw2int(f->abs_y);
dprintk(9,
"bcm5974: "
"raw: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n",
raw_p, raw_w, raw_x, raw_y, raw_n);
ptest = int2bound(&c->p, raw_p);
origin = raw2int(f->origin);
/* set the integrated button if applicable */
if (c->tp_type == TYPE2)
ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
/* while tracking finger still valid, count all fingers */
if (ptest > PRESSURE_LOW && origin) {
abs_p = ptest;
abs_w = int2bound(&c->w, raw_w);
abs_x = int2bound(&c->x, raw_x - c->x.devmin);
abs_y = int2bound(&c->y, c->y.devmax - raw_y);
while (raw_n--) {
ptest = int2bound(&c->p,
raw2int(f->force_major));
if (ptest > PRESSURE_LOW)
nmax++;
if (ptest > PRESSURE_HIGH)
nmin++;
f++;
}
}
}
if (dev->fingers < nmin)
dev->fingers = nmin;
if (dev->fingers > nmax)
dev->fingers = nmax;
input_report_key(input, BTN_TOUCH, dev->fingers > 0);
input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1);
input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2);
input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers == 3);
input_report_key(input, BTN_TOOL_QUADTAP, dev->fingers > 3);
input_report_abs(input, ABS_PRESSURE, abs_p);
input_report_abs(input, ABS_TOOL_WIDTH, abs_w);
if (abs_p) {
input_report_abs(input, ABS_X, abs_x);
input_report_abs(input, ABS_Y, abs_y);
dprintk(8,
"bcm5974: abs: p: %+05d w: %+05d x: %+05d y: %+05d "
"nmin: %d nmax: %d n: %d ibt: %d\n", abs_p, abs_w,
abs_x, abs_y, nmin, nmax, dev->fingers, ibt);
}
/* type 2 reports button events via ibt only */
if (c->tp_type == TYPE2)
input_report_key(input, BTN_LEFT, ibt);
input_sync(input);
return 0;
}
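/* Worked example of the nmin/nmax hysteresis above (illustrative
* numbers): with two firm contacts and a third finger barely touching,
* nmin = 2 (fingers above PRESSURE_HIGH) and nmax = 3 (fingers above
* PRESSURE_LOW). A previous dev->fingers of 1 is raised to nmin (2), a
* previous value of 4 is lowered to nmax (3), and anything already inside
* [nmin, nmax] is kept, which stops the reported count from flickering
* while a light touch hovers around the pressure thresholds.
*/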
/* Wellspring initialization constants */
#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
{
char *data = kmalloc(8, GFP_KERNEL);
int retval = 0, size;
if (!data) {
err("bcm5974: out of memory");
retval = -ENOMEM;
goto out;
}
/* read configuration */
size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
if (size != 8) {
err("bcm5974: could not read from device");
retval = -EIO;
goto out;
}
/* apply the mode switch */
data[0] = on ?
BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
/* write configuration */
size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
if (size != 8) {
err("bcm5974: could not write to device");
retval = -EIO;
goto out;
}
dprintk(2, "bcm5974: switched to %s mode.\n",
on ? "wellspring" : "normal");
out:
kfree(data);
return retval;
}
static void bcm5974_irq_button(struct urb *urb)
{
struct bcm5974 *dev = urb->context;
int error;
switch (urb->status) {
case 0:
break;
case -EOVERFLOW:
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
dbg("bcm5974: button urb shutting down: %d", urb->status);
return;
default:
dbg("bcm5974: button urb status: %d", urb->status);
goto exit;
}
if (report_bt_state(dev, dev->bt_urb->actual_length))
dprintk(1, "bcm5974: bad button package, length: %d\n",
dev->bt_urb->actual_length);
exit:
error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC);
if (error)
err("bcm5974: button urb failed: %d", error);
}
static void bcm5974_irq_trackpad(struct urb *urb)
{
struct bcm5974 *dev = urb->context;
int error;
switch (urb->status) {
case 0:
break;
case -EOVERFLOW:
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
dbg("bcm5974: trackpad urb shutting down: %d", urb->status);
return;
default:
dbg("bcm5974: trackpad urb status: %d", urb->status);
goto exit;
}
/* control response ignored */
if (dev->tp_urb->actual_length == 2)
goto exit;
if (report_tp_state(dev, dev->tp_urb->actual_length))
dprintk(1, "bcm5974: bad trackpad package, length: %d\n",
dev->tp_urb->actual_length);
exit:
error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC);
if (error)
err("bcm5974: trackpad urb failed: %d", error);
}
/*
* The Wellspring trackpad, like many recent Apple trackpads, shares
* the USB device with the keyboard. Since keyboards are usually
* handled by the HID system, the device ends up being handled by two
* modules. Setting up the device therefore becomes slightly
* complicated. To enable multitouch features, a mode switch is
* required, which is usually applied via the control interface of the
* device. It can be argued where this switch should take place. In
* some drivers, like appletouch, the switch is made during
* probe. However, the hid module may also alter the state of the
* device, resulting in trackpad malfunction under certain
* circumstances. To get around this problem, there is at least one
* example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to
* receive a reset_resume request rather than the normal resume.
* Since the implementation of reset_resume is equal to mode switch
* plus start_traffic, it seems easier to always do the switch when
* starting traffic on the device.
*/
static int bcm5974_start_traffic(struct bcm5974 *dev)
{
if (bcm5974_wellspring_mode(dev, true)) {
dprintk(1, "bcm5974: mode switch failed\n");
goto error;
}
if (usb_submit_urb(dev->bt_urb, GFP_KERNEL))
goto error;
if (usb_submit_urb(dev->tp_urb, GFP_KERNEL))
goto err_kill_bt;
return 0;
err_kill_bt:
usb_kill_urb(dev->bt_urb);
error:
return -EIO;
}
static void bcm5974_pause_traffic(struct bcm5974 *dev)
{
usb_kill_urb(dev->tp_urb);
usb_kill_urb(dev->bt_urb);
bcm5974_wellspring_mode(dev, false);
}
/*
* The code below implements open/close and manual suspend/resume.
* All functions may be called in random order.
*
* Opening a suspended device fails with EACCES - permission denied.
*
* Failing a resume leaves the device resumed but closed.
*/
static int bcm5974_open(struct input_dev *input)
{
struct bcm5974 *dev = input_get_drvdata(input);
int error;
error = usb_autopm_get_interface(dev->intf);
if (error)
return error;
mutex_lock(&dev->pm_mutex);
error = bcm5974_start_traffic(dev);
if (!error)
dev->opened = 1;
mutex_unlock(&dev->pm_mutex);
if (error)
usb_autopm_put_interface(dev->intf);
return error;
}
static void bcm5974_close(struct input_dev *input)
{
struct bcm5974 *dev = input_get_drvdata(input);
mutex_lock(&dev->pm_mutex);
bcm5974_pause_traffic(dev);
dev->opened = 0;
mutex_unlock(&dev->pm_mutex);
usb_autopm_put_interface(dev->intf);
}
static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
mutex_lock(&dev->pm_mutex);
if (dev->opened)
bcm5974_pause_traffic(dev);
mutex_unlock(&dev->pm_mutex);
return 0;
}
static int bcm5974_resume(struct usb_interface *iface)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
int error = 0;
mutex_lock(&dev->pm_mutex);
if (dev->opened)
error = bcm5974_start_traffic(dev);
mutex_unlock(&dev->pm_mutex);
return error;
}
static int bcm5974_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(iface);
const struct bcm5974_config *cfg;
struct bcm5974 *dev;
struct input_dev *input_dev;
int error = -ENOMEM;
/* find the product index */
cfg = bcm5974_get_config(udev);
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
input_dev = input_allocate_device();
if (!dev || !input_dev) {
err("bcm5974: out of memory");
goto err_free_devs;
}
dev->udev = udev;
dev->intf = iface;
dev->input = input_dev;
dev->cfg = *cfg;
mutex_init(&dev->pm_mutex);
/* setup urbs */
dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->bt_urb)
goto err_free_devs;
dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->tp_urb)
goto err_free_bt_urb;
dev->bt_data = usb_buffer_alloc(dev->udev,
dev->cfg.bt_datalen, GFP_KERNEL,
&dev->bt_urb->transfer_dma);
if (!dev->bt_data)
goto err_free_urb;
dev->tp_data = usb_buffer_alloc(dev->udev,
dev->cfg.tp_datalen, GFP_KERNEL,
&dev->tp_urb->transfer_dma);
if (!dev->tp_data)
goto err_free_bt_buffer;
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);
usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);
/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));
input_dev->name = "bcm5974";
input_dev->phys = dev->phys;
usb_to_input_id(dev->udev, &input_dev->id);
/* report driver capabilities via the version field */
input_dev->id.version = cfg->caps;
input_dev->dev.parent = &iface->dev;
input_set_drvdata(input_dev, dev);
input_dev->open = bcm5974_open;
input_dev->close = bcm5974_close;
setup_events_to_report(input_dev, cfg);
error = input_register_device(dev->input);
if (error)
goto err_free_buffer;
/* save our data pointer in this interface device */
usb_set_intfdata(iface, dev);
return 0;
err_free_buffer:
usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
dev->tp_data, dev->tp_urb->transfer_dma);
err_free_bt_buffer:
usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
dev->bt_data, dev->bt_urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->tp_urb);
err_free_bt_urb:
usb_free_urb(dev->bt_urb);
err_free_devs:
usb_set_intfdata(iface, NULL);
input_free_device(input_dev);
kfree(dev);
return error;
}
static void bcm5974_disconnect(struct usb_interface *iface)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
usb_set_intfdata(iface, NULL);
input_unregister_device(dev->input);
usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
dev->tp_data, dev->tp_urb->transfer_dma);
usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
dev->bt_data, dev->bt_urb->transfer_dma);
usb_free_urb(dev->tp_urb);
usb_free_urb(dev->bt_urb);
kfree(dev);
}
static struct usb_driver bcm5974_driver = {
.name = "bcm5974",
.probe = bcm5974_probe,
.disconnect = bcm5974_disconnect,
.suspend = bcm5974_suspend,
.resume = bcm5974_resume,
.id_table = bcm5974_table,
.supports_autosuspend = 1,
};
static int __init bcm5974_init(void)
{
return usb_register(&bcm5974_driver);
}
static void __exit bcm5974_exit(void)
{
usb_deregister(&bcm5974_driver);
}
module_init(bcm5974_init);
module_exit(bcm5974_exit);
| gpl-2.0 |
RobertoMalatesta/synergy | src/lib/arch/win32/ArchLogWindows.cpp | 32 | 2145 | /*
* synergy -- mouse and keyboard sharing utility
* Copyright (C) 2012 Synergy Si Ltd.
* Copyright (C) 2002 Chris Schoeneman
*
* This package is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* found in the file LICENSE that should have accompanied this file.
*
* This package is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "arch/win32/ArchLogWindows.h"
#include "arch/win32/ArchMiscWindows.h"
#include <string.h>
//
// ArchLogWindows
//
ArchLogWindows::ArchLogWindows() : m_eventLog(NULL)
{
// do nothing
}
ArchLogWindows::~ArchLogWindows()
{
// do nothing
}
void
ArchLogWindows::openLog(const char* name)
{
if (m_eventLog == NULL) {
m_eventLog = RegisterEventSource(NULL, name);
}
}
void
ArchLogWindows::closeLog()
{
if (m_eventLog != NULL) {
DeregisterEventSource(m_eventLog);
m_eventLog = NULL;
}
}
void
ArchLogWindows::showLog(bool)
{
// do nothing
}
void
ArchLogWindows::writeLog(ELevel level, const char* msg)
{
if (m_eventLog != NULL) {
// convert priority
WORD type;
switch (level) {
case kERROR:
type = EVENTLOG_ERROR_TYPE;
break;
case kWARNING:
type = EVENTLOG_WARNING_TYPE;
break;
default:
type = EVENTLOG_INFORMATION_TYPE;
break;
}
// log it
// FIXME -- win32 wants to use a message table to look up event
// strings. Log messages aren't organized that way so we'll
// just dump our string into the raw data section of the event
// so users can at least see the message. note that we use our
// level as the event category.
ReportEvent(m_eventLog, type, static_cast<WORD>(level),
0, // event ID
NULL,
0,
(DWORD)strlen(msg) + 1, // raw data size
NULL,
const_cast<char*>(msg));// raw data
}
}
| gpl-2.0 |
yodok/u8833_nethunter_kernel | drivers/media/video/msm/sensors/gc0313_v4l2.c | 32 | 21486 | /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_sensor.h"
#include "msm.h"
#define SENSOR_NAME "gc0313"
#define PLATFORM_DRIVER_NAME "msm_camera_gc0313"
#define gc0313_obj gc0313_##obj
#include "linux/hardware_self_adapt.h"
DEFINE_MUTEX(gc0313_mut);
static struct msm_sensor_ctrl_t gc0313_s_ctrl;
static struct msm_camera_i2c_reg_conf gc0313_start_settings[] = {
};
static struct msm_camera_i2c_reg_conf gc0313_stop_settings[] = {
};
static struct msm_camera_i2c_reg_conf gc0313_reset_settings[] = {
{0xfe, 0x80},
{0xfe, 0x80},
{0xfe, 0x80},
{0xf1, 0xf0},//cary f0
{0xf2, 0x00},
{0xf6, 0x03},
{0xf7, 0x03},
{0xfc, 0x1e},
{0xfe, 0x00},
};
static struct msm_camera_i2c_conf_array gc0313_reset_confs[] = {
{&gc0313_reset_settings[0], ARRAY_SIZE(gc0313_reset_settings), 50,
MSM_CAMERA_I2C_BYTE_DATA},
};
static struct msm_camera_i2c_reg_conf gc0313_init_settings[] =
{
/////////////////////////////////////////////////////
////////////////// Window Setting ///////////////////
/////////////////////////////////////////////////////
{0xfe, 0x00},
{0x42, 0xfd},
{0x77, 0x55},//5b 40 44
{0x78, 0x40},
{0x79, 0x80},
{0x4f, 0x00},
{0x03, 0x02},//5b 40
{0x04, 0x58},//add freda
{0x42, 0xff},
{0x0d, 0x01},
{0x0e, 0xe8},
{0x0f, 0x02},
{0x10, 0x88},
{0x05, 0x00},
{0x06, 0xde},
{0x07, 0x00},
{0x08, 0xa7},
{0x09, 0x00},
{0x0a, 0x00},
{0x0b, 0x00},
{0x0c, 0x04},
/////////////////////////////////////////////////////
////////////////// Analog & CISCTL //////////////////
/////////////////////////////////////////////////////
{0x17, 0x14},
{0x19, 0x04},
{0x1b, 0x48},
{0x1f, 0x08},
{0x20, 0x01},
{0x21, 0x48},
{0x22, 0x9a},
{0x23, 0x07},
{0x24, 0x16},
/////////////////////////////////////////////////////
/////////////////// ISP Realated ////////////////////
/////////////////////////////////////////////////////
//////////////////////////////////
{0x40, 0xdf},//freda defected before
{0x41, 0x24},//freda defected before
//{0x42, 0xff},
{0x44, 0x20}, //0x20
{0x45, 0x00},
{0x46, 0x02},
{0x4d, 0x01},
{0x4f, 0x01},//freda defected before
{0x50, 0x01},
{0x70, 0x66},//freda defected before 70 freda
/////////////////////////////////////////////////////
/////////////////////// BLK /////////////////////////
/////////////////////////////////////////////////////
{0x26, 0xf7},
{0x27, 0x01},
{0x28, 0x7f},
{0x29, 0x38},
{0x33, 0x1a},
{0x34, 0x1a},
{0x35, 0x1a},
{0x36, 0x1a},
{0x37, 0x1a},
{0x38, 0x1a},
{0x39, 0x1a},
{0x3a, 0x1a},
////////////////////////////////////////////////////
//////////////////// Y Gamma ///////////////////////
////////////////////////////////////////////////////
{0xfe, 0x00},
{0x63, 0x00},
{0x64, 0x06},
{0x65, 0x0f},
{0x66, 0x21},
{0x67, 0x34},
{0x68, 0x47},
{0x69, 0x59},
{0x6a, 0x6c},
{0x6b, 0x8e},
{0x6c, 0xab},
{0x6d, 0xc5},
{0x6e, 0xe0},
{0x6f, 0xfa},
////////////////////////////////////////////////////
////////////////// YUV to RGB //////////////////////
////////////////////////////////////////////////////
{0xb0, 0x13},
{0xb1, 0x27},
{0xb2, 0x07},
{0xb3, 0xf6},
{0xb4, 0xe0},
{0xb5, 0x29},
{0xb6, 0x24},
{0xb7, 0xdf},
{0xb8, 0xfd},
////////////////////////////////////////////////////
/////////////////////// DNDD ///////////////////////
////////////////////////////////////////////////////
{0x7e, 0x12},
{0x7f, 0xc3},
{0x82, 0x78},// freda defected before
{0x84, 0x02},
{0x89, 0xa4},
////////////////////////////////////////////////////
////////////////////// INTPEE //////////////////////
////////////////////////////////////////////////////
{0x90, 0xbc},//freda defected before
{0x92, 0x08},
{0x94, 0x08},
{0x95, 0x64},//freda defected before
////////////////////////////////////////////////////
/////////////////////// ASDE ///////////////////////
////////////////////////////////////////////////////
{0xfe, 0x01},
{0x18, 0x01},
{0xfe, 0x00},
{0x9a, 0x20},
{0x9c, 0x98},
{0x9e, 0x08},
{0xa2, 0x32},
{0xa4, 0x40},
{0xaa, 0x50},
////////////////////////////////////////////////////
//////////////////// RGB Gamma /////////////////////
////////////////////////////////////////////////////
#if 1
{0xbf, 0x08},
{0xc0, 0x0f},
{0xc1, 0x21},
{0xc2, 0x32},
{0xc3, 0x43},
{0xc4, 0x50},
{0xc5, 0x5e},
{0xc6, 0x78},
{0xc7, 0x90},
{0xc8, 0xa6},
{0xc9, 0xb9},
{0xca, 0xc9},
{0xcb, 0xd6},
{0xcc, 0xe0},
{0xcd, 0xee},
{0xce, 0xf8},
{0xcf, 0xff},
#endif
////////////////////////////////////////////////////
/////////////////////// AEC ////////////////////////
////////////////////////////////////////////////////
{0xfe, 0x01},
{0x10, 0x08},
{0x11, 0x11},
{0x12, 0x13},
{0x13, 0x40},//freda defected before
{0x16, 0x18},
{0x17, 0x88},
{0x29, 0x00},
{0x2a, 0x83},
{0x2b, 0x02},
{0x2c, 0x8f},
{0x2d, 0x03},
{0x2e, 0x95},
{0x2f, 0x06},
{0x30, 0x24},
{0x31, 0x0c},
{0x32, 0x48},
{0x33, 0x20},
{0x3c, 0x60},
{0x3e, 0x40},
////////////////////////////////////////////////////
/////////////////////// YCP ////////////////////////
////////////////////////////////////////////////////
{0xfe, 0x00},
{0xd1, 0x30},//freda defected before
{0xd2, 0x30},//freda defected before
{0xde, 0x38},
{0xd8, 0x15},
{0xdd, 0x00},
{0xe4, 0x8f},
{0xe5, 0x50},
////////////////////////////////////////////////////
//////////////////// DARK & RC /////////////////////
////////////////////////////////////////////////////
{0xfe, 0x01},
{0x40, 0x8f},
{0x41, 0x83},
{0x42, 0xff},//freda defected before
{0x43, 0x06},
{0x44, 0x1f},
{0x45, 0xff},
{0x46, 0xff},
{0x47, 0x04},
////////////////////////////////////////////////////
////////////////////// AWB /////////////////////////
////////////////////////////////////////////////////
{0x06, 0x0d},
{0x07, 0x06},
{0x08, 0xa4},
{0x09, 0xf2},
{0x50, 0xfd},
{0x51, 0x20},
{0x52, 0x24},
{0x53, 0x08},
{0x54, 0x3b},//0b freda
{0x55, 0x3f},//0f freda
{0x56, 0x3b},//0b freda
{0x57, 0x30},//20 freda
{0x58, 0xf6},
{0x59, 0x0b},
{0x5a, 0x11},
{0x5b, 0xf0},
{0x5c, 0xe8},
{0x5d, 0x10},
{0x5e, 0x20},
{0x5f, 0xe0},
{0x60, 0x00},//add freda
{0x67, 0x00},
{0x6d, 0x32},
{0x6e, 0x08},
{0x6f, 0x08},
{0x70, 0x40},
{0x71, 0x83},
{0x72, 0x24},//26 freda
{0x73, 0x72},//62 freda
{0x74, 0x13},//03 freda
{0x75, 0x48},
{0x76, 0x40},
{0x77, 0xc2},
{0x78, 0xa5},
{0x79, 0x18},
{0x7a, 0x40},
{0x7b, 0xb0},
{0x7c, 0xf5},
{0x81, 0x80},//80 freda 50
{0x82, 0x50},//60 freda 40
{0x83, 0xd0},//80 freda f0
{0x92, 0x00},
{0xd5, 0x0C},
{0xd6, 0x02},
{0xd7, 0x06},
{0xd8, 0x05},
{0xdd, 0x00},
{0xfe, 0x00},
////////////////////////////////////////////////////
////////////////////// LSC /////////////////////////
////////////////////////////////////////////////////
{0xfe, 0x01},
{0xa0, 0x00},
{0xa1, 0x3c},
{0xa2, 0x50},
{0xa3, 0x00},
{0xa4, 0x00},
{0xa5, 0x00},
{0xa6, 0x00},
{0xa7, 0x00},
{0xa8, 0x00},
{0xa9, 0x00},
{0xaa, 0x00},
{0xab, 0x00},
{0xac, 0x00},
{0xad, 0x00},
{0xae, 0x00},
{0xaf, 0x00},
{0xb0, 0x00},
{0xb1, 0x00},
{0xb2, 0x00},
{0xb3, 0x00},
{0xb4, 0x21},
{0xb5, 0x1e},
{0xb6, 0x18},
{0xba, 0x28},
{0xbb, 0x24},
{0xbc, 0x1c},
{0xc0, 0x15},
{0xc1, 0x14},
{0xc2, 0x11},
{0xc6, 0x12},
{0xc7, 0x12},
{0xc8, 0x11},
{0xb7, 0x20},
{0xb8, 0x20},
{0xb9, 0x20},
{0xbd, 0x20},
{0xbe, 0x20},
{0xbf, 0x20},
{0xc3, 0x00},
{0xc4, 0x00},
{0xc5, 0x00},
{0xc9, 0x00},
{0xca, 0x00},
{0xcb, 0x00},
{0xfe, 0x00},
{0x05, 0x01},
{0x06, 0xfa},
{0x07, 0x00},
{0x08, 0x70},
{0xfe, 0x01},
{0x29, 0x00},
{0x2a, 0x64},
{0x2b, 0x02},
{0x2c, 0x58},
{0x2d, 0x03},
{0x2e, 0x20},
{0x2f, 0x03},
{0x30, 0xe8},
{0x31, 0x06},
{0x32, 0x40},
{0x33, 0x20},
{0xfe, 0x00},
//////////////////////////////////////////////////
////////////////////// MIPI //////////////////////
//////////////////////////////////////////////////
{0xfe, 0x03},
{0x01, 0x03},
{0x02, 0x21},
{0x03, 0x10},
{0x04, 0x80},
{0x05, 0x02},
{0x06, 0x80},
{0x11, 0x1e},
{0x12, 0x00},
{0x13, 0x05},
{0x15, 0x12},
{0x17, 0x00},
{0x21, 0x01},
{0x22, 0x02},
{0x23, 0x01},
{0x29, 0x02},
{0x2a, 0x01},
{0x10, 0x94},
{0xfe, 0x00},
{0x17, 0x17},
};
static struct msm_camera_i2c_reg_conf gc0313_wb_auto_reg_config[] =
{
{0xfe, 0x00},{0x42, 0xff},
};
static struct msm_camera_i2c_reg_conf gc0313_wb_incandescent_reg_config[] =
{
{0xfe, 0x00},{0x42, 0xfd}, {0x77, 0x43},{0x78, 0x40},{0x79, 0x80},
};
static struct msm_camera_i2c_reg_conf gc0313_wb_fluorescent_reg_config[] =
{
{0xfe, 0x00},{0x42, 0xfd}, {0x77, 0x55},{0x78, 0x40},{0x79, 0x6a},
};
static struct msm_camera_i2c_reg_conf gc0313_wb_daylight_reg_config[] =
{
{0xfe, 0x00},{0x42, 0xfd}, {0x77, 0x65},{0x78, 0x40},{0x79, 0x57},
};
static struct msm_camera_i2c_reg_conf gc0313_wb_cloudy_reg_config[] =
{
{0xfe, 0x00},{0x42, 0xfd}, {0x77, 0x73},{0x78, 0x40},{0x79, 0x47},
};
static struct msm_camera_i2c_reg_conf gc0313_effect_off_reg_config[] =
{
{0xfe,0x00},{0x43,0x00},{0x40,0xff},{0x41,0x22},{0x96,0x82},{0x90,0xbc},
{0x4f,0x01},{0xd4,0x80},{0xda,0x00},{0xdb,0x00},{0xbf,0x08},{0xc0,0x0f},
{0xc1,0x21},{0xc2,0x32},{0xc3,0x43},{0xc4,0x50},{0xc5,0x5e},{0xc6,0x78},{0xc7,0x90},{0xc8,0xa6},
{0xc9,0xb9},{0xca,0xc9},{0xcb,0xd6},{0xcc,0xe0},{0xcd,0xee},{0xce,0xf8},{0xcf,0xff},{0xfe,0x00},
{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0x82,0x78},
};
static struct msm_camera_i2c_reg_conf gc0313_effect_mono_reg_config[] =
{
{0xfe,0x00},{0x43,0x02},{0x40,0xff},{0x41,0x22},{0x42,0xff},{0x96,0x82},{0x90,0xbc},
{0x4f,0x01},{0xd4,0x80},{0xda,0x00},{0xdb,0x00},{0xbf,0x0e},{0xc0,0x1c},
{0xc1,0x34},{0xc2,0x48},{0xc3,0x5a},{0xc4,0x6b},{0xc5,0x7b},{0xc6,0x95},{0xc7,0xab},{0xc8,0xbf},
{0xc9,0xce},{0xca,0xd9},{0xcb,0xe4},{0xcc,0xec},{0xcd,0xf7},{0xce,0xfd},{0xcf,0xff},{0xfe,0x00},
{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0x82,0x78},
};
static struct msm_camera_i2c_reg_conf gc0313_effect_negative_reg_config[] =
{
{0xfe,0x00},{0x43,0x01},{0x40,0xff},{0x41,0x22},{0x42,0xff},{0x96,0x82},{0x90,0xbc},
{0x4f,0x01},{0xd4,0x80},{0xda,0x00},{0xdb,0x00},{0xbf,0x0e},{0xc0,0x1c},
{0xc1,0x34},{0xc2,0x48},{0xc3,0x5a},{0xc4,0x6b},{0xc5,0x7b},{0xc6,0x95},{0xc7,0xab},{0xc8,0xbf},
{0xc9,0xce},{0xca,0xd9},{0xcb,0xe4},{0xcc,0xec},{0xcd,0xf7},{0xce,0xfd},{0xcf,0xff},{0xfe,0x00},
{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0x82,0x78},
};
static struct msm_camera_i2c_reg_conf gc0313_effect_sepia_reg_config[] =
{
{0xfe,0x00},{0x43,0x02},{0x40,0xff},{0x41,0x22},{0x42,0xff},{0x96,0x82},{0x90,0xbc},
{0x4f,0x01},{0xd4,0x80},{0xda,0xd0},{0xdb,0x28},{0xbf,0x0e},{0xc0,0x1c},
{0xc1,0x34},{0xc2,0x48},{0xc3,0x5a},{0xc4,0x6b},{0xc5,0x7b},{0xc6,0x95},{0xc7,0xab},{0xc8,0xbf},
{0xc9,0xce},{0xca,0xd9},{0xcb,0xe4},{0xcc,0xec},{0xcd,0xf7},{0xce,0xfd},{0xcf,0xff},{0xfe,0x00},
{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0x82,0x78},
};
static struct msm_camera_i2c_reg_conf gc0313_effect_aqua_reg_config[] =
{
{0xfe,0x00},{0x43,0x02},{0x40,0xff},{0x41,0x22},{0x42,0xff},{0x96,0x82},{0x90,0xbc},
{0x4f,0x01},{0xd4,0x80},{0xda,0xd0},{0xdb,0x48},{0xbf,0x0e},{0xc0,0x1c},
{0xc1,0x34},{0xc2,0x48},{0xc3,0x5a},{0xc4,0x6b},{0xc5,0x7b},{0xc6,0x95},{0xc7,0xab},{0xc8,0xbf},
{0xc9,0xce},{0xca,0xd9},{0xcb,0xe4},{0xcc,0xec},{0xcd,0xf7},{0xce,0xfd},{0xcf,0xff},{0xfe,0x00},
{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0xfe,0x00},{0x82,0x78},
};
static struct v4l2_subdev_info gc0313_subdev_info[] = {
{
.code = V4L2_MBUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.fmt = 1,
.order = 0,
},
/* more can be supported, to be added later */
};
static struct msm_camera_i2c_conf_array gc0313_init_conf[] = {
{&gc0313_init_settings[0],
ARRAY_SIZE(gc0313_init_settings), 0, MSM_CAMERA_I2C_BYTE_DATA},
};
static struct msm_camera_i2c_conf_array gc0313_confs[] = {
{NULL,0, 0, MSM_CAMERA_I2C_BYTE_DATA},
{NULL,0, 0, MSM_CAMERA_I2C_BYTE_DATA},
};
static struct msm_sensor_output_info_t gc0313_dimensions[] = {
{
.x_output = 0x280,
.y_output = 0x1E0,
.line_length_pclk = 0x290,
.frame_length_lines = 0x1EC,
.vt_pixel_clk = 24000000,
.op_pixel_clk = 24000000,
.binning_factor = 1,
},
{
.x_output = 0x280,
.y_output = 0x1E0,
.line_length_pclk = 0x290,
.frame_length_lines = 0x1EC,
.vt_pixel_clk = 24000000,
.op_pixel_clk = 24000000,
.binning_factor = 1,
},
};
static struct msm_camera_csi_params gc0313_csi_params = {
.data_format = CSI_8BIT,
.lane_cnt = 1,
.lane_assign = 0xe4,
.dpcm_scheme = 0,
.settle_cnt = 0x14,
};
static struct msm_camera_csi_params *gc0313_csi_params_array[] = {
&gc0313_csi_params,
&gc0313_csi_params,
};
static struct msm_sensor_output_reg_addr_t gc0313_reg_addr = {
.x_output = 0xCC,
.y_output = 0xCE,
.line_length_pclk = 0xC8,
.frame_length_lines = 0xCA,
};
static struct msm_sensor_id_info_t gc0313_id_info = {
.sensor_id_reg_addr = 0xf0,
.sensor_id = 0xd0,
};
static const struct i2c_device_id gc0313_i2c_id[] = {
{SENSOR_NAME, (kernel_ulong_t)&gc0313_s_ctrl},
{ }
};
static struct i2c_driver gc0313_i2c_driver = {
.id_table = gc0313_i2c_id,
.probe = msm_sensor_i2c_probe,
.driver = {
.name = SENSOR_NAME,
},
};
static struct msm_camera_i2c_client gc0313_sensor_i2c_client = {
.addr_type = MSM_CAMERA_I2C_BYTE_ADDR,
};
static int __init msm_sensor_init_module(void)
{
return i2c_add_driver(&gc0313_i2c_driver);
}
int32_t gc0313_write_init_settings(struct msm_sensor_ctrl_t *s_ctrl)
{
int32_t rc=0, i;
printk("%s is called !\n", __func__);
for (i = 0; i < s_ctrl->msm_sensor_reg->init_size; i++)
{
rc = msm_sensor_write_conf_array(
s_ctrl->sensor_i2c_client,
s_ctrl->msm_sensor_reg->init_settings, i);
if (rc < 0)
break;
}
msleep(20);
return rc;
}
int32_t gc0313_mirrorandflip_self_adapt(struct msm_sensor_ctrl_t *s_ctrl)
{
int32_t rc = 0;
CDBG("%s is called !\n", __func__);
/*must mend 0x17 if 0x17 is mended in intializtion register sequence*/
/*the 0 and 1 bit are used to control mirror and flip */
if((HW_MIRROR_AND_FLIP << 1) == get_hw_camera_mirror_type())
{
rc = msm_camera_i2c_write(
s_ctrl->sensor_i2c_client,
0x17, 0x14,
MSM_CAMERA_I2C_BYTE_DATA);
if (rc < 0)
{
pr_err("%s: write register error\n", __func__);
}
}
return rc;
}
int32_t gc0313_match_id(struct msm_sensor_ctrl_t *s_ctrl)
{
int32_t rc = 0;
uint16_t chipid = 0;
/*second match id*/
rc = msm_camera_i2c_read(
s_ctrl->sensor_i2c_client,
s_ctrl->sensor_id_info->sensor_id_reg_addr, &chipid,
MSM_CAMERA_I2C_BYTE_DATA);
if (rc < 0) {
pr_err("%s: %s: read id failed\n", __func__,
s_ctrl->sensordata->sensor_name);
return rc;
}
printk("msm_sensor id: %d\n", chipid);
if (chipid != s_ctrl->sensor_id_info->sensor_id) {
pr_err("msm_sensor_match_id chip id doesnot match\n");
return -ENODEV;
}
return rc;
}
/*func for gc0313 to set effect*/
int32_t gc0313_sensor_set_effect(struct msm_sensor_ctrl_t *s_ctrl, int effect)
{
struct msm_camera_i2c_reg_conf *reg_conf_tbl = NULL;
uint16_t num_of_items_in_table = 0;
int rc = 0;
printk("%s, to set effect = %d \n", __func__,effect);
switch (effect)
{
case CAMERA_EFFECT_OFF:
reg_conf_tbl = &gc0313_effect_off_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_effect_off_reg_config);
break;
case CAMERA_EFFECT_MONO:
reg_conf_tbl = &gc0313_effect_mono_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_effect_mono_reg_config);
break;
case CAMERA_EFFECT_NEGATIVE:
reg_conf_tbl = &gc0313_effect_negative_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_effect_negative_reg_config);
break;
case CAMERA_EFFECT_SEPIA:
reg_conf_tbl = &gc0313_effect_sepia_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_effect_sepia_reg_config);
break;
case CAMERA_EFFECT_AQUA:
reg_conf_tbl = &gc0313_effect_aqua_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_effect_aqua_reg_config);
break;
default:
return 0;
}
rc = msm_camera_i2c_write_tbl(
s_ctrl->sensor_i2c_client,
reg_conf_tbl,
num_of_items_in_table,
MSM_CAMERA_I2C_BYTE_DATA);
return rc;
}
int32_t gc0313_sensor_set_wb(struct msm_sensor_ctrl_t *s_ctrl, int wb)
{
struct msm_camera_i2c_reg_conf *reg_conf_tbl = NULL;
uint16_t num_of_items_in_table = 0;
int rc = 0;
printk("%s, to set wb = %d \n", __func__,wb);
switch (wb)
{
case CAMERA_WB_AUTO:
reg_conf_tbl = &gc0313_wb_auto_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_wb_auto_reg_config);
break;
case CAMERA_WB_INCANDESCENT:
reg_conf_tbl = &gc0313_wb_incandescent_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_wb_incandescent_reg_config);
break;
case CAMERA_WB_FLUORESCENT:
reg_conf_tbl = &gc0313_wb_fluorescent_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_wb_fluorescent_reg_config);
break;
case CAMERA_WB_DAYLIGHT:
reg_conf_tbl = &gc0313_wb_daylight_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_wb_daylight_reg_config);
break;
case CAMERA_WB_CLOUDY_DAYLIGHT:
reg_conf_tbl = &gc0313_wb_cloudy_reg_config[0];
num_of_items_in_table = ARRAY_SIZE(gc0313_wb_cloudy_reg_config);
break;
default:
return 0;
}
rc = msm_camera_i2c_write_tbl(
s_ctrl->sensor_i2c_client,
reg_conf_tbl,
num_of_items_in_table,
MSM_CAMERA_I2C_BYTE_DATA);
return rc;
}
static struct v4l2_subdev_core_ops gc0313_subdev_core_ops = {
.ioctl = msm_sensor_subdev_ioctl,
.s_power = msm_sensor_power,
};
static struct v4l2_subdev_video_ops gc0313_subdev_video_ops = {
.enum_mbus_fmt = msm_sensor_v4l2_enum_fmt,
};
static struct v4l2_subdev_ops gc0313_subdev_ops = {
.core = &gc0313_subdev_core_ops,
.video = &gc0313_subdev_video_ops,
};
int32_t gc0313_sensor_setting(struct msm_sensor_ctrl_t *s_ctrl,
int update_type, int res)
{
int32_t rc = 0;
static int csi_config;
s_ctrl->func_tbl->sensor_stop_stream(s_ctrl);
msleep(30);
if (update_type == MSM_SENSOR_REG_INIT) {
CDBG("Register INIT\n");
s_ctrl->curr_csi_params = NULL;
msm_sensor_write_conf_array(
s_ctrl->sensor_i2c_client,
gc0313_reset_confs, 0);
msm_sensor_enable_debugfs(s_ctrl);
csi_config = 0;
} else if (update_type == MSM_SENSOR_UPDATE_PERIODIC) {
CDBG("PERIODIC : %d\n", res);
if (!csi_config) {
s_ctrl->curr_csic_params = s_ctrl->csic_params[res];
CDBG("CSI config in progress\n");
v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
NOTIFY_CSIC_CFG,
s_ctrl->curr_csic_params);
CDBG("CSI config is done\n");
mb();
msleep(50);
msm_sensor_write_conf_array(
s_ctrl->sensor_i2c_client,
s_ctrl->msm_sensor_reg->init_settings, res);
if(s_ctrl->func_tbl->sensor_mirrorandflip_self_adapt)
{
s_ctrl->func_tbl->sensor_mirrorandflip_self_adapt(s_ctrl);
}
msleep(20);
csi_config = 1;
}
v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
NOTIFY_PCLK_CHANGE,
&s_ctrl->sensordata->pdata->ioclk.vfe_clk_rate);
s_ctrl->func_tbl->sensor_start_stream(s_ctrl);
msleep(50);
}
return rc;
}
static struct msm_sensor_fn_t gc0313_func_tbl = {
.sensor_start_stream = msm_sensor_start_stream,
.sensor_stop_stream = msm_sensor_stop_stream,
.sensor_csi_setting = gc0313_sensor_setting,
.sensor_set_sensor_mode = msm_sensor_set_sensor_mode,
.sensor_mode_init = msm_sensor_mode_init,
.sensor_get_output_info = msm_sensor_get_output_info,
.sensor_config = msm_sensor_config,
.sensor_power_up = msm_sensor_power_up,
.sensor_power_down = msm_sensor_power_down,
.sensor_match_id = gc0313_match_id,
.sensor_set_wb = gc0313_sensor_set_wb,
.sensor_set_effect = gc0313_sensor_set_effect,
.sensor_mirrorandflip_self_adapt = gc0313_mirrorandflip_self_adapt,
};
static struct msm_sensor_reg_t gc0313_regs = {
.default_data_type = MSM_CAMERA_I2C_BYTE_DATA,
.start_stream_conf = gc0313_start_settings,
.start_stream_conf_size = ARRAY_SIZE(gc0313_start_settings),
.stop_stream_conf = gc0313_stop_settings,
.stop_stream_conf_size = ARRAY_SIZE(gc0313_stop_settings),
.init_settings = &gc0313_init_conf[0],
.init_size = ARRAY_SIZE(gc0313_init_conf),
.mode_settings = &gc0313_confs[0],
.output_settings = &gc0313_dimensions[0],
.num_conf = ARRAY_SIZE(gc0313_dimensions),
};
static struct msm_sensor_ctrl_t gc0313_s_ctrl = {
.msm_sensor_reg = &gc0313_regs,
.sensor_i2c_client = &gc0313_sensor_i2c_client,
.sensor_i2c_addr = 0x42,
.sensor_output_reg_addr = &gc0313_reg_addr,
.sensor_id_info = &gc0313_id_info,
.cam_mode = MSM_SENSOR_MODE_INVALID,
.csic_params = &gc0313_csi_params_array[0],
.msm_sensor_mutex = &gc0313_mut,
.sensor_i2c_driver = &gc0313_i2c_driver,
.sensor_v4l2_subdev_info = gc0313_subdev_info,
.sensor_v4l2_subdev_info_size = ARRAY_SIZE(gc0313_subdev_info),
.sensor_v4l2_subdev_ops = &gc0313_subdev_ops,
.func_tbl = &gc0313_func_tbl,
.clk_rate = MSM_SENSOR_MCLK_24HZ,
.sensor_name = "23060075FF-GC-F",
};
module_init(msm_sensor_init_module);
MODULE_DESCRIPTION("GC VGA YUV sensor driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
forumber/temiz_kernel_g2 | drivers/rtc/class.c | 32 | 6916 | /*
* RTC subsystem, base class
*
* Copyright (C) 2005 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* class skeleton from drivers/hwmon/hwmon.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/zwait.h>
#include "rtc-core.h"
static DEFINE_IDA(rtc_ida);
struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);
}
#if defined(CONFIG_PM) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/*
* On suspend(), measure the delta between one RTC and the
* system's wall clock; restore it on resume().
*/
static struct timespec old_rtc, old_system, old_delta;
static int rtc_suspend(struct device *dev, pm_message_t mesg)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
struct timespec delta, delta_delta;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
/* snapshot the current RTC and system time at suspend */
rtc_read_time(rtc, &tm);
getnstimeofday(&old_system);
rtc_tm_to_time(&tm, &old_rtc.tv_sec);
/*
* To avoid drift caused by repeated suspend/resumes,
* which each can add ~1 second drift error,
* try to compensate so the difference in system time
* and rtc time stays close to constant.
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occurred and set old_delta to the current delta.
*/
old_delta = delta;
} else {
/* Otherwise try to adjust old_system to compensate */
old_system = timespec_sub(old_system, delta_delta);
}
return 0;
}
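/* Worked example of the compensation above (illustrative numbers): if a
* previous cycle left old_delta at +10.3s and this suspend measures
* delta = +10.8s, then delta_delta = +0.5s, inside the (-2s, 2s) window,
* so old_system is shifted back by 0.5s and rtc_resume() later injects a
* sleep time free of that drift. A delta_delta of, say, 30s instead
* suggests the wall clock was deliberately corrected, and old_delta is
* simply re-based to the new delta.
*/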
static int rtc_resume(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
struct timespec new_system, new_rtc;
struct timespec sleep_time;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
/* snapshot the current rtc and system time at resume */
getnstimeofday(&new_system);
rtc_read_time(rtc, &tm);
if (rtc_valid_tm(&tm) != 0) {
pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
return 0;
}
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
if (new_rtc.tv_sec < old_rtc.tv_sec) {
pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
/* calculate the RTC time delta (sleep time)*/
sleep_time = timespec_sub(new_rtc, old_rtc);
/*
* Since these RTC suspend/resume handlers are not called
* at the very end of suspend or the start of resume,
* some run-time may pass on either side of the sleep time
* so subtract kernel run-time between rtc_suspend and rtc_resume
* to keep things accurate.
*/
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
if (sleep_time.tv_sec >= 0)
timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
#else
#define rtc_suspend NULL
#define rtc_resume NULL
#endif
/**
* rtc_device_register - register w/ RTC class
* @dev: the device to register
*
* rtc_device_unregister() must be called when the class device is no
* longer needed.
*
* Returns the pointer to the new struct class device.
*/
struct rtc_device *rtc_device_register(const char *name, struct device *dev,
const struct rtc_class_ops *ops,
struct module *owner)
{
struct rtc_device *rtc;
struct rtc_wkalrm alrm;
int id, err;
id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
err = id;
goto exit;
}
rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
if (rtc == NULL) {
err = -ENOMEM;
goto exit_ida;
}
rtc->id = id;
rtc->ops = ops;
rtc->owner = owner;
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
rtc->dev.parent = dev;
rtc->dev.class = rtc_class;
rtc->dev.release = rtc_device_release;
mutex_init(&rtc->ops_lock);
spin_lock_init(&rtc->irq_lock);
spin_lock_init(&rtc->irq_task_lock);
init_waitqueue_head(&rtc->irq_queue);
/* Init timerqueue */
timerqueue_init_head(&rtc->timerqueue);
INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
/* Init aie timer */
rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
/* Init uie timer */
rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
/* Init pie timer */
hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
/* Check to see if there is an ALARM already set in hw */
err = __rtc_read_alarm(rtc, &alrm);
if (!err && !rtc_valid_tm(&alrm.time))
rtc_initialize_alarm(rtc, &alrm);
strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
dev_set_name(&rtc->dev, "rtc%d", id);
rtc_dev_prepare(rtc);
err = device_register(&rtc->dev);
if (err) {
put_device(&rtc->dev);
goto exit_kfree;
}
err = zw_rtc_info_register(rtc);
if (err) {
device_unregister(&rtc->dev);
put_device(&rtc->dev);
goto exit_kfree;
}
rtc_dev_add_device(rtc);
rtc_sysfs_add_device(rtc);
rtc_proc_add_device(rtc);
dev_info(dev, "rtc core: registered %s as %s\n",
rtc->name, dev_name(&rtc->dev));
return rtc;
exit_kfree:
kfree(rtc);
exit_ida:
ida_simple_remove(&rtc_ida, id);
exit:
dev_err(dev, "rtc core: unable to register %s, err = %d\n",
name, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rtc_device_register);
/**
* rtc_device_unregister - removes the previously registered RTC class device
*
* @rtc: the RTC class device to destroy
*/
void rtc_device_unregister(struct rtc_device *rtc)
{
if (get_device(&rtc->dev) != NULL) {
mutex_lock(&rtc->ops_lock);
/* remove innards of this RTC, then disable it, before
* letting any rtc_class_open() users access it again
*/
rtc_sysfs_del_device(rtc);
rtc_dev_del_device(rtc);
rtc_proc_del_device(rtc);
zw_rtc_info_unregister(rtc);
device_unregister(&rtc->dev);
rtc->ops = NULL;
mutex_unlock(&rtc->ops_lock);
put_device(&rtc->dev);
}
}
EXPORT_SYMBOL_GPL(rtc_device_unregister);
static int __init rtc_init(void)
{
rtc_class = class_create(THIS_MODULE, "rtc");
if (IS_ERR(rtc_class)) {
printk(KERN_ERR "%s: couldn't create class\n", __FILE__);
return PTR_ERR(rtc_class);
}
rtc_class->suspend = rtc_suspend;
rtc_class->resume = rtc_resume;
rtc_dev_init();
rtc_sysfs_init(rtc_class);
return 0;
}
static void __exit rtc_exit(void)
{
rtc_dev_exit();
class_destroy(rtc_class);
ida_destroy(&rtc_ida);
}
subsys_initcall(rtc_init);
module_exit(rtc_exit);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("RTC class support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
palmer-dabbelt/linux | drivers/usb/phy/phy-generic.c | 288 | 8934 | /*
* NOP USB transceiver for all USB transceiver which are either built-in
* into USB IP or which are mostly autonomous.
*
* Copyright (C) 2009 Texas Instruments Inc
* Author: Ajay Kumar Gupta <ajay.gupta@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Current status:
* This provides a "nop" transceiver for PHYs which are
* autonomous such as isp1504, isp1707, etc.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include "phy-generic.h"
#define VBUS_IRQ_FLAGS \
(IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | \
IRQF_ONESHOT)
struct platform_device *usb_phy_generic_register(void)
{
return platform_device_register_simple("usb_phy_generic",
PLATFORM_DEVID_AUTO, NULL, 0);
}
EXPORT_SYMBOL_GPL(usb_phy_generic_register);
void usb_phy_generic_unregister(struct platform_device *pdev)
{
platform_device_unregister(pdev);
}
EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
static int nop_set_suspend(struct usb_phy *x, int suspend)
{
return 0;
}
static void nop_reset(struct usb_phy_generic *nop)
{
if (!nop->gpiod_reset)
return;
gpiod_set_value(nop->gpiod_reset, 1);
usleep_range(10000, 20000);
gpiod_set_value(nop->gpiod_reset, 0);
}
/* interface to regulator framework */
static void nop_set_vbus_draw(struct usb_phy_generic *nop, unsigned mA)
{
struct regulator *vbus_draw = nop->vbus_draw;
int enabled;
int ret;
if (!vbus_draw)
return;
enabled = nop->vbus_draw_enabled;
if (mA) {
regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
if (!enabled) {
ret = regulator_enable(vbus_draw);
if (ret < 0)
return;
nop->vbus_draw_enabled = 1;
}
} else {
if (enabled) {
ret = regulator_disable(vbus_draw);
if (ret < 0)
return;
nop->vbus_draw_enabled = 0;
}
}
nop->mA = mA;
}
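/* Illustrative call (hypothetical values): nop_set_vbus_draw(nop, 500)
* requests a regulator limit of 0..500000 uA via the 1000 * mA conversion
* above and enables the supply if it was off; nop_set_vbus_draw(nop, 0)
* disables it again. The VBUS interrupt thread below uses the 100 mA
* "unit load" on connect and 0 on disconnect.
*/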
static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
{
struct usb_phy_generic *nop = data;
struct usb_otg *otg = nop->phy.otg;
int vbus, status;
vbus = gpiod_get_value(nop->gpiod_vbus);
if ((vbus ^ nop->vbus) == 0)
return IRQ_HANDLED;
nop->vbus = vbus;
if (vbus) {
status = USB_EVENT_VBUS;
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
usb_gadget_vbus_connect(otg->gadget);
/* drawing a "unit load" is *always* OK, except for OTG */
nop_set_vbus_draw(nop, 100);
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
} else {
nop_set_vbus_draw(nop, 0);
usb_gadget_vbus_disconnect(otg->gadget);
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
}
return IRQ_HANDLED;
}
int usb_gen_phy_init(struct usb_phy *phy)
{
struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
if (!IS_ERR(nop->vcc)) {
if (regulator_enable(nop->vcc))
dev_err(phy->dev, "Failed to enable power\n");
}
if (!IS_ERR(nop->clk))
clk_prepare_enable(nop->clk);
nop_reset(nop);
return 0;
}
EXPORT_SYMBOL_GPL(usb_gen_phy_init);
void usb_gen_phy_shutdown(struct usb_phy *phy)
{
struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
gpiod_set_value(nop->gpiod_reset, 1);
if (!IS_ERR(nop->clk))
clk_disable_unprepare(nop->clk);
if (!IS_ERR(nop->vcc)) {
if (regulator_disable(nop->vcc))
dev_err(phy->dev, "Failed to disable power\n");
}
}
EXPORT_SYMBOL_GPL(usb_gen_phy_shutdown);
static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
if (!otg)
return -ENODEV;
if (!gadget) {
otg->gadget = NULL;
return -ENODEV;
}
otg->gadget = gadget;
otg->state = OTG_STATE_B_IDLE;
return 0;
}
static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
{
if (!otg)
return -ENODEV;
if (!host) {
otg->host = NULL;
return -ENODEV;
}
otg->host = host;
return 0;
}
int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
struct usb_phy_generic_platform_data *pdata)
{
enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err = 0;
u32 clk_rate = 0;
bool needs_vcc = false;
if (dev->of_node) {
struct device_node *node = dev->of_node;
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
needs_vcc = of_property_read_bool(node, "vcc-supply");
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
if (!err) {
nop->gpiod_vbus = devm_gpiod_get_optional(dev,
"vbus-detect",
GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
} else if (pdata) {
type = pdata->type;
clk_rate = pdata->clk_rate;
needs_vcc = pdata->needs_vcc;
if (gpio_is_valid(pdata->gpio_reset)) {
err = devm_gpio_request_one(dev, pdata->gpio_reset,
GPIOF_ACTIVE_LOW,
dev_name(dev));
if (!err)
nop->gpiod_reset =
gpio_to_desc(pdata->gpio_reset);
}
nop->gpiod_vbus = pdata->gpiod_vbus;
}
if (err == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (err) {
dev_err(dev, "Error requesting RESET or VBUS GPIO\n");
return err;
}
if (nop->gpiod_reset)
gpiod_direction_output(nop->gpiod_reset, 1);
nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
GFP_KERNEL);
if (!nop->phy.otg)
return -ENOMEM;
nop->clk = devm_clk_get(dev, "main_clk");
if (IS_ERR(nop->clk)) {
dev_dbg(dev, "Can't get phy clock: %ld\n",
PTR_ERR(nop->clk));
}
if (!IS_ERR(nop->clk) && clk_rate) {
err = clk_set_rate(nop->clk, clk_rate);
if (err) {
dev_err(dev, "Error setting clock rate\n");
return err;
}
}
nop->vcc = devm_regulator_get(dev, "vcc");
if (IS_ERR(nop->vcc)) {
dev_dbg(dev, "Error getting vcc regulator: %ld\n",
PTR_ERR(nop->vcc));
if (needs_vcc)
return -EPROBE_DEFER;
}
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
nop->phy.set_suspend = nop_set_suspend;
nop->phy.type = type;
nop->phy.otg->state = OTG_STATE_UNDEFINED;
nop->phy.otg->usb_phy = &nop->phy;
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
static int usb_phy_generic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb_phy_generic *nop;
int err;
nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
if (!nop)
return -ENOMEM;
err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev));
if (err)
return err;
if (nop->gpiod_vbus) {
err = devm_request_threaded_irq(&pdev->dev,
gpiod_to_irq(nop->gpiod_vbus),
NULL, nop_gpio_vbus_thread,
VBUS_IRQ_FLAGS, "vbus_detect",
nop);
if (err) {
dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
gpiod_to_irq(nop->gpiod_vbus), err);
return err;
}
}
nop->phy.init = usb_gen_phy_init;
nop->phy.shutdown = usb_gen_phy_shutdown;
err = usb_add_phy_dev(&nop->phy);
if (err) {
dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
err);
return err;
}
platform_set_drvdata(pdev, nop);
return 0;
}
static int usb_phy_generic_remove(struct platform_device *pdev)
{
struct usb_phy_generic *nop = platform_get_drvdata(pdev);
usb_remove_phy(&nop->phy);
return 0;
}
static const struct of_device_id nop_xceiv_dt_ids[] = {
{ .compatible = "usb-nop-xceiv" },
{ }
};
MODULE_DEVICE_TABLE(of, nop_xceiv_dt_ids);
static struct platform_driver usb_phy_generic_driver = {
.probe = usb_phy_generic_probe,
.remove = usb_phy_generic_remove,
.driver = {
.name = "usb_phy_generic",
.of_match_table = nop_xceiv_dt_ids,
},
};
static int __init usb_phy_generic_init(void)
{
return platform_driver_register(&usb_phy_generic_driver);
}
subsys_initcall(usb_phy_generic_init);
static void __exit usb_phy_generic_exit(void)
{
platform_driver_unregister(&usb_phy_generic_driver);
}
module_exit(usb_phy_generic_exit);
MODULE_ALIAS("platform:usb_phy_generic");
MODULE_AUTHOR("Texas Instruments Inc");
MODULE_DESCRIPTION("NOP USB Transceiver driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
FelipeEcker/XenGT-Preview-kernel | sound/soc/soc-cache.c | 288 | 4722 | /*
* soc-cache.c -- ASoC register cache helpers
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <sound/soc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <trace/events/asoc.h>
static bool snd_soc_set_cache_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size)
{
switch (word_size) {
case 1: {
u8 *cache = base;
if (cache[idx] == val)
return true;
cache[idx] = val;
break;
}
case 2: {
u16 *cache = base;
if (cache[idx] == val)
return true;
cache[idx] = val;
break;
}
default:
WARN(1, "Invalid word_size %d\n", word_size);
break;
}
return false;
}
static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
unsigned int word_size)
{
if (!base)
return -1;
switch (word_size) {
case 1: {
const u8 *cache = base;
return cache[idx];
}
case 2: {
const u16 *cache = base;
return cache[idx];
}
default:
WARN(1, "Invalid word_size %d\n", word_size);
break;
}
/* unreachable */
return -1;
}
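/* A minimal sketch of how the two helpers pair up (hypothetical cache,
* word_size 2):
*
* u16 cache[2] = { 0x0000, 0x0000 };
*
* snd_soc_set_cache_val(cache, 1, 0x1a2b, 2); // false: value changed
* snd_soc_get_cache_val(cache, 1, 2); // reads back 0x1a2b
* snd_soc_set_cache_val(cache, 1, 0x1a2b, 2); // true: already cached
*/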
int snd_soc_cache_init(struct snd_soc_codec *codec)
{
const struct snd_soc_codec_driver *codec_drv = codec->driver;
size_t reg_size;
reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
if (!reg_size)
return 0;
mutex_init(&codec->cache_rw_mutex);
dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n",
codec->component.name);
if (codec_drv->reg_cache_default)
codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
reg_size, GFP_KERNEL);
else
codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
if (!codec->reg_cache)
return -ENOMEM;
return 0;
}
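/* Sizing example (illustrative): a codec declaring reg_cache_size = 64
* and reg_word_size = 2 gets reg_size = 64 * 2 = 128 bytes, copied from
* reg_cache_default when the driver supplies one and zero-filled
* otherwise.
*/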
/*
* NOTE: keep in mind that this function might be called
* multiple times.
*/
int snd_soc_cache_exit(struct snd_soc_codec *codec)
{
dev_dbg(codec->dev, "ASoC: Destroying cache for %s codec\n",
codec->component.name);
kfree(codec->reg_cache);
codec->reg_cache = NULL;
return 0;
}
/**
* snd_soc_cache_read: Fetch the value of a given register from the cache.
*
* @codec: CODEC to configure.
* @reg: The register index.
* @value: The value to be returned.
*/
int snd_soc_cache_read(struct snd_soc_codec *codec,
unsigned int reg, unsigned int *value)
{
if (!value)
return -EINVAL;
mutex_lock(&codec->cache_rw_mutex);
if (!ZERO_OR_NULL_PTR(codec->reg_cache))
*value = snd_soc_get_cache_val(codec->reg_cache, reg,
codec->driver->reg_word_size);
mutex_unlock(&codec->cache_rw_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_read);
/**
* snd_soc_cache_write: Set the value of a given register in the cache.
*
* @codec: CODEC to configure.
* @reg: The register index.
* @value: The new register value.
*/
int snd_soc_cache_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
mutex_lock(&codec->cache_rw_mutex);
if (!ZERO_OR_NULL_PTR(codec->reg_cache))
snd_soc_set_cache_val(codec->reg_cache, reg, value,
codec->driver->reg_word_size);
mutex_unlock(&codec->cache_rw_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_write);
static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
{
int i;
int ret;
const struct snd_soc_codec_driver *codec_drv;
unsigned int val;
codec_drv = codec->driver;
for (i = 0; i < codec_drv->reg_cache_size; ++i) {
ret = snd_soc_cache_read(codec, i, &val);
if (ret)
return ret;
if (codec_drv->reg_cache_default)
if (snd_soc_get_cache_val(codec_drv->reg_cache_default,
i, codec_drv->reg_word_size) == val)
continue;
ret = snd_soc_write(codec, i, val);
if (ret)
return ret;
dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
i, val);
}
return 0;
}
/**
* snd_soc_cache_sync: Sync the register cache with the hardware.
*
* @codec: CODEC to configure.
*
* Any registers that should not be synced should be marked as
* volatile. In general drivers can choose not to use the provided
* syncing functionality if they so require.
*/
int snd_soc_cache_sync(struct snd_soc_codec *codec)
{
const char *name = "flat";
int ret;
if (!codec->cache_sync)
return 0;
dev_dbg(codec->dev, "ASoC: Syncing cache for %s codec\n",
codec->component.name);
trace_snd_soc_cache_sync(codec, name, "start");
ret = snd_soc_flat_cache_sync(codec);
if (!ret)
codec->cache_sync = 0;
trace_snd_soc_cache_sync(codec, name, "end");
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
| gpl-2.0 |
lirokoa/htc_pico_kernel | drivers/base/firmware_class.c | 288 | 17472 | /*
* firmware_class.c - Multi purpose firmware loading support
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
* Please see Documentation/firmware_class/ for more information.
*
*/
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
/* Builtin firmware support */
#ifdef CONFIG_FW_LOADER
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
struct builtin_fw *b_fw;
for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
if (strcmp(name, b_fw->name) == 0) {
fw->size = b_fw->size;
fw->data = b_fw->data;
return true;
}
}
return false;
}
static bool fw_is_builtin_firmware(const struct firmware *fw)
{
struct builtin_fw *b_fw;
for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
if (fw->data == b_fw->data)
return true;
return false;
}
#else /* Module case - no builtin firmware support */
static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
return false;
}
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
return false;
}
#endif
enum {
FW_STATUS_LOADING,
FW_STATUS_DONE,
FW_STATUS_ABORT,
};
static int loading_timeout = 60; /* In seconds */
/* fw_lock could be moved to 'struct firmware_priv' but since it is just
* guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
struct completion completion;
struct firmware *fw;
unsigned long status;
struct page **pages;
int nr_pages;
int page_array_size;
struct timer_list timeout;
struct device dev;
bool nowait;
char fw_id[];
};
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
}
static void fw_load_abort(struct firmware_priv *fw_priv)
{
set_bit(FW_STATUS_ABORT, &fw_priv->status);
wmb();
complete(&fw_priv->completion);
}
static ssize_t firmware_timeout_show(struct class *class,
struct class_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", loading_timeout);
}
/**
* firmware_timeout_store - set number of seconds to wait for firmware
* @class: device class pointer
* @attr: device attribute pointer
* @buf: buffer to scan for timeout value
* @count: number of bytes in @buf
*
* Sets the number of seconds to wait for the firmware. Once
* this expires an error will be returned to the driver and no
* firmware will be provided.
*
* Note: zero means 'wait forever'.
**/
static ssize_t firmware_timeout_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t count)
{
loading_timeout = simple_strtol(buf, NULL, 10);
if (loading_timeout < 0)
loading_timeout = 0;
return count;
}
static struct class_attribute firmware_class_attrs[] = {
__ATTR(timeout, S_IWUSR | S_IRUGO,
firmware_timeout_show, firmware_timeout_store),
__ATTR_NULL
};
static void fw_dev_release(struct device *dev)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
int i;
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
kfree(fw_priv);
module_put(THIS_MODULE);
}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
return -ENOMEM;
return 0;
}
static struct class firmware_class = {
.name = "firmware",
.class_attrs = firmware_class_attrs,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status);
return sprintf(buf, "%d\n", loading);
}
static void firmware_free_data(const struct firmware *fw)
{
int i;
vunmap(fw->data);
if (fw->pages) {
for (i = 0; i < PFN_UP(fw->size); i++)
__free_page(fw->pages[i]);
kfree(fw->pages);
}
}
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
/**
* firmware_loading_store - set value in the 'loading' control file
* @dev: device pointer
* @attr: device attribute pointer
* @buf: buffer to scan for loading control value
* @count: number of bytes in @buf
*
* The relevant values are:
*
* 1: Start a load, discarding any previous partial load.
* 0: Conclude the load and hand the data to the driver code.
* -1: Conclude the load with an error and discard any written data.
**/
static ssize_t firmware_loading_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
int loading = simple_strtol(buf, NULL, 10);
int i;
switch (loading) {
case 1:
mutex_lock(&fw_lock);
if (!fw_priv->fw) {
mutex_unlock(&fw_lock);
break;
}
firmware_free_data(fw_priv->fw);
memset(fw_priv->fw, 0, sizeof(struct firmware));
/* Free any pages that are not yet owned by 'struct firmware' */
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
0, PAGE_KERNEL_RO);
if (!fw_priv->fw->data) {
dev_err(dev, "%s: vmap() failed\n", __func__);
goto err;
}
/* Pages are now owned by 'struct firmware' */
fw_priv->fw->pages = fw_priv->pages;
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
clear_bit(FW_STATUS_LOADING, &fw_priv->status);
break;
}
/* fallthrough */
default:
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
/* fallthrough */
case -1:
err:
fw_load_abort(fw_priv);
break;
}
return count;
}
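/*
 * Illustrative user-space helper sequence for the protocol above (a
 * sketch; $DEVPATH and $FIRMWARE come from the firmware uevent):
 *
 *   echo 1 > /sys/$DEVPATH/loading
 *   cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *   echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 instead of 0 aborts the load and discards any written data.
 */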
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = to_firmware_priv(dev);
struct firmware *fw;
ssize_t ret_count;
mutex_lock(&fw_lock);
fw = fw_priv->fw;
if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
ret_count = -ENODEV;
goto out;
}
if (offset > fw->size) {
ret_count = 0;
goto out;
}
if (count > fw->size - offset)
count = fw->size - offset;
ret_count = count;
while (count) {
void *page_data;
int page_nr = offset >> PAGE_SHIFT;
int page_ofs = offset & (PAGE_SIZE-1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
page_data = kmap(fw_priv->pages[page_nr]);
memcpy(buffer, page_data + page_ofs, page_cnt);
kunmap(fw_priv->pages[page_nr]);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
}
out:
mutex_unlock(&fw_lock);
return ret_count;
}
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
/* If the array of pages is too small, grow it... */
if (fw_priv->page_array_size < pages_needed) {
int new_array_size = max(pages_needed,
fw_priv->page_array_size * 2);
struct page **new_pages;
new_pages = kmalloc(new_array_size * sizeof(void *),
GFP_KERNEL);
if (!new_pages) {
fw_load_abort(fw_priv);
return -ENOMEM;
}
memcpy(new_pages, fw_priv->pages,
fw_priv->page_array_size * sizeof(void *));
memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
(new_array_size - fw_priv->page_array_size));
kfree(fw_priv->pages);
fw_priv->pages = new_pages;
fw_priv->page_array_size = new_array_size;
}
while (fw_priv->nr_pages < pages_needed) {
fw_priv->pages[fw_priv->nr_pages] =
alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!fw_priv->pages[fw_priv->nr_pages]) {
fw_load_abort(fw_priv);
return -ENOMEM;
}
fw_priv->nr_pages++;
}
return 0;
}
/**
* firmware_data_write - write method for firmware
* @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
* @offset: buffer offset for write in total data store area
* @count: buffer size
*
* Data written to the 'data' attribute will be later handed to
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = to_firmware_priv(dev);
struct firmware *fw;
ssize_t retval;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
mutex_lock(&fw_lock);
fw = fw_priv->fw;
if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
retval = -ENODEV;
goto out;
}
retval = fw_realloc_buffer(fw_priv, offset + count);
if (retval)
goto out;
retval = count;
while (count) {
void *page_data;
int page_nr = offset >> PAGE_SHIFT;
int page_ofs = offset & (PAGE_SIZE - 1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
page_data = kmap(fw_priv->pages[page_nr]);
memcpy(page_data + page_ofs, buffer, page_cnt);
kunmap(fw_priv->pages[page_nr]);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
}
fw->size = max_t(size_t, offset, fw->size);
out:
mutex_unlock(&fw_lock);
return retval;
}
static struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
.read = firmware_data_read,
.write = firmware_data_write,
};
static void firmware_class_timeout(u_long data)
{
struct firmware_priv *fw_priv = (struct firmware_priv *) data;
fw_load_abort(fw_priv);
}
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
struct device *device, bool uevent, bool nowait)
{
struct firmware_priv *fw_priv;
struct device *f_dev;
int error;
fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1, GFP_KERNEL);
if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
error = -ENOMEM;
goto err_out;
}
fw_priv->fw = firmware;
fw_priv->nowait = nowait;
strcpy(fw_priv->fw_id, fw_name);
init_completion(&fw_priv->completion);
setup_timer(&fw_priv->timeout,
firmware_class_timeout, (u_long) fw_priv);
f_dev = &fw_priv->dev;
device_initialize(f_dev);
dev_set_name(f_dev, "%s", dev_name(device));
f_dev->parent = device;
f_dev->class = &firmware_class;
dev_set_uevent_suppress(f_dev, true);
/* Need to pin this module until class device is destroyed */
__module_get(THIS_MODULE);
error = device_add(f_dev);
if (error) {
dev_err(device, "%s: device_register failed\n", __func__);
goto err_put_dev;
}
error = device_create_bin_file(f_dev, &firmware_attr_data);
if (error) {
dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
goto err_del_dev;
}
error = device_create_file(f_dev, &dev_attr_loading);
if (error) {
dev_err(device, "%s: device_create_file failed\n", __func__);
goto err_del_bin_attr;
}
if (uevent)
dev_set_uevent_suppress(f_dev, false);
return fw_priv;
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
device_del(f_dev);
err_put_dev:
put_device(f_dev);
err_out:
return ERR_PTR(error);
}
static void fw_destroy_instance(struct firmware_priv *fw_priv)
{
struct device *f_dev = &fw_priv->dev;
device_remove_file(f_dev, &dev_attr_loading);
device_remove_bin_file(f_dev, &firmware_attr_data);
device_unregister(f_dev);
}
static int _request_firmware(const struct firmware **firmware_p,
const char *name, struct device *device,
bool uevent, bool nowait)
{
struct firmware_priv *fw_priv;
struct firmware *firmware;
int retval = 0;
if (!firmware_p)
return -EINVAL;
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
retval = -ENOMEM;
goto out;
}
if (fw_get_builtin_firmware(firmware, name)) {
dev_dbg(device, "firmware: using built-in firmware %s\n", name);
return 0;
}
if (uevent)
dev_dbg(device, "firmware: requesting %s\n", name);
fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
if (IS_ERR(fw_priv)) {
retval = PTR_ERR(fw_priv);
goto out;
}
if (uevent) {
if (loading_timeout > 0)
mod_timer(&fw_priv->timeout,
round_jiffies_up(jiffies +
loading_timeout * HZ));
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
wait_for_completion(&fw_priv->completion);
set_bit(FW_STATUS_DONE, &fw_priv->status);
del_timer_sync(&fw_priv->timeout);
mutex_lock(&fw_lock);
if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status))
retval = -ENOENT;
fw_priv->fw = NULL;
mutex_unlock(&fw_lock);
fw_destroy_instance(fw_priv);
out:
if (retval) {
release_firmware(firmware);
*firmware_p = NULL;
}
return retval;
}
/**
* request_firmware: - send firmware request and wait for it
* @firmware_p: pointer to firmware image
* @name: name of firmware file
* @device: device for which firmware is being loaded
*
* @firmware_p will be used to return a firmware image by the name
* of @name for device @device.
*
* Should be called from user context where sleeping is allowed.
*
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
int uevent = 1;
return _request_firmware(firmware_p, name, device, uevent, false);
}
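/*
 * Minimal driver-side sketch (device and firmware names are illustrative):
 *
 *   const struct firmware *fw;
 *   int err = request_firmware(&fw, "example/fw.bin", &pdev->dev);
 *   if (err)
 *           return err;
 *   ... copy fw->data (fw->size bytes) into the device, then ...
 *   release_firmware(fw);
 */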
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
void release_firmware(const struct firmware *fw)
{
if (fw) {
if (!fw_is_builtin_firmware(fw))
firmware_free_data(fw);
kfree(fw);
}
}
/* Async support */
struct firmware_work {
struct work_struct work;
struct module *module;
const char *name;
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
int uevent;
};
static int request_firmware_work_func(void *arg)
{
struct firmware_work *fw_work = arg;
const struct firmware *fw;
int ret;
if (!arg) {
WARN_ON(1);
return 0;
}
ret = _request_firmware(&fw, fw_work->name, fw_work->device,
fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
return ret;
}
/**
* request_firmware_nowait - asynchronous version of request_firmware
* @module: module requesting the firmware
* @uevent: sends uevent to copy the firmware image if this flag
* is non-zero else the firmware copy must be done manually.
* @name: name of firmware file
* @device: device for which firmware is being loaded
* @gfp: allocation flags
* @context: will be passed over to @cont, and
* @fw may be %NULL if firmware request fails.
* @cont: function to be called asynchronously when the firmware
* request is over.
*
* Asynchronous variant of request_firmware() for user contexts where
* it is not possible to sleep for a long time. It can't be called
* in atomic contexts.
**/
int
request_firmware_nowait(
struct module *module, int uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
struct task_struct *task;
struct firmware_work *fw_work;
fw_work = kzalloc(sizeof (struct firmware_work), gfp);
if (!fw_work)
return -ENOMEM;
fw_work->module = module;
fw_work->name = name;
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
fw_work->uevent = uevent;
if (!try_module_get(module)) {
kfree(fw_work);
return -EFAULT;
}
task = kthread_run(request_firmware_work_func, fw_work,
"firmware/%s", name);
if (IS_ERR(task)) {
fw_work->cont(NULL, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
return PTR_ERR(task);
}
return 0;
}
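/*
 * Asynchronous usage sketch (the callback and names are illustrative):
 *
 *   static void example_fw_cont(const struct firmware *fw, void *context)
 *   {
 *           if (!fw)
 *                   return;          (request failed or timed out)
 *           ... program the device, then release_firmware(fw); ...
 *   }
 *
 *   request_firmware_nowait(THIS_MODULE, 1, "example/fw.bin",
 *                           &pdev->dev, GFP_KERNEL, NULL, example_fw_cont);
 */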
static int __init firmware_class_init(void)
{
return class_register(&firmware_class);
}
static void __exit firmware_class_exit(void)
{
class_unregister(&firmware_class);
}
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);
EXPORT_SYMBOL(release_firmware);
EXPORT_SYMBOL(request_firmware);
EXPORT_SYMBOL(request_firmware_nowait);
| gpl-2.0 |
ktoonsez/SiyahD | drivers/media/video/v4l2-fh.c | 544 | 3214 | /*
* v4l2-fh.c
*
* V4L2 file handles.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/bitops.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
fh->vdev = vdev;
/* Inherit from video_device. May be overridden by the driver. */
fh->ctrl_handler = vdev->ctrl_handler;
INIT_LIST_HEAD(&fh->list);
set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
fh->prio = V4L2_PRIORITY_UNSET;
/*
* fh->events only needs to be initialized if the driver
* supports the VIDIOC_SUBSCRIBE_EVENT ioctl.
*/
if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
return v4l2_event_init(fh);
fh->events = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
void v4l2_fh_add(struct v4l2_fh *fh)
{
unsigned long flags;
if (test_bit(V4L2_FL_USE_FH_PRIO, &fh->vdev->flags))
v4l2_prio_open(fh->vdev->prio, &fh->prio);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add(&fh->list, &fh->vdev->fh_list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_fh_add);
int v4l2_fh_open(struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
filp->private_data = fh;
if (fh == NULL)
return -ENOMEM;
v4l2_fh_init(fh, vdev);
v4l2_fh_add(fh);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_open);
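/*
 * Typical wiring (a sketch): drivers that keep no per-open state beyond
 * struct v4l2_fh can use these helpers directly as file operations:
 *
 *   static const struct v4l2_file_operations example_fops = {
 *           .owner   = THIS_MODULE,
 *           .open    = v4l2_fh_open,
 *           .release = v4l2_fh_release,
 *   };
 */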
void v4l2_fh_del(struct v4l2_fh *fh)
{
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_del_init(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (test_bit(V4L2_FL_USE_FH_PRIO, &fh->vdev->flags))
v4l2_prio_close(fh->vdev->prio, fh->prio);
}
EXPORT_SYMBOL_GPL(v4l2_fh_del);
void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
fh->vdev = NULL;
v4l2_event_free(fh);
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
int v4l2_fh_release(struct file *filp)
{
struct v4l2_fh *fh = filp->private_data;
if (fh) {
v4l2_fh_del(fh);
v4l2_fh_exit(fh);
kfree(fh);
}
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_release);
int v4l2_fh_is_singular(struct v4l2_fh *fh)
{
unsigned long flags;
int is_singular;
if (fh == NULL || fh->vdev == NULL)
return 0;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
is_singular = list_is_singular(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return is_singular;
}
EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
| gpl-2.0 |
hggh/linux | tools/perf/tests/hists_link.c | 544 | 8527 | #include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"
struct sample {
u32 pid;
u64 ip;
struct thread *thread;
struct map *map;
struct symbol *sym;
};
/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
/* perf [kernel] schedule() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
/* perf [perf] main() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
/* perf [perf] cmd_record() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
/* bash [bash] xmalloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
/* bash [libc] malloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};
static struct sample fake_samples[][5] = {
{
/* perf [perf] run_command() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
/* perf [libc] malloc() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
/* perf [kernel] page_fault() */
{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
/* perf [kernel] sys_perf_event_open() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
/* bash [libc] free() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
},
{
/* perf [libc] free() */
{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
/* bash [libc] malloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
/* bash [bash] xfree() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
/* bash [libc] realloc() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
/* bash [kernel] page_fault() */
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
},
};
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
struct perf_evsel *evsel;
struct addr_location al;
struct hist_entry *he;
struct perf_sample sample = { .period = 1, };
size_t i = 0, k;
/*
* Each evsel will have 10 samples - 5 common and 5 distinct.
* However, the second evsel also has a collapsed entry for
* "bash [libc] malloc", so a total of 9 entries will be in the tree.
*/
evlist__for_each(evlist, evsel) {
struct hists *hists = evsel__hists(evsel);
for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
const union perf_event event = {
.header = {
.misc = PERF_RECORD_MISC_USER,
},
};
sample.pid = fake_common_samples[k].pid;
sample.tid = fake_common_samples[k].pid;
sample.ip = fake_common_samples[k].ip;
if (perf_event__preprocess_sample(&event, machine, &al,
&sample) < 0)
goto out;
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
if (he == NULL) {
addr_location__put(&al);
goto out;
}
fake_common_samples[k].thread = al.thread;
fake_common_samples[k].map = al.map;
fake_common_samples[k].sym = al.sym;
}
for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
const union perf_event event = {
.header = {
.misc = PERF_RECORD_MISC_USER,
},
};
sample.pid = fake_samples[i][k].pid;
sample.tid = fake_samples[i][k].pid;
sample.ip = fake_samples[i][k].ip;
if (perf_event__preprocess_sample(&event, machine, &al,
&sample) < 0)
goto out;
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
if (he == NULL) {
addr_location__put(&al);
goto out;
}
fake_samples[i][k].thread = al.thread;
fake_samples[i][k].map = al.map;
fake_samples[i][k].sym = al.sym;
}
i++;
}
return 0;
out:
pr_debug("Not enough memory for adding a hist entry\n");
return -1;
}
static int find_sample(struct sample *samples, size_t nr_samples,
struct thread *t, struct map *m, struct symbol *s)
{
while (nr_samples--) {
if (samples->thread == t && samples->map == m &&
samples->sym == s)
return 1;
samples++;
}
return 0;
}
static int __validate_match(struct hists *hists)
{
size_t count = 0;
struct rb_root *root;
struct rb_node *node;
/*
* Only entries from fake_common_samples should have a pair.
*/
if (sort__need_collapse)
root = &hists->entries_collapsed;
else
root = hists->entries_in;
node = rb_first(root);
while (node) {
struct hist_entry *he;
he = rb_entry(node, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(he)) {
if (find_sample(fake_common_samples,
ARRAY_SIZE(fake_common_samples),
he->thread, he->ms.map, he->ms.sym)) {
count++;
} else {
pr_debug("Can't find the matched entry\n");
return -1;
}
}
node = rb_next(node);
}
if (count != ARRAY_SIZE(fake_common_samples)) {
pr_debug("Invalid count for matched entries: %zd of %zd\n",
count, ARRAY_SIZE(fake_common_samples));
return -1;
}
return 0;
}
static int validate_match(struct hists *leader, struct hists *other)
{
return __validate_match(leader) || __validate_match(other);
}
static int __validate_link(struct hists *hists, int idx)
{
size_t count = 0;
size_t count_pair = 0;
size_t count_dummy = 0;
struct rb_root *root;
struct rb_node *node;
/*
* The leader hists (idx = 0) will have dummy entries from the other hists,
* and some entries will have no pair. However, every entry
* in the other hists should have a (dummy) pair.
*/
if (sort__need_collapse)
root = &hists->entries_collapsed;
else
root = hists->entries_in;
node = rb_first(root);
while (node) {
struct hist_entry *he;
he = rb_entry(node, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(he)) {
if (!find_sample(fake_common_samples,
ARRAY_SIZE(fake_common_samples),
he->thread, he->ms.map, he->ms.sym) &&
!find_sample(fake_samples[idx],
ARRAY_SIZE(fake_samples[idx]),
he->thread, he->ms.map, he->ms.sym)) {
count_dummy++;
}
count_pair++;
} else if (idx) {
pr_debug("A entry from the other hists should have pair\n");
return -1;
}
count++;
node = rb_next(node);
}
/*
* Note that we have an entry collapsed in the other (idx = 1) hists.
*/
if (idx == 0) {
if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
pr_debug("Invalid count of dummy entries: %zd of %zd\n",
count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
return -1;
}
if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
pr_debug("Invalid count of total leader entries: %zd of %zd\n",
count, count_pair + ARRAY_SIZE(fake_samples[0]));
return -1;
}
} else {
if (count != count_pair) {
pr_debug("Invalid count of total other entries: %zd of %zd\n",
count, count_pair);
return -1;
}
if (count_dummy > 0) {
pr_debug("Other hists should not have dummy entries: %zd\n",
count_dummy);
return -1;
}
}
return 0;
}
static int validate_link(struct hists *leader, struct hists *other)
{
return __validate_link(leader, 0) || __validate_link(other, 1);
}
int test__hists_link(void)
{
int err = -1;
struct hists *hists, *first_hists;
struct machines machines;
struct machine *machine = NULL;
struct perf_evsel *evsel, *first;
struct perf_evlist *evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;
err = parse_events(evlist, "task-clock", NULL);
if (err)
goto out;
/* default sort order (comm,dso,sym) will be used */
if (setup_sorting() < 0)
goto out;
machines__init(&machines);
/* setup threads/dso/map/symbols also */
machine = setup_fake_machine(&machines);
if (!machine)
goto out;
if (verbose > 1)
machine__fprintf(machine, stderr);
/* process sample events */
err = add_hist_entries(evlist, machine);
if (err < 0)
goto out;
evlist__for_each(evlist, evsel) {
hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
if (verbose > 2)
print_hists_in(hists);
}
first = perf_evlist__first(evlist);
evsel = perf_evlist__last(evlist);
first_hists = evsel__hists(first);
hists = evsel__hists(evsel);
/* match common entries */
hists__match(first_hists, hists);
err = validate_match(first_hists, hists);
if (err)
goto out;
/* link common and/or dummy entries */
hists__link(first_hists, hists);
err = validate_link(first_hists, hists);
if (err)
goto out;
err = 0;
out:
/* tear down everything */
perf_evlist__delete(evlist);
reset_output_field();
machines__exit(&machines);
return err;
}
| gpl-2.0 |
christianeisendle/linux | tools/perf/builtin-lock.c | 544 | 23688 | #include "builtin.h"
#include "perf.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>
#include <linux/list.h>
#include <linux/hash.h>
static struct perf_session *session;
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
static struct list_head lockhash_table[LOCKHASH_SIZE];
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
struct lock_stat {
struct list_head hash_entry;
struct rb_node rb; /* used for sorting */
/*
* FIXME: perf_evsel__intval() returns u64,
* so the address of lockdep_map should be dealt with as 64-bit.
* Is there a better solution?
*/
void *addr; /* address of lockdep_map, used as ID */
char *name; /* for strcpy(), we cannot use const */
unsigned int nr_acquire;
unsigned int nr_acquired;
unsigned int nr_contended;
unsigned int nr_release;
unsigned int nr_readlock;
unsigned int nr_trylock;
/* these times are in nano sec. */
u64 avg_wait_time;
u64 wait_time_total;
u64 wait_time_min;
u64 wait_time_max;
int discard; /* flag of blacklist */
};
/*
* States of lock_seq_stat
*
* UNINITIALIZED is required for detecting first event of acquire.
* As the nature of lock events, there is no guarantee
* that the first event for the locks are acquire,
* it can be acquired, contended or release.
*/
#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
#define SEQ_STATE_RELEASED 1
#define SEQ_STATE_ACQUIRING 2
#define SEQ_STATE_ACQUIRED 3
#define SEQ_STATE_READ_ACQUIRED 4
#define SEQ_STATE_CONTENDED 5
/*
* MAX_LOCK_DEPTH
* Imported from include/linux/sched.h.
* Should this be synchronized?
*/
#define MAX_LOCK_DEPTH 48
/*
* struct lock_seq_stat:
* Place to put on state of one lock sequence
* 1) acquire -> acquired -> release
* 2) acquire -> contended -> acquired -> release
* 3) acquire (with read or try) -> release
* 4) Are there other patterns?
*/
struct lock_seq_stat {
struct list_head list;
int state;
u64 prev_event_time;
void *addr;
int read_count;
};
struct thread_stat {
struct rb_node rb;
u32 tid;
struct list_head seq_list;
};
static struct rb_root thread_stats;
static struct thread_stat *thread_stat_find(u32 tid)
{
struct rb_node *node;
struct thread_stat *st;
node = thread_stats.rb_node;
while (node) {
st = container_of(node, struct thread_stat, rb);
if (st->tid == tid)
return st;
else if (tid < st->tid)
node = node->rb_left;
else
node = node->rb_right;
}
return NULL;
}
static void thread_stat_insert(struct thread_stat *new)
{
struct rb_node **rb = &thread_stats.rb_node;
struct rb_node *parent = NULL;
struct thread_stat *p;
while (*rb) {
p = container_of(*rb, struct thread_stat, rb);
parent = *rb;
if (new->tid < p->tid)
rb = &(*rb)->rb_left;
else if (new->tid > p->tid)
rb = &(*rb)->rb_right;
else
BUG_ON("inserting invalid thread_stat\n");
}
rb_link_node(&new->rb, parent, rb);
rb_insert_color(&new->rb, &thread_stats);
}
static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
struct thread_stat *st;
st = thread_stat_find(tid);
if (st)
return st;
st = zalloc(sizeof(struct thread_stat));
if (!st) {
pr_err("memory allocation failed\n");
return NULL;
}
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
thread_stat_insert(st);
return st;
}
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
thread_stat_findnew_first;
static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
struct thread_stat *st;
st = zalloc(sizeof(struct thread_stat));
if (!st) {
pr_err("memory allocation failed\n");
return NULL;
}
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
rb_insert_color(&st->rb, &thread_stats);
thread_stat_findnew = thread_stat_findnew_after_first;
return st;
}
/* Build a simple key function: returns true if 'one' is bigger than 'two' */
#define SINGLE_KEY(member) \
static int lock_stat_key_ ## member(struct lock_stat *one, \
struct lock_stat *two) \
{ \
return one->member > two->member; \
}
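/*
 * For example, SINGLE_KEY(nr_acquired) below expands to:
 *
 *   static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *                                        struct lock_stat *two)
 *   {
 *           return one->nr_acquired > two->nr_acquired;
 *   }
 */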
SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)
static int lock_stat_key_wait_time_min(struct lock_stat *one,
struct lock_stat *two)
{
u64 s1 = one->wait_time_min;
u64 s2 = two->wait_time_min;
if (s1 == ULLONG_MAX)
s1 = 0;
if (s2 == ULLONG_MAX)
s2 = 0;
return s1 > s2;
}
struct lock_key {
/*
* name: the value specified by the user;
* this should be simpler than the raw name of the member,
* e.g. nr_acquired -> acquired, wait_time_total -> wait_total
*/
const char *name;
int (*key)(struct lock_stat*, struct lock_stat*);
};
static const char *sort_key = "acquired";
static int (*compare)(struct lock_stat *, struct lock_stat *);
static struct rb_root result; /* place to store sorted data */
#define DEF_KEY_LOCK(name, fn_suffix) \
{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
DEF_KEY_LOCK(acquired, nr_acquired),
DEF_KEY_LOCK(contended, nr_contended),
DEF_KEY_LOCK(avg_wait, avg_wait_time),
DEF_KEY_LOCK(wait_total, wait_time_total),
DEF_KEY_LOCK(wait_min, wait_time_min),
DEF_KEY_LOCK(wait_max, wait_time_max),
/* extra comparisons much complicated should be here */
{ NULL, NULL }
};
static int select_key(void)
{
int i;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, sort_key)) {
compare = keys[i].key;
return 0;
}
}
pr_err("Unknown compare key: %s\n", sort_key);
return -1;
}
static void insert_to_result(struct lock_stat *st,
int (*bigger)(struct lock_stat *, struct lock_stat *))
{
struct rb_node **rb = &result.rb_node;
struct rb_node *parent = NULL;
struct lock_stat *p;
while (*rb) {
p = container_of(*rb, struct lock_stat, rb);
parent = *rb;
if (bigger(st, p))
rb = &(*rb)->rb_left;
else
rb = &(*rb)->rb_right;
}
rb_link_node(&st->rb, parent, rb);
rb_insert_color(&st->rb, &result);
}
/* returns left most element of result, and erase it */
static struct lock_stat *pop_from_result(void)
{
struct rb_node *node = result.rb_node;
if (!node)
return NULL;
while (node->rb_left)
node = node->rb_left;
rb_erase(node, &result);
return container_of(node, struct lock_stat, rb);
}
static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
struct list_head *entry = lockhashentry(addr);
struct lock_stat *ret, *new;
list_for_each_entry(ret, entry, hash_entry) {
if (ret->addr == addr)
return ret;
}
new = zalloc(sizeof(struct lock_stat));
if (!new)
goto alloc_failed;
new->addr = addr;
new->name = zalloc(sizeof(char) * strlen(name) + 1);
if (!new->name) {
free(new);
goto alloc_failed;
}
strcpy(new->name, name);
new->wait_time_min = ULLONG_MAX;
list_add(&new->hash_entry, entry);
return new;
alloc_failed:
pr_err("memory allocation failed\n");
return NULL;
}
struct trace_lock_handler {
int (*acquire_event)(struct perf_evsel *evsel,
struct perf_sample *sample);
int (*acquired_event)(struct perf_evsel *evsel,
struct perf_sample *sample);
int (*contended_event)(struct perf_evsel *evsel,
struct perf_sample *sample);
int (*release_event)(struct perf_evsel *evsel,
struct perf_sample *sample);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
struct lock_seq_stat *seq;
list_for_each_entry(seq, &ts->seq_list, list) {
if (seq->addr == addr)
return seq;
}
seq = zalloc(sizeof(struct lock_seq_stat));
if (!seq) {
pr_err("memory allocation failed\n");
return NULL;
}
seq->state = SEQ_STATE_UNINITIALIZED;
seq->addr = addr;
list_add(&seq->list, &ts->seq_list);
return seq;
}
enum broken_state {
BROKEN_ACQUIRE,
BROKEN_ACQUIRED,
BROKEN_CONTENDED,
BROKEN_RELEASE,
BROKEN_MAX,
};
static int bad_hist[BROKEN_MAX];
enum acquire_flags {
TRY_LOCK = 1,
READ_LOCK = 2,
};
static int report_lock_acquire_event(struct perf_evsel *evsel,
struct perf_sample *sample)
{
void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
int flag = perf_evsel__intval(evsel, sample, "flag");
memcpy(&addr, &tmp, sizeof(void *));
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
case SEQ_STATE_RELEASED:
if (!flag) {
seq->state = SEQ_STATE_ACQUIRING;
} else {
if (flag & TRY_LOCK)
ls->nr_trylock++;
if (flag & READ_LOCK)
ls->nr_readlock++;
seq->state = SEQ_STATE_READ_ACQUIRED;
seq->read_count = 1;
ls->nr_acquired++;
}
break;
case SEQ_STATE_READ_ACQUIRED:
if (flag & READ_LOCK) {
seq->read_count++;
ls->nr_acquired++;
goto end;
} else {
goto broken;
}
break;
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
broken:
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_ACQUIRE]++;
list_del(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
ls->nr_acquire++;
seq->prev_event_time = sample->time;
end:
return 0;
}
static int report_lock_acquired_event(struct perf_evsel *evsel,
struct perf_sample *sample)
{
void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
memcpy(&addr, &tmp, sizeof(void *));
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_CONTENDED:
contended_term = sample->time - seq->prev_event_time;
ls->wait_time_total += contended_term;
if (contended_term < ls->wait_time_min)
ls->wait_time_min = contended_term;
if (ls->wait_time_max < contended_term)
ls->wait_time_max = contended_term;
break;
case SEQ_STATE_RELEASED:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_ACQUIRED]++;
list_del(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
seq->state = SEQ_STATE_ACQUIRED;
ls->nr_acquired++;
ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
seq->prev_event_time = sample->time;
end:
return 0;
}
static int report_lock_contended_event(struct perf_evsel *evsel,
struct perf_sample *sample)
{
void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
memcpy(&addr, &tmp, sizeof(void *));
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_RELEASED:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
case SEQ_STATE_CONTENDED:
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_CONTENDED]++;
list_del(&seq->list);
free(seq);
goto end;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
seq->state = SEQ_STATE_CONTENDED;
ls->nr_contended++;
ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
seq->prev_event_time = sample->time;
end:
return 0;
}
static int report_lock_release_event(struct perf_evsel *evsel,
struct perf_sample *sample)
{
void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
const char *name = perf_evsel__strval(evsel, sample, "name");
u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
memcpy(&addr, &tmp, sizeof(void *));
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
if (ls->discard)
return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
return -ENOMEM;
seq = get_seq(ts, addr);
if (!seq)
return -ENOMEM;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
goto end;
case SEQ_STATE_ACQUIRED:
break;
case SEQ_STATE_READ_ACQUIRED:
seq->read_count--;
BUG_ON(seq->read_count < 0);
if (!seq->read_count) {
ls->nr_release++;
goto end;
}
break;
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
case SEQ_STATE_RELEASED:
/* broken lock sequence, discard it */
ls->discard = 1;
bad_hist[BROKEN_RELEASE]++;
goto free_seq;
default:
BUG_ON("Unknown state of lock sequence found!\n");
break;
}
ls->nr_release++;
free_seq:
list_del(&seq->list);
free(seq);
end:
return 0;
}
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
.acquire_event = report_lock_acquire_event,
.acquired_event = report_lock_acquired_event,
.contended_event = report_lock_contended_event,
.release_event = report_lock_release_event,
};
static struct trace_lock_handler *trace_handler;
static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
struct perf_sample *sample)
{
if (trace_handler->acquire_event)
return trace_handler->acquire_event(evsel, sample);
return 0;
}
static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
struct perf_sample *sample)
{
if (trace_handler->acquired_event)
return trace_handler->acquired_event(evsel, sample);
return 0;
}
static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
struct perf_sample *sample)
{
if (trace_handler->contended_event)
return trace_handler->contended_event(evsel, sample);
return 0;
}
static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
struct perf_sample *sample)
{
if (trace_handler->release_event)
return trace_handler->release_event(evsel, sample);
return 0;
}
static void print_bad_events(int bad, int total)
{
/* Output for debugging; this has to be removed eventually */
int i;
const char *name[4] =
{ "acquire", "acquired", "contended", "release" };
pr_info("\n=== output for debug===\n\n");
pr_info("bad: %d, total: %d\n", bad, total);
pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
pr_info("histogram of events caused bad sequence\n");
for (i = 0; i < BROKEN_MAX; i++)
pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}
/* TODO: various way to print, coloring, nano or milli sec */
static void print_result(void)
{
struct lock_stat *st;
char cut_name[20];
int bad, total;
pr_info("%20s ", "Name");
pr_info("%10s ", "acquired");
pr_info("%10s ", "contended");
pr_info("%15s ", "avg wait (ns)");
pr_info("%15s ", "total wait (ns)");
pr_info("%15s ", "max wait (ns)");
pr_info("%15s ", "min wait (ns)");
pr_info("\n\n");
bad = total = 0;
while ((st = pop_from_result())) {
total++;
if (st->discard) {
bad++;
continue;
}
bzero(cut_name, 20);
if (strlen(st->name) < 16) {
/* output raw name */
pr_info("%20s ", st->name);
} else {
strncpy(cut_name, st->name, 16);
cut_name[16] = '.';
cut_name[17] = '.';
cut_name[18] = '.';
cut_name[19] = '\0';
/* truncate the name to preserve the output layout */
pr_info("%20s ", cut_name);
}
pr_info("%10u ", st->nr_acquired);
pr_info("%10u ", st->nr_contended);
pr_info("%15" PRIu64 " ", st->avg_wait_time);
pr_info("%15" PRIu64 " ", st->wait_time_total);
pr_info("%15" PRIu64 " ", st->wait_time_max);
pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
0 : st->wait_time_min);
pr_info("\n");
}
print_bad_events(bad, total);
}
static bool info_threads, info_map;
static void dump_threads(void)
{
struct thread_stat *st;
struct rb_node *node;
struct thread *t;
pr_info("%10s: comm\n", "Thread ID");
node = rb_first(&thread_stats);
while (node) {
st = container_of(node, struct thread_stat, rb);
t = perf_session__findnew(session, st->tid);
pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
};
}
static void dump_map(void)
{
unsigned int i;
struct lock_stat *st;
pr_info("Address of instance: name of class\n");
for (i = 0; i < LOCKHASH_SIZE; i++) {
list_for_each_entry(st, &lockhash_table[i], hash_entry) {
pr_info(" %p: %s\n", st->addr, st->name);
}
}
}
static int dump_info(void)
{
int rc = 0;
if (info_threads)
dump_threads();
else if (info_map)
dump_map();
else {
rc = -1;
pr_err("Unknown type of information\n");
}
return rc;
}
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
struct perf_sample *sample);
static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
err = f(evsel, sample);
}
thread__put(thread);
return err;
}
static void sort_result(void)
{
unsigned int i;
struct lock_stat *st;
for (i = 0; i < LOCKHASH_SIZE; i++) {
list_for_each_entry(st, &lockhash_table[i], hash_entry) {
insert_to_result(st, compare);
}
}
}
static const struct perf_evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
{ "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
};
static bool force;
static int __cmd_report(bool display_info)
{
int err = -EINVAL;
struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.ordered_events = true,
};
struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = force,
};
session = perf_session__new(&file, false, &eops);
if (!session) {
pr_err("Initializing perf session failed\n");
return -1;
}
symbol__init(&session->header.env);
if (!perf_session__has_traces(session, "lock record"))
goto out_delete;
if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
goto out_delete;
}
if (select_key())
goto out_delete;
err = perf_session__process_events(session);
if (err)
goto out_delete;
setup_pager();
if (display_info) /* used for info subcommand */
err = dump_info();
else {
sort_result();
print_result();
}
out_delete:
perf_session__delete(session);
return err;
}
static int __cmd_record(int argc, const char **argv)
{
const char *record_args[] = {
"record", "-R", "-m", "1024", "-c", "1",
};
unsigned int rec_argc, i, j, ret;
const char **rec_argv;
for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
pr_err("tracepoint %s is not enabled. "
"Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
lock_tracepoints[i].name);
return 1;
}
}
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
/* factor of 2 is for -e in front of each tracepoint */
rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
rec_argv[i++] = "-e";
rec_argv[i++] = strdup(lock_tracepoints[j].name);
}
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
BUG_ON(i != rec_argc);
ret = cmd_record(i, rec_argv, NULL);
free(rec_argv);
return ret;
}
int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
{
const struct option info_options[] = {
OPT_BOOLEAN('t', "threads", &info_threads,
"dump thread list in perf.data"),
OPT_BOOLEAN('m', "map", &info_map,
"map of lock instances (address:name table)"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_END()
};
const struct option lock_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_END()
};
const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
"key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
/* TODO: type */
OPT_END()
};
const char * const info_usage[] = {
"perf lock info [<options>]",
NULL
};
const char *const lock_subcommands[] = { "record", "report", "script",
"info", NULL };
const char *lock_usage[] = {
NULL,
NULL
};
const char * const report_usage[] = {
"perf lock report [<options>]",
NULL
};
unsigned int i;
int rc = 0;
for (i = 0; i < LOCKHASH_SIZE; i++)
INIT_LIST_HEAD(lockhash_table + i);
argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(lock_usage, lock_options);
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "report", 6)) {
trace_handler = &report_lock_ops;
if (argc) {
argc = parse_options(argc, argv,
report_options, report_usage, 0);
if (argc)
usage_with_options(report_usage, report_options);
}
rc = __cmd_report(false);
} else if (!strcmp(argv[0], "script")) {
/* Aliased to 'perf script' */
return cmd_script(argc, argv, prefix);
} else if (!strcmp(argv[0], "info")) {
if (argc) {
argc = parse_options(argc, argv,
info_options, info_usage, 0);
if (argc)
usage_with_options(info_usage, info_options);
}
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
rc = __cmd_report(true);
} else {
usage_with_options(lock_usage, lock_options);
}
return rc;
}
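/*
 * Typical command-line flow (illustrative):
 *
 *   # perf lock record -- <workload>
 *   # perf lock report -k wait_total
 *   # perf lock info -m
 */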
| gpl-2.0 |
rupesh1mb/linux | drivers/media/rc/ir-sony-decoder.c | 544 | 4992 | /* ir-sony-decoder.c - handle Sony IR Pulse/Space protocol
*
* Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitrev.h>
#include <linux/module.h>
#include "rc-core-priv.h"
#define SONY_UNIT 600000 /* ns */
#define SONY_HEADER_PULSE (4 * SONY_UNIT)
#define SONY_HEADER_SPACE (1 * SONY_UNIT)
#define SONY_BIT_0_PULSE (1 * SONY_UNIT)
#define SONY_BIT_1_PULSE (2 * SONY_UNIT)
#define SONY_BIT_SPACE (1 * SONY_UNIT)
#define SONY_TRAILER_SPACE (10 * SONY_UNIT) /* minimum */
enum sony_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_FINISHED,
};
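/*
 * Worked example (illustrative): with SONY_UNIT = 600us, a 12-bit SIRC
 * frame starts with a 2400us header pulse followed by a 600us space;
 * each "0" bit is a 600us pulse, each "1" bit a 1200us pulse, and every
 * bit is followed by a 600us space. The state machine below therefore
 * walks STATE_INACTIVE -> STATE_HEADER_SPACE -> (STATE_BIT_PULSE <->
 * STATE_BIT_SPACE) x 12 -> STATE_FINISHED once a trailer space of at
 * least 6000us arrives.
 */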
/**
* ir_sony_decode() - Decode one Sony pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sony_dec *data = &dev->raw->sony;
enum rc_type protocol;
u32 scancode;
u8 device, subdevice, function;
if (!(dev->enabled_protocols &
(RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20)))
return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2))
goto out;
IR_dprintk(2, "Sony decode started at state %d (%uus %s)\n",
data->state, TO_US(ev.duration), TO_STR(ev.pulse));
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, SONY_HEADER_PULSE, SONY_UNIT / 2))
break;
data->count = 0;
data->state = STATE_HEADER_SPACE;
return 0;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, SONY_HEADER_SPACE, SONY_UNIT / 2))
break;
data->state = STATE_BIT_PULSE;
return 0;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
data->bits <<= 1;
if (eq_margin(ev.duration, SONY_BIT_1_PULSE, SONY_UNIT / 2))
data->bits |= 1;
else if (!eq_margin(ev.duration, SONY_BIT_0_PULSE, SONY_UNIT / 2))
break;
data->count++;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SONY_BIT_SPACE, SONY_UNIT / 2))
break;
decrease_duration(&ev, SONY_BIT_SPACE);
if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) {
data->state = STATE_BIT_PULSE;
return 0;
}
data->state = STATE_FINISHED;
/* Fall through */
case STATE_FINISHED:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, SONY_TRAILER_SPACE, SONY_UNIT / 2))
break;
switch (data->count) {
case 12:
if (!(dev->enabled_protocols & RC_BIT_SONY12))
goto finish_state_machine;
device = bitrev8((data->bits << 3) & 0xF8);
subdevice = 0;
function = bitrev8((data->bits >> 4) & 0xFE);
protocol = RC_TYPE_SONY12;
break;
case 15:
if (!(dev->enabled_protocols & RC_BIT_SONY15))
goto finish_state_machine;
device = bitrev8((data->bits >> 0) & 0xFF);
subdevice = 0;
function = bitrev8((data->bits >> 7) & 0xFE);
protocol = RC_TYPE_SONY15;
break;
case 20:
if (!(dev->enabled_protocols & RC_BIT_SONY20))
goto finish_state_machine;
device = bitrev8((data->bits >> 5) & 0xF8);
subdevice = bitrev8((data->bits >> 0) & 0xFF);
function = bitrev8((data->bits >> 12) & 0xFE);
protocol = RC_TYPE_SONY20;
break;
default:
IR_dprintk(1, "Sony invalid bitcount %u\n", data->count);
goto out;
}
scancode = device << 16 | subdevice << 8 | function;
IR_dprintk(1, "Sony(%u) scancode 0x%05x\n", data->count, scancode);
rc_keydown(dev, protocol, scancode, 0);
goto finish_state_machine;
}
out:
IR_dprintk(1, "Sony decode failed at state %d (%uus %s)\n",
data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
finish_state_machine:
data->state = STATE_INACTIVE;
return 0;
}
static struct ir_raw_handler sony_handler = {
.protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
.decode = ir_sony_decode,
};
static int __init ir_sony_decode_init(void)
{
ir_raw_handler_register(&sony_handler);
printk(KERN_INFO "IR Sony protocol handler initialized\n");
return 0;
}
static void __exit ir_sony_decode_exit(void)
{
ir_raw_handler_unregister(&sony_handler);
}
module_init(ir_sony_decode_init);
module_exit(ir_sony_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("Sony IR protocol decoder");
| gpl-2.0 |
Asderdd/android_kernel_google_msm8952 | drivers/acpi/acpica/rsdump.c | 2080 | 14619 | /*******************************************************************************
*
* Module Name: rsdump - Functions to display the resource structures.
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
static void acpi_rs_out_integer8(char *title, u8 value);
static void acpi_rs_out_integer16(char *title, u16 value);
static void acpi_rs_out_integer32(char *title, u32 value);
static void acpi_rs_out_integer64(char *title, u64 value);
static void acpi_rs_out_title(char *title);
static void acpi_rs_dump_byte_list(u16 length, u8 *data);
static void acpi_rs_dump_word_list(u16 length, u16 *data);
static void acpi_rs_dump_dword_list(u8 length, u32 *data);
static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
static void
acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_descriptor
*
* PARAMETERS: resource - Buffer containing the resource
* table - Table entry to decode the resource
*
* RETURN: None
*
* DESCRIPTION: Dump a resource descriptor based on a dump table entry.
*
******************************************************************************/
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
{
u8 *target = NULL;
u8 *previous_target;
char *name;
u8 count;
/* First table entry must contain the table length (# of table entries) */
count = table->offset;
while (count) {
previous_target = target;
target = ACPI_ADD_PTR(u8, resource, table->offset);
name = table->name;
switch (table->opcode) {
case ACPI_RSD_TITLE:
/*
* Optional resource title
*/
if (table->name) {
acpi_os_printf("%s Resource\n", name);
}
break;
/* Strings */
case ACPI_RSD_LITERAL:
acpi_rs_out_string(name,
ACPI_CAST_PTR(char, table->pointer));
break;
case ACPI_RSD_STRING:
acpi_rs_out_string(name, ACPI_CAST_PTR(char, target));
break;
/* Data items, 8/16/32/64 bit */
case ACPI_RSD_UINT8:
if (table->pointer) {
acpi_rs_out_string(name, ACPI_CAST_PTR(char,
table->
pointer
[*target]));
} else {
acpi_rs_out_integer8(name, ACPI_GET8(target));
}
break;
case ACPI_RSD_UINT16:
acpi_rs_out_integer16(name, ACPI_GET16(target));
break;
case ACPI_RSD_UINT32:
acpi_rs_out_integer32(name, ACPI_GET32(target));
break;
case ACPI_RSD_UINT64:
acpi_rs_out_integer64(name, ACPI_GET64(target));
break;
/* Flags: 1-bit and 2-bit flags supported */
case ACPI_RSD_1BITFLAG:
acpi_rs_out_string(name, ACPI_CAST_PTR(char,
table->
pointer[*target &
0x01]));
break;
case ACPI_RSD_2BITFLAG:
acpi_rs_out_string(name, ACPI_CAST_PTR(char,
table->
pointer[*target &
0x03]));
break;
case ACPI_RSD_3BITFLAG:
acpi_rs_out_string(name, ACPI_CAST_PTR(char,
table->
pointer[*target &
0x07]));
break;
case ACPI_RSD_SHORTLIST:
/*
* Short byte list (single line output) for DMA and IRQ resources
* Note: The list length is obtained from the previous table entry
*/
if (previous_target) {
acpi_rs_out_title(name);
acpi_rs_dump_short_byte_list(*previous_target,
target);
}
break;
case ACPI_RSD_SHORTLISTX:
/*
* Short byte list (single line output) for GPIO vendor data
* Note: The list length is obtained from the previous table entry
*/
if (previous_target) {
acpi_rs_out_title(name);
acpi_rs_dump_short_byte_list(*previous_target,
*
(ACPI_CAST_INDIRECT_PTR
(u8, target)));
}
break;
case ACPI_RSD_LONGLIST:
/*
* Long byte list for Vendor resource data
* Note: The list length is obtained from the previous table entry
*/
if (previous_target) {
acpi_rs_dump_byte_list(ACPI_GET16
(previous_target),
target);
}
break;
case ACPI_RSD_DWORDLIST:
/*
* Dword list for Extended Interrupt resources
* Note: The list length is obtained from the previous table entry
*/
if (previous_target) {
acpi_rs_dump_dword_list(*previous_target,
ACPI_CAST_PTR(u32,
target));
}
break;
case ACPI_RSD_WORDLIST:
/*
* Word list for GPIO Pin Table
* Note: The list length is obtained from the previous table entry
*/
if (previous_target) {
acpi_rs_dump_word_list(*previous_target,
*(ACPI_CAST_INDIRECT_PTR
(u16, target)));
}
break;
case ACPI_RSD_ADDRESS:
/*
* Common flags for all Address resources
*/
acpi_rs_dump_address_common(ACPI_CAST_PTR
(union acpi_resource_data,
target));
break;
case ACPI_RSD_SOURCE:
/*
* Optional resource_source for Address resources
*/
acpi_rs_dump_resource_source(ACPI_CAST_PTR
(struct
acpi_resource_source,
target));
break;
default:
acpi_os_printf("**** Invalid table opcode [%X] ****\n",
table->opcode);
return;
}
table++;
count--;
}
}
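/*
 * Illustrative sketch, not part of this file: what a dump table for a
 * hypothetical IRQ-style resource could look like.  By convention the
 * first entry's Offset member carries the number of table entries,
 * which is what acpi_rs_dump_descriptor() above reads into 'count'.
 * The field names and the ACPI_RSD_OFFSET helper are assumptions for
 * the example; the real tables live elsewhere in the ACPICA sources.
 */
#if 0	/* example only */
static struct acpi_rsdump_info acpi_rs_dump_example[3] = {
	{ACPI_RSD_TITLE, 3, "Example IRQ", NULL},
	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
	 "Interrupt Count", NULL},
	{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
	 "Interrupt List", NULL},
};
#endif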
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_resource_source
*
* PARAMETERS: resource_source - Pointer to a Resource Source struct
*
* RETURN: None
*
* DESCRIPTION: Common routine for dumping the optional resource_source and the
* corresponding resource_source_index.
*
******************************************************************************/
static void
acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
{
ACPI_FUNCTION_ENTRY();
if (resource_source->index == 0xFF) {
return;
}
acpi_rs_out_integer8("Resource Source Index", resource_source->index);
acpi_rs_out_string("Resource Source",
resource_source->string_ptr ?
resource_source->string_ptr : "[Not Specified]");
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_address_common
*
* PARAMETERS: resource - Pointer to an internal resource descriptor
*
* RETURN: None
*
* DESCRIPTION: Dump the fields that are common to all Address resource
* descriptors
*
******************************************************************************/
static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
{
ACPI_FUNCTION_ENTRY();
/* Decode the type-specific flags */
switch (resource->address.resource_type) {
case ACPI_MEMORY_RANGE:
acpi_rs_dump_descriptor(resource, acpi_rs_dump_memory_flags);
break;
case ACPI_IO_RANGE:
acpi_rs_dump_descriptor(resource, acpi_rs_dump_io_flags);
break;
case ACPI_BUS_NUMBER_RANGE:
acpi_rs_out_string("Resource Type", "Bus Number Range");
break;
default:
acpi_rs_out_integer8("Resource Type",
(u8) resource->address.resource_type);
break;
}
/* Decode the general flags */
acpi_rs_dump_descriptor(resource, acpi_rs_dump_general_flags);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_resource_list
*
* PARAMETERS: resource_list - Pointer to a resource descriptor list
*
* RETURN: None
*
* DESCRIPTION: Dispatches the structure to the correct dump routine.
*
******************************************************************************/
void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
{
u32 count = 0;
u32 type;
ACPI_FUNCTION_ENTRY();
/* Check if debug output enabled */
if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
return;
}
/* Walk list and dump all resource descriptors (END_TAG terminates) */
do {
acpi_os_printf("\n[%02X] ", count);
count++;
/* Validate Type before dispatch */
type = resource_list->type;
if (type > ACPI_RESOURCE_TYPE_MAX) {
acpi_os_printf
("Invalid descriptor type (%X) in resource list\n",
resource_list->type);
return;
}
/* Sanity check the length. It must not be zero, or we loop forever */
if (!resource_list->length) {
acpi_os_printf
("Invalid zero length descriptor in resource list\n");
return;
}
/* Dump the resource descriptor */
if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
acpi_rs_dump_descriptor(&resource_list->data,
acpi_gbl_dump_serial_bus_dispatch
[resource_list->data.
common_serial_bus.type]);
} else {
acpi_rs_dump_descriptor(&resource_list->data,
acpi_gbl_dump_resource_dispatch
[type]);
}
/* Point to the next resource structure */
resource_list = ACPI_NEXT_RESOURCE(resource_list);
/* Exit when END_TAG descriptor is reached */
} while (type != ACPI_RESOURCE_TYPE_END_TAG);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_irq_list
*
* PARAMETERS: route_table - Pointer to the routing table to dump.
*
* RETURN: None
*
* DESCRIPTION: Print IRQ routing table
*
******************************************************************************/
void acpi_rs_dump_irq_list(u8 * route_table)
{
struct acpi_pci_routing_table *prt_element;
u8 count;
ACPI_FUNCTION_ENTRY();
/* Check if debug output enabled */
if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
return;
}
prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
/* Dump all table elements, Exit on zero length element */
for (count = 0; prt_element->length; count++) {
acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
count);
acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
prt_element, prt_element->length);
}
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_out*
*
* PARAMETERS: title - Name of the resource field
* value - Value of the resource field
*
* RETURN: None
*
* DESCRIPTION: Miscellaneous helper functions to consistently format the
* output of the resource dump routines
*
******************************************************************************/
static void acpi_rs_out_string(char *title, char *value)
{
acpi_os_printf("%27s : %s", title, value);
if (!*value) {
acpi_os_printf("[NULL NAMESTRING]");
}
acpi_os_printf("\n");
}
static void acpi_rs_out_integer8(char *title, u8 value)
{
acpi_os_printf("%27s : %2.2X\n", title, value);
}
static void acpi_rs_out_integer16(char *title, u16 value)
{
acpi_os_printf("%27s : %4.4X\n", title, value);
}
static void acpi_rs_out_integer32(char *title, u32 value)
{
acpi_os_printf("%27s : %8.8X\n", title, value);
}
static void acpi_rs_out_integer64(char *title, u64 value)
{
acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
}
static void acpi_rs_out_title(char *title)
{
acpi_os_printf("%27s : ", title);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump*List
*
* PARAMETERS: length - Number of elements in the list
* data - Start of the list
*
* RETURN: None
*
* DESCRIPTION: Miscellaneous functions to dump lists of raw data
*
******************************************************************************/
static void acpi_rs_dump_byte_list(u16 length, u8 * data)
{
u8 i;
for (i = 0; i < length; i++) {
acpi_os_printf("%25s%2.2X : %2.2X\n", "Byte", i, data[i]);
}
}
static void acpi_rs_dump_short_byte_list(u8 length, u8 * data)
{
u8 i;
for (i = 0; i < length; i++) {
acpi_os_printf("%X ", data[i]);
}
acpi_os_printf("\n");
}
static void acpi_rs_dump_dword_list(u8 length, u32 * data)
{
u8 i;
for (i = 0; i < length; i++) {
acpi_os_printf("%25s%2.2X : %8.8X\n", "Dword", i, data[i]);
}
}
static void acpi_rs_dump_word_list(u16 length, u16 *data)
{
u16 i;
for (i = 0; i < length; i++) {
acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
}
}
#endif
| gpl-2.0 |
lyfkevin/lge-kernel-iproj | drivers/gpu/drm/i915/i915_gem_tiling.c | 2336 | 15145 | /*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/** @file i915_gem_tiling.c
*
* Support for managing tiling state of buffer objects.
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvement from doing this on the back/depth buffer are on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what the address
* swizzling it needs to do is, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
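/*
 * Illustrative sketch, not used by the driver: the effective channel
 * select for one address under the swizzle modes described above.  The
 * GPU contribution (bits 9/10) applies to tiled accesses; whether the
 * CPU additionally folds in bit 11 or bit 17 depends on the MCH
 * configuration.  A reading aid only; the bit positions are taken from
 * the comment above.
 */
#if 0	/* example only */
static inline int example_channel_select(u32 addr, bool xor11, bool xor17)
{
	int bit = (addr >> 6) & 1;	/* 64-byte channel interleave */

	bit ^= (addr >> 9) & 1;		/* X-tile scanline (GPU) */
	bit ^= (addr >> 10) & 1;	/* two X-tile scanlines (GPU) */
	if (xor11)
		bit ^= (addr >> 11) & 1;	/* CPU channel XOR */
	if (xor17)
		bit ^= (addr >> 17) & 1;	/* CPU channel XOR */
	return bit;
}
#endif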
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 5) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev)) {
uint32_t dcc;
/* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
* slot fill memory size swizzling
* 0A 0B 1A 1B 1-ch 2-ch
* 512 0 0 0 512 0 O
* 512 0 512 0 16 1008 X
* 512 0 0 512 16 1008 X
* 0 512 0 512 16 1008 X
* 1024 1024 1024 0 2048 1024 O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*/
if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
return true;
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
if (INTEL_INFO(dev)->gen >= 4) {
/* i965 stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
if (IS_GEN3(dev)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
if (size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
}
/* Pre-965 needs power of two tile widths */
if (stride < tile_width)
return false;
if (stride & (stride - 1))
return false;
return true;
}
/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024;
else
size = 512*1024;
while (size < obj->base.size)
size <<= 1;
if (obj->gtt_space->size != size)
return false;
if (obj->gtt_offset & (size - 1))
return false;
return true;
}
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
if (obj->pin_count) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
* and we use the pread/pwrite bit17 paths to swizzle for it.
* If there was a user that was relying on the swizzle
* information for drm_intel_bo_map()ed reads/writes this would
* break it, but we don't have any of those.
*/
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
/* If we can't handle the swizzling, make it untiled. */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
}
}
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
i915_gem_release_mmap(obj);
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(dev,
obj->base.size,
args->tiling_mode);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
if (ret == 0) {
obj->tiling_changed = true;
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
}
}
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return ret;
}
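/*
 * Illustrative userspace sketch, not part of the kernel: the ioctl that
 * lands in i915_gem_set_tiling() above.  The file descriptor and GEM
 * handle are assumptions for the example, and the snippet would need
 * the libdrm headers; real callers normally go through libdrm.  Note
 * the kernel may downgrade the request (unknown swizzling ends up
 * untiled), so the returned tiling_mode must be checked.
 */
#if 0	/* example only */
static int example_set_tiling(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_tiling set = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = 4096,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
		return -errno;
	return set.tiling_mode == I915_TILING_X ? 0 : -ENOTSUP;
}
#endif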
/**
* Returns the current tiling mode and required bit 6 swizzling for the object.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj->tiling_mode;
switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
default:
DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Swap every 64 bytes of this page around, to account for it having a new
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
static void
i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
int i;
vaddr = kmap(page);
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
memcpy(&vaddr[i], &vaddr[i + 64], 64);
memcpy(&vaddr[i + 64], temp, 64);
}
kunmap(page);
}
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
if (obj->bit_17 == NULL)
return;
for (i = 0; i < page_count; i++) {
char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(obj->pages[i]);
set_page_dirty(obj->pages[i]);
}
}
}
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
for (i = 0; i < page_count; i++) {
if (page_to_phys(obj->pages[i]) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
}
}
| gpl-2.0 |
motley-git/TF201-Kernel-Lite | drivers/media/video/videobuf2-memops.c | 2592 | 6305 | /*
* videobuf2-memops.c - generic memory handling routines for videobuf2
*
* Copyright (C) 2010 Samsung Electronics
*
* Author: Pawel Osciak <pawel@osciak.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
/**
* vb2_get_vma() - acquire and lock the virtual memory area
* @vma: given virtual memory area
*
* This function attempts to acquire an area mapped in the userspace for
* the duration of a hardware operation. The area is "locked" by performing
* the same set of operation that are done when process calls fork() and
* memory areas are duplicated.
*
* Returns a copy of a virtual memory region on success or NULL.
*/
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *vma_copy;
vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
if (vma_copy == NULL)
return NULL;
if (vma->vm_ops && vma->vm_ops->open)
vma->vm_ops->open(vma);
if (vma->vm_file)
get_file(vma->vm_file);
memcpy(vma_copy, vma, sizeof(*vma));
vma_copy->vm_mm = NULL;
vma_copy->vm_next = NULL;
vma_copy->vm_prev = NULL;
return vma_copy;
}
/**
* vb2_put_vma() - release a userspace virtual memory area
* @vma: virtual memory region associated with the area to be released
*
* This function releases the previously acquired memory area after a hardware
* operation.
*/
void vb2_put_vma(struct vm_area_struct *vma)
{
if (!vma)
return;
if (vma->vm_file)
fput(vma->vm_file);
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);
/**
* vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
* @vaddr: starting virtual address of the area to be verified
* @size: size of the area
* @res_paddr: will return physical address for the given vaddr
* @res_vma: will return locked copy of struct vm_area for the given area
*
* This function will go through memory area of size @size mapped at @vaddr and
* verify that the underlying physical pages are contiguous. If they are
* contiguous the virtual memory area is locked and a @res_vma is filled with
* the copy and @res_pa set to the physical address of the buffer.
*
* Returns 0 on success.
*/
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long offset, start, end;
unsigned long this_pfn, prev_pfn;
dma_addr_t pa = 0;
int ret = -EFAULT;
start = vaddr;
offset = start & ~PAGE_MASK;
end = start + size;
down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
if (vma == NULL || vma->vm_end < end)
goto done;
for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
ret = follow_pfn(vma, start, &this_pfn);
if (ret)
goto done;
if (prev_pfn == 0)
pa = this_pfn << PAGE_SHIFT;
else if (this_pfn != prev_pfn + 1) {
ret = -EFAULT;
goto done;
}
prev_pfn = this_pfn;
}
/*
* Memory is contiguous, lock vma and return to the caller
*/
*res_vma = vb2_get_vma(vma);
if (*res_vma == NULL) {
ret = -ENOMEM;
goto done;
}
*res_pa = pa + offset;
ret = 0;
done:
up_read(&mm->mmap_sem);
return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
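/*
 * Illustrative sketch, not part of this file: how a contiguous-DMA
 * allocator might use the helper above when attaching a USERPTR buffer.
 * The surrounding function and error handling are assumptions for the
 * example; the real caller is videobuf2-dma-contig.c.
 */
#if 0	/* example only */
static int example_attach_userptr(unsigned long vaddr, unsigned long size)
{
	struct vm_area_struct *vma;
	dma_addr_t dma_addr;
	int ret;

	ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
	if (ret)
		return ret;	/* pages were not physically contiguous */
	/* ... program the device with dma_addr ... */
	vb2_put_vma(vma);	/* on buffer release */
	return 0;
}
#endif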
/**
* vb2_mmap_pfn_range() - map physical pages to userspace
* @vma: virtual memory region for the mapping
* @paddr: starting physical address of the memory to be mapped
* @size: size of the memory to be mapped
* @vm_ops: vm operations to be assigned to the created area
* @priv: private data to be associated with the area
*
* Returns 0 on success.
*/
int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
unsigned long size,
const struct vm_operations_struct *vm_ops,
void *priv)
{
int ret;
size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
size, vma->vm_page_prot);
if (ret) {
printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
return ret;
}
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_private_data = priv;
vma->vm_ops = vm_ops;
vma->vm_ops->open(vma);
printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
__func__, paddr, vma->vm_start, size);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
/**
* vb2_common_vm_open() - increase refcount of the vma
* @vma: virtual memory region for the mapping
*
* This function adds another user to the provided vma. It expects
* struct vb2_vmarea_handler pointer in vma->vm_private_data.
*/
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, atomic_read(h->refcount), vma->vm_start,
vma->vm_end);
atomic_inc(h->refcount);
}
/**
* vb2_common_vm_close() - decrease refcount of the vma
* @vma: virtual memory region for the mapping
*
* This function releases the user from the provided vma. It expects
* struct vb2_vmarea_handler pointer in vma->vm_private_data.
*/
static void vb2_common_vm_close(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, atomic_read(h->refcount), vma->vm_start,
vma->vm_end);
h->put(h->arg);
}
/**
* vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
* video buffers
*/
const struct vm_operations_struct vb2_common_vm_ops = {
.open = vb2_common_vm_open,
.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bgn9000/Shun-Andromeda | drivers/i2c/busses/i2c-isch.c | 3104 | 8959 | /*
i2c-isch.c - Linux kernel driver for Intel SCH chipset SMBus
- Based on i2c-piix4.c
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and
Philip Edelbrock <phil@netroedge.com>
- Intel SCH support
Copyright (c) 2007 - 2008 Jacob Jun Pan <jacob.jun.pan@intel.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Supports:
Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L)
Note: we assume there can only be one device, with one SMBus interface.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/acpi.h>
/* SCH SMBus address offsets */
#define SMBHSTCNT (0 + sch_smba)
#define SMBHSTSTS (1 + sch_smba)
#define SMBHSTADD (4 + sch_smba) /* TSA */
#define SMBHSTCMD (5 + sch_smba)
#define SMBHSTDAT0 (6 + sch_smba)
#define SMBHSTDAT1 (7 + sch_smba)
#define SMBBLKDAT (0x20 + sch_smba)
/* Other settings */
#define MAX_TIMEOUT 500
/* I2C constants */
#define SCH_QUICK 0x00
#define SCH_BYTE 0x01
#define SCH_BYTE_DATA 0x02
#define SCH_WORD_DATA 0x03
#define SCH_BLOCK_DATA 0x05
static unsigned short sch_smba;
static struct i2c_adapter sch_adapter;
/*
* Start the i2c transaction -- the i2c_access will prepare the transaction
* and this function will execute it.
* return 0 for success and others for failure.
*/
static int sch_transaction(void)
{
int temp;
int result = 0;
int timeout = 0;
dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT),
inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0),
inb(SMBHSTDAT1));
/* Make sure the SMBus host is ready to start transmitting */
temp = inb(SMBHSTSTS) & 0x0f;
if (temp) {
/* Can not be busy since we checked it in sch_access */
if (temp & 0x01) {
dev_dbg(&sch_adapter.dev, "Completion (%02x). "
"Clear...\n", temp);
}
if (temp & 0x06) {
dev_dbg(&sch_adapter.dev, "SMBus error (%02x). "
"Resetting...\n", temp);
}
outb(temp, SMBHSTSTS);
temp = inb(SMBHSTSTS) & 0x0f;
if (temp) {
dev_err(&sch_adapter.dev,
"SMBus is not ready: (%02x)\n", temp);
return -EAGAIN;
}
}
/* start the transaction by setting bit 4 */
outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT);
do {
msleep(1);
temp = inb(SMBHSTSTS) & 0x0f;
} while ((temp & 0x08) && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout > MAX_TIMEOUT) {
dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
if (temp & 0x04) {
result = -EIO;
dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
"locked until next hard reset. (sorry!)\n");
/* Clock stops and slave is stuck in mid-transmission */
} else if (temp & 0x02) {
result = -EIO;
dev_err(&sch_adapter.dev, "Error: no response!\n");
} else if (temp & 0x01) {
dev_dbg(&sch_adapter.dev, "Post complete!\n");
outb(temp, SMBHSTSTS);
temp = inb(SMBHSTSTS) & 0x07;
if (temp & 0x06) {
/* Completion clear failed */
dev_dbg(&sch_adapter.dev, "Failed reset at end of "
"transaction (%02x), Bus error!\n", temp);
}
} else {
result = -ENXIO;
dev_dbg(&sch_adapter.dev, "No such address.\n");
}
dev_dbg(&sch_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT),
inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0),
inb(SMBHSTDAT1));
return result;
}
/*
* This is the main access entry for i2c-sch access
* adap is i2c_adapter pointer, addr is the i2c device bus address, read_write
* (0 for read and 1 for write), size is i2c transaction type and data is the
* union of transaction for data to be transferred or data read from bus.
* return 0 for success and others for failure.
*/
static s32 sch_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
int i, len, temp, rc;
/* Make sure the SMBus host is not busy */
temp = inb(SMBHSTSTS) & 0x0f;
if (temp & 0x08) {
dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp);
return -EAGAIN;
}
dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size,
(read_write)?"READ":"WRITE");
switch (size) {
case I2C_SMBUS_QUICK:
outb((addr << 1) | read_write, SMBHSTADD);
size = SCH_QUICK;
break;
case I2C_SMBUS_BYTE:
outb((addr << 1) | read_write, SMBHSTADD);
if (read_write == I2C_SMBUS_WRITE)
outb(command, SMBHSTCMD);
size = SCH_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
outb((addr << 1) | read_write, SMBHSTADD);
outb(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE)
outb(data->byte, SMBHSTDAT0);
size = SCH_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
outb((addr << 1) | read_write, SMBHSTADD);
outb(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
outb(data->word & 0xff, SMBHSTDAT0);
outb((data->word & 0xff00) >> 8, SMBHSTDAT1);
}
size = SCH_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
outb((addr << 1) | read_write, SMBHSTADD);
outb(command, SMBHSTCMD);
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
outb(len, SMBHSTDAT0);
for (i = 1; i <= len; i++)
outb(data->block[i], SMBBLKDAT+i-1);
}
size = SCH_BLOCK_DATA;
break;
default:
dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
dev_dbg(&sch_adapter.dev, "write size %d to 0x%04x\n", size, SMBHSTCNT);
outb((inb(SMBHSTCNT) & 0xb0) | (size & 0x7), SMBHSTCNT);
rc = sch_transaction();
if (rc) /* Error in transaction */
return rc;
if ((read_write == I2C_SMBUS_WRITE) || (size == SCH_QUICK))
return 0;
switch (size) {
case SCH_BYTE:
case SCH_BYTE_DATA:
data->byte = inb(SMBHSTDAT0);
break;
case SCH_WORD_DATA:
data->word = inb(SMBHSTDAT0) + (inb(SMBHSTDAT1) << 8);
break;
case SCH_BLOCK_DATA:
data->block[0] = inb(SMBHSTDAT0);
if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
for (i = 1; i <= data->block[0]; i++)
data->block[i] = inb(SMBBLKDAT+i-1);
break;
}
return 0;
}
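/*
 * Illustrative sketch, not part of this driver: client drivers never
 * call sch_access() directly; they use the i2c/smbus core, which
 * dispatches here through the adapter's algorithm.  The register
 * offset is an assumption for the example.
 */
#if 0	/* example only */
static int example_read_sensor(struct i2c_client *client)
{
	s32 val = i2c_smbus_read_word_data(client, 0x10);

	if (val < 0)
		return val;	/* -EAGAIN, -ETIMEDOUT, ... from above */
	return val & 0xffff;
}
#endif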
static u32 sch_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = sch_access,
.functionality = sch_func,
};
static struct i2c_adapter sch_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &smbus_algorithm,
};
static int __devinit smbus_sch_probe(struct platform_device *dev)
{
struct resource *res;
int retval;
res = platform_get_resource(dev, IORESOURCE_IO, 0);
if (!res)
return -EBUSY;
if (!request_region(res->start, resource_size(res), dev->name)) {
dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
sch_smba);
return -EBUSY;
}
sch_smba = res->start;
dev_dbg(&dev->dev, "SMBA = 0x%X\n", sch_smba);
/* set up the sysfs linkage to our parent device */
sch_adapter.dev.parent = &dev->dev;
snprintf(sch_adapter.name, sizeof(sch_adapter.name),
"SMBus SCH adapter at %04x", sch_smba);
retval = i2c_add_adapter(&sch_adapter);
if (retval) {
dev_err(&dev->dev, "Couldn't register adapter!\n");
release_region(res->start, resource_size(res));
sch_smba = 0;
}
return retval;
}
static int __devexit smbus_sch_remove(struct platform_device *pdev)
{
struct resource *res;
if (sch_smba) {
i2c_del_adapter(&sch_adapter);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
release_region(res->start, resource_size(res));
sch_smba = 0;
}
return 0;
}
static struct platform_driver smbus_sch_driver = {
.driver = {
.name = "isch_smbus",
.owner = THIS_MODULE,
},
.probe = smbus_sch_probe,
.remove = __devexit_p(smbus_sch_remove),
};
static int __init i2c_sch_init(void)
{
return platform_driver_register(&smbus_sch_driver);
}
static void __exit i2c_sch_exit(void)
{
platform_driver_unregister(&smbus_sch_driver);
}
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
MODULE_DESCRIPTION("Intel SCH SMBus driver");
MODULE_LICENSE("GPL");
module_init(i2c_sch_init);
module_exit(i2c_sch_exit);
MODULE_ALIAS("platform:isch_smbus");
| gpl-2.0 |
krash86/android_kernel_google_pixel | arch/xtensa/variants/s6000/gpio.c | 3104 | 5634 | /*
* s6000 gpio driver
*
* Copyright (c) 2009 emlix GmbH
* Authors: Oskar Schirmer <oskar@scara.com>
* Johannes Weiner <hannes@cmpxchg.org>
* Daniel Gloeckner <dg@emlix.com>
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <variant/hardware.h>
#define IRQ_BASE XTENSA_NR_IRQS
#define S6_GPIO_DATA 0x000
#define S6_GPIO_IS 0x404
#define S6_GPIO_IBE 0x408
#define S6_GPIO_IEV 0x40C
#define S6_GPIO_IE 0x410
#define S6_GPIO_RIS 0x414
#define S6_GPIO_MIS 0x418
#define S6_GPIO_IC 0x41C
#define S6_GPIO_AFSEL 0x420
#define S6_GPIO_DIR 0x800
#define S6_GPIO_BANK(nr) ((nr) * 0x1000)
#define S6_GPIO_MASK(nr) (4 << (nr))
#define S6_GPIO_OFFSET(nr) \
(S6_GPIO_BANK((nr) >> 3) + S6_GPIO_MASK((nr) & 7))
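/*
 * Worked example (reading aid only): GPIO 10 is bank 1, pin 2, so
 * S6_GPIO_OFFSET(10) == S6_GPIO_BANK(1) + S6_GPIO_MASK(2)
 *                    == 0x1000 + 0x10.
 * Encoding the pin mask in low address bits this way appears to follow
 * the PL061-style scheme, where a data access only touches the pins
 * selected by the address.
 */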
static int direction_input(struct gpio_chip *chip, unsigned int off)
{
writeb(0, S6_REG_GPIO + S6_GPIO_DIR + S6_GPIO_OFFSET(off));
return 0;
}
static int get(struct gpio_chip *chip, unsigned int off)
{
return readb(S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
}
static int direction_output(struct gpio_chip *chip, unsigned int off, int val)
{
unsigned rel = S6_GPIO_OFFSET(off);
writeb(~0, S6_REG_GPIO + S6_GPIO_DIR + rel);
writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + rel);
return 0;
}
static void set(struct gpio_chip *chip, unsigned int off, int val)
{
writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
}
static int to_irq(struct gpio_chip *chip, unsigned offset)
{
if (offset < 8)
return offset + IRQ_BASE;
return -EINVAL;
}
static struct gpio_chip gpiochip = {
.owner = THIS_MODULE,
.direction_input = direction_input,
.get = get,
.direction_output = direction_output,
.set = set,
.to_irq = to_irq,
.base = 0,
.ngpio = 24,
.can_sleep = 0, /* no blocking io needed */
.exported = 0, /* no exporting to userspace */
};
int s6_gpio_init(u32 afsel)
{
writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL);
writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL);
writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL);
return gpiochip_add(&gpiochip);
}
static void ack(struct irq_data *d)
{
writeb(1 << (d->irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC);
}
static void mask(struct irq_data *d)
{
u8 r = readb(S6_REG_GPIO + S6_GPIO_IE);
r &= ~(1 << (d->irq - IRQ_BASE));
writeb(r, S6_REG_GPIO + S6_GPIO_IE);
}
static void unmask(struct irq_data *d)
{
u8 m = readb(S6_REG_GPIO + S6_GPIO_IE);
m |= 1 << (d->irq - IRQ_BASE);
writeb(m, S6_REG_GPIO + S6_GPIO_IE);
}
static int set_type(struct irq_data *d, unsigned int type)
{
const u8 m = 1 << (d->irq - IRQ_BASE);
irq_flow_handler_t handler;
u8 reg;
if (type == IRQ_TYPE_PROBE) {
if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m)
|| (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m)
|| readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR
+ S6_GPIO_MASK(d->irq - IRQ_BASE)))
return 0;
type = IRQ_TYPE_EDGE_BOTH;
}
reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
reg |= m;
handler = handle_level_irq;
} else {
reg &= ~m;
handler = handle_edge_irq;
}
writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
__irq_set_handler_locked(d->irq, handler);
reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING))
reg |= m;
else
reg &= ~m;
writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
reg |= m;
else
reg &= ~m;
writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
return 0;
}
static struct irq_chip gpioirqs = {
.name = "GPIO",
.irq_ack = ack,
.irq_mask = mask,
.irq_unmask = unmask,
.irq_set_type = set_type,
};
static u8 demux_masks[4];
static void demux_irqs(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
u8 *mask = irq_desc_get_handler_data(desc);
u8 pending;
int cirq;
chip->irq_mask(&desc->irq_data);
chip->irq_ack(&desc->irq_data);
pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
cirq = IRQ_BASE - 1;
while (pending) {
int n = ffs(pending);
cirq += n;
pending >>= n;
generic_handle_irq(cirq);
}
chip->irq_unmask(&desc->irq_data);
}
extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
void __init variant_init_irq(void)
{
int irq, n;
writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE);
for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) {
const signed char *mapping = platform_irq_mappings[irq];
int alone = 1;
u8 mask;
if (!mapping)
continue;
for (mask = 0; *mapping != -1; mapping++)
switch (*mapping) {
case S6_INTC_GPIO(0):
mask |= 1 << 0;
break;
case S6_INTC_GPIO(1):
mask |= 1 << 1;
break;
case S6_INTC_GPIO(2):
mask |= 1 << 2;
break;
case S6_INTC_GPIO(3):
mask |= 0x1f << 3;
break;
default:
alone = 0;
}
if (mask) {
int cirq, i;
if (!alone) {
printk(KERN_ERR "chained irq chips can't share"
" parent irq %i\n", irq);
continue;
}
demux_masks[n] = mask;
cirq = IRQ_BASE - 1;
do {
i = ffs(mask);
cirq += i;
mask >>= i;
irq_set_chip(cirq, &gpioirqs);
irq_set_irq_type(cirq, IRQ_TYPE_LEVEL_LOW);
} while (mask);
irq_set_handler_data(irq, demux_masks + n);
irq_set_chained_handler(irq, demux_irqs);
if (++n == ARRAY_SIZE(demux_masks))
break;
}
}
}
| gpl-2.0 |
mastero9017/Crystal | arch/arm/mach-msm/msm_mpmctr.c | 3360 | 2286 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <mach/msm_mpmctr.h>
static void __iomem *mpm_timer_base;
uint32_t msm_mpm_get_count(void)
{
uint32_t count;
if (!mpm_timer_base)
return 0;
count = __raw_readl_no_log(mpm_timer_base);
pr_debug("mpm sclk sync:(%u)", count);
return count;
}
EXPORT_SYMBOL(msm_mpm_get_count);
static inline void msm_mpmctr_show_count(void)
{
unsigned long long t;
unsigned long nsec_rem;
t = sched_clock();
nsec_rem = do_div(t, 1000000000)/1000;
printk(KERN_INFO "mpm_counter: [%5lu.%06lu]:(%u)\n",
(unsigned long)t, nsec_rem,
msm_mpm_get_count());
}
static struct of_device_id msm_mpmctr_of_match[] = {
{.compatible = "qcom,mpm2-sleep-counter"},
{}
};
static struct platform_driver msm_mpmctr_driver = {
.driver = {
.name = "msm_mpctr",
.owner = THIS_MODULE,
.of_match_table = msm_mpmctr_of_match,
},
};
static int __init mpmctr_set_register(struct device_node *np)
{
if (of_get_address(np, 0, NULL, NULL)) {
mpm_timer_base = of_iomap(np, 0);
if (!mpm_timer_base) {
pr_err("%s: cannot map timer base\n", __func__);
return -ENOMEM;
}
}
return 0;
}
static int __init msm_mpmctr_probe(struct platform_device *pdev)
{
if (!pdev->dev.of_node)
return -ENODEV;
if (mpmctr_set_register(pdev->dev.of_node))
return -ENODEV;
msm_mpmctr_show_count();
return 0;
}
static int __init mpmctr_init(void)
{
return platform_driver_probe(&msm_mpmctr_driver, msm_mpmctr_probe);
}
module_init(mpmctr_init)
| gpl-2.0 |
FeyoMx/MDSdevKernel_a7010 | arch/um/kernel/smp.c | 3360 | 4873 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#ifdef CONFIG_SMP
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/spinlock.h>
#include <kern.h>
#include <irq_user.h>
#include <os.h>
/* Per CPU bogomips and other parameters
* The only piece used here is the ipi pipe, which is set before SMP is
* started and never changed.
*/
struct cpuinfo_um cpu_data[NR_CPUS];
/* A statistic, can be a little off */
int num_reschedules_sent = 0;
/* Not changed after boot */
struct task_struct *idle_threads[NR_CPUS];
void smp_send_reschedule(int cpu)
{
os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1);
num_reschedules_sent++;
}
void smp_send_stop(void)
{
int i;
printk(KERN_INFO "Stopping all CPUs...");
for (i = 0; i < num_online_cpus(); i++) {
if (i == current_thread->cpu)
continue;
os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
}
printk(KERN_CONT "done\n");
}
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
static cpumask_t cpu_callin_map = CPU_MASK_NONE;
static int idle_proc(void *cpup)
{
int cpu = (int) cpup, err;
err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
if (err < 0)
panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
wmb();
if (cpu_test_and_set(cpu, cpu_callin_map)) {
printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
BUG();
}
while (!cpu_isset(cpu, smp_commenced_mask))
cpu_relax();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
default_idle();
return 0;
}
static struct task_struct *idle_thread(int cpu)
{
struct task_struct *new_task;
current->thread.request.u.thread.proc = idle_proc;
current->thread.request.u.thread.arg = (void *) cpu;
new_task = fork_idle(cpu);
if (IS_ERR(new_task))
panic("copy_process failed in idle_thread, error = %ld",
PTR_ERR(new_task));
cpu_tasks[cpu] = ((struct cpu_task)
{ .pid = new_task->thread.mode.tt.extern_pid,
.task = new_task } );
idle_threads[cpu] = new_task;
panic("skas mode doesn't support SMP");
return new_task;
}
void smp_prepare_cpus(unsigned int maxcpus)
{
struct task_struct *idle;
unsigned long waittime;
int err, cpu, me = smp_processor_id();
int i;
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
set_cpu_online(me, true);
cpu_set(me, cpu_callin_map);
err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
if (err < 0)
panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
os_set_fd_async(cpu_data[me].ipi_pipe[0]);
for (cpu = 1; cpu < ncpus; cpu++) {
printk(KERN_INFO "Booting processor %d...\n", cpu);
idle = idle_thread(cpu);
init_idle(idle, cpu);
waittime = 200000000;
while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
cpu_relax();
printk(KERN_INFO "%s\n",
cpu_isset(cpu, cpu_callin_map) ? "done" : "failed");
}
}
void smp_prepare_boot_cpu(void)
{
set_cpu_online(smp_processor_id(), true);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
cpu_set(cpu, smp_commenced_mask);
while (!cpu_online(cpu))
mb();
return 0;
}
int setup_profiling_timer(unsigned int multiplier)
{
printk(KERN_INFO "setup_profiling_timer\n");
return 0;
}
void smp_call_function_slave(int cpu);
void IPI_handler(int cpu)
{
unsigned char c;
int fd;
fd = cpu_data[cpu].ipi_pipe[0];
while (os_read_file(fd, &c, 1) == 1) {
switch (c) {
case 'C':
smp_call_function_slave(cpu);
break;
case 'R':
scheduler_ipi();
break;
case 'S':
printk(KERN_INFO "CPU#%d stopping\n", cpu);
while (1)
pause();
break;
default:
printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n",
cpu, c);
break;
}
}
}
int hard_smp_processor_id(void)
{
return pid_to_processor_id(os_getpid());
}
static DEFINE_SPINLOCK(call_lock);
static atomic_t scf_started;
static atomic_t scf_finished;
static void (*func)(void *info);
static void *info;
void smp_call_function_slave(int cpu)
{
atomic_inc(&scf_started);
(*func)(info);
atomic_inc(&scf_finished);
}
int smp_call_function(void (*_func)(void *info), void *_info, int wait)
{
int cpus = num_online_cpus() - 1;
int i;
if (!cpus)
return 0;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
spin_lock_bh(&call_lock);
atomic_set(&scf_started, 0);
atomic_set(&scf_finished, 0);
func = _func;
info = _info;
for_each_online_cpu(i)
os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);
while (atomic_read(&scf_started) != cpus)
barrier();
if (wait)
while (atomic_read(&scf_finished) != cpus)
barrier();
spin_unlock_bh(&call_lock);
return 0;
}
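/*
 * Illustrative sketch, not part of this file: invoking the cross-call
 * path above.  The callback is an assumption for the example; with
 * wait != 0 the caller spins until every slave has finished running it.
 */
#if 0	/* example only */
static void example_ipi_func(void *info)
{
	/* runs on each other online CPU via the 'C' IPI byte */
}

static void example_cross_call(void)
{
	smp_call_function(example_ipi_func, NULL, 1);
}
#endif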
#endif
| gpl-2.0 |
childofthehorn/android_kernel_oneplus_msm8994 | arch/powerpc/platforms/cell/beat.c | 3360 | 5669 | /*
* Simple routines for Celleb/Beat
*
* (C) Copyright 2006-2007 TOSHIBA CORPORATION
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/rtc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/reboot.h>
#include <asm/hvconsole.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include "beat_wrapper.h"
#include "beat.h"
#include "beat_interrupt.h"
static int beat_pm_poweroff_flag;
void beat_restart(char *cmd)
{
beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
}
void beat_power_off(void)
{
beat_shutdown_logical_partition(0);
}
u64 beat_halt_code = 0x1000000000000000UL;
EXPORT_SYMBOL(beat_halt_code);
void beat_halt(void)
{
beat_shutdown_logical_partition(beat_halt_code);
}
int beat_set_rtc_time(struct rtc_time *rtc_time)
{
u64 tim;
tim = mktime(rtc_time->tm_year+1900,
rtc_time->tm_mon+1, rtc_time->tm_mday,
rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
if (beat_rtc_write(tim))
return -1;
return 0;
}
void beat_get_rtc_time(struct rtc_time *rtc_time)
{
u64 tim;
if (beat_rtc_read(&tim))
tim = 0;
to_tm(tim, rtc_time);
rtc_time->tm_year -= 1900;
rtc_time->tm_mon -= 1;
}
#define BEAT_NVRAM_SIZE 4096
ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
{
unsigned int i;
unsigned long len;
char *p = buf;
if (*index >= BEAT_NVRAM_SIZE)
return -ENODEV;
i = *index;
if (i + count > BEAT_NVRAM_SIZE)
count = BEAT_NVRAM_SIZE - i;
for (; count != 0; count -= len) {
len = count;
if (len > BEAT_NVRW_CNT)
len = BEAT_NVRW_CNT;
if (beat_eeprom_read(i, len, p))
return -EIO;
p += len;
i += len;
}
*index = i;
return p - buf;
}
ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
{
unsigned int i;
unsigned long len;
char *p = buf;
if (*index >= BEAT_NVRAM_SIZE)
return -ENODEV;
i = *index;
if (i + count > BEAT_NVRAM_SIZE)
count = BEAT_NVRAM_SIZE - i;
for (; count != 0; count -= len) {
len = count;
if (len > BEAT_NVRW_CNT)
len = BEAT_NVRW_CNT;
if (beat_eeprom_write(i, len, p))
return -EIO;
p += len;
i += len;
}
*index = i;
return p - buf;
}
ssize_t beat_nvram_get_size(void)
{
return BEAT_NVRAM_SIZE;
}
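/*
 * Illustrative sketch, not part of this file: draining the whole Beat
 * NVRAM through the chunked accessor above.  The buffer handling is an
 * assumption for the example; in practice the generic powerpc nvram
 * layer supplies buf/count/index itself.
 */
#if 0	/* example only */
static void example_dump_nvram(void)
{
	char buf[BEAT_NVRW_CNT];
	loff_t pos = 0;
	ssize_t n;

	while ((n = beat_nvram_read(buf, sizeof(buf), &pos)) > 0)
		;	/* consume n bytes from buf */
}
#endif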
int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
if (beat_set_dabr(dabr, dabrx))
return -1;
return 0;
}
int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
{
u64 db[2];
s64 ret;
ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
if (ret == 0) {
*t1 = db[0];
*t2 = db[1];
}
return ret;
}
EXPORT_SYMBOL(beat_get_term_char);
int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
{
u64 db[2];
db[0] = t1;
db[1] = t2;
return beat_put_characters_to_console(vterm, len, (u8 *)db);
}
EXPORT_SYMBOL(beat_put_term_char);
void beat_power_save(void)
{
beat_pause(0);
}
#ifdef CONFIG_KEXEC
void beat_kexec_cpu_down(int crash, int secondary)
{
beatic_deinit_IRQ();
}
#endif
static irqreturn_t beat_power_event(int virq, void *arg)
{
printk(KERN_DEBUG "Beat: power button pressed\n");
beat_pm_poweroff_flag = 1;
ctrl_alt_del();
return IRQ_HANDLED;
}
static irqreturn_t beat_reset_event(int virq, void *arg)
{
printk(KERN_DEBUG "Beat: reset button pressed\n");
beat_pm_poweroff_flag = 0;
ctrl_alt_del();
return IRQ_HANDLED;
}
static struct beat_event_list {
const char *typecode;
irq_handler_t handler;
unsigned int virq;
} beat_event_list[] = {
{ "power", beat_power_event, 0 },
{ "reset", beat_reset_event, 0 },
};
static int __init beat_register_event(void)
{
u64 path[4], data[2];
int rc, i;
unsigned int virq;
for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
struct beat_event_list *ev = &beat_event_list[i];
if (beat_construct_event_receive_port(data) != 0) {
printk(KERN_ERR "Beat: "
"cannot construct event receive port for %s\n",
ev->typecode);
return -EINVAL;
}
virq = irq_create_mapping(NULL, data[0]);
if (virq == NO_IRQ) {
printk(KERN_ERR "Beat: failed to get virtual IRQ"
" for event receive port for %s\n",
ev->typecode);
beat_destruct_event_receive_port(data[0]);
return -EIO;
}
ev->virq = virq;
rc = request_irq(virq, ev->handler, 0,
ev->typecode, NULL);
if (rc != 0) {
printk(KERN_ERR "Beat: failed to request virtual IRQ"
" for event receive port for %s\n",
ev->typecode);
beat_destruct_event_receive_port(data[0]);
return rc;
}
path[0] = 0x1000000065780000ul; /* 1,ex */
path[1] = 0x627574746f6e0000ul; /* button */
path[2] = 0;
strncpy((char *)&path[2], ev->typecode, 8);
path[3] = 0;
data[1] = 0;
beat_create_repository_node(path, data);
}
return 0;
}
static int __init beat_event_init(void)
{
if (!firmware_has_feature(FW_FEATURE_BEAT))
return -EINVAL;
beat_pm_poweroff_flag = 0;
return beat_register_event();
}
device_initcall(beat_event_init);
| gpl-2.0 |
KylinUI/android_kernel_samsung_exynos5410 | drivers/regulator/lp3971.c | 4896 | 13989 | /*
* Regulator driver for National Semiconductors LP3971 PMIC chip
*
* Copyright (C) 2009 Samsung Electronics
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
*
* Based on wm8350.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/lp3971.h>
#include <linux/slab.h>
struct lp3971 {
struct device *dev;
struct mutex io_lock;
struct i2c_client *i2c;
int num_regulators;
struct regulator_dev **rdev;
};
static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg);
static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val);
#define LP3971_SYS_CONTROL1_REG 0x07
/* System control register 1 initial value,
bits 4 and 5 are EPROM programmable */
#define SYS_CONTROL1_INIT_VAL 0x40
#define SYS_CONTROL1_INIT_MASK 0xCF
#define LP3971_BUCK_VOL_ENABLE_REG 0x10
#define LP3971_BUCK_VOL_CHANGE_REG 0x20
/* Voltage control registers shift:
LP3971_BUCK1 -> 0
LP3971_BUCK2 -> 4
LP3971_BUCK3 -> 6
*/
#define BUCK_VOL_CHANGE_SHIFT(x) (((!!x) << 2) | (x & ~0x01))
#define BUCK_VOL_CHANGE_FLAG_GO 0x01
#define BUCK_VOL_CHANGE_FLAG_TARGET 0x02
#define BUCK_VOL_CHANGE_FLAG_MASK 0x03
#define LP3971_BUCK1_BASE 0x23
#define LP3971_BUCK2_BASE 0x29
#define LP3971_BUCK3_BASE 0x32
static const int buck_base_addr[] = {
LP3971_BUCK1_BASE,
LP3971_BUCK2_BASE,
LP3971_BUCK3_BASE,
};
#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
static const int buck_voltage_map[] = {
0, 800, 850, 900, 950, 1000, 1050, 1100,
1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
3000, 3300,
};
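/*
 * Worked example (reading aid only): selector 0x14 (20) indexes 1800 in
 * the map above, so lp3971_dcdc_list_voltage() reports 1800000 uV for
 * it.  Index 0 appears to encode "off", which is why selection starts
 * at BUCK_TARGET_VOL_MIN_IDX (0x01) below.
 */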
#define BUCK_TARGET_VOL_MASK 0x3f
#define BUCK_TARGET_VOL_MIN_IDX 0x01
#define BUCK_TARGET_VOL_MAX_IDX 0x19
#define LP3971_BUCK_RAMP_REG(x) (buck_base_addr[x]+2)
#define LP3971_LDO_ENABLE_REG 0x12
#define LP3971_LDO_VOL_CONTR_BASE 0x39
/* Voltage control registers:
LP3971_LDO1 -> LP3971_LDO_VOL_CONTR_BASE + 0
LP3971_LDO2 -> LP3971_LDO_VOL_CONTR_BASE + 0
LP3971_LDO3 -> LP3971_LDO_VOL_CONTR_BASE + 1
LP3971_LDO4 -> LP3971_LDO_VOL_CONTR_BASE + 1
LP3971_LDO5 -> LP3971_LDO_VOL_CONTR_BASE + 2
*/
#define LP3971_LDO_VOL_CONTR_REG(x) (LP3971_LDO_VOL_CONTR_BASE + (x >> 1))
/* Voltage control registers shift:
LP3971_LDO1 -> 0, LP3971_LDO2 -> 4
LP3971_LDO3 -> 0, LP3971_LDO4 -> 4
LP3971_LDO5 -> 0
*/
#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
#define LDO_VOL_CONTR_MASK 0x0f
static const int ldo45_voltage_map[] = {
1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
};
static const int ldo123_voltage_map[] = {
1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
};
static const int *ldo_voltage_map[] = {
ldo123_voltage_map, /* LDO1 */
ldo123_voltage_map, /* LDO2 */
ldo123_voltage_map, /* LDO3 */
ldo45_voltage_map, /* LDO4 */
ldo45_voltage_map, /* LDO5 */
};
#define LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[(x - LP3971_LDO1)])
#define LDO_VOL_MIN_IDX 0x00
#define LDO_VOL_MAX_IDX 0x0f
static int lp3971_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
{
int ldo = rdev_get_id(dev) - LP3971_LDO1;
return 1000 * LDO_VOL_VALUE_MAP(ldo)[index];
}
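/*
 * Enable-bit layout, inferred from the mask arithmetic used below:
 * LDO1..LDO5 occupy bits 1..5 of LP3971_LDO_ENABLE_REG (bit 0 is not
 * touched by this driver).
 */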
static int lp3971_ldo_is_enabled(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
u16 mask = 1 << (1 + ldo);
u16 val;
val = lp3971_reg_read(lp3971, LP3971_LDO_ENABLE_REG);
return (val & mask) != 0;
}
static int lp3971_ldo_enable(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
u16 mask = 1 << (1 + ldo);
return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, mask);
}
static int lp3971_ldo_disable(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
u16 mask = 1 << (1 + ldo);
return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, 0);
}
static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
u16 val, reg;
reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
return 1000 * LDO_VOL_VALUE_MAP(ldo)[val];
}
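/*
 * Voltage selection policy (shared with the buck path below): scan the
 * voltage map upwards, pick the first entry >= min_uV, and reject the
 * request if that entry overshoots max_uV. The chosen map index doubles
 * as the register selector value.
 */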
static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV,
unsigned int *selector)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
const int *vol_map = LDO_VOL_VALUE_MAP(ldo);
u16 val;
if (min_vol < vol_map[LDO_VOL_MIN_IDX] ||
min_vol > vol_map[LDO_VOL_MAX_IDX])
return -EINVAL;
for (val = LDO_VOL_MIN_IDX; val <= LDO_VOL_MAX_IDX; val++)
if (vol_map[val] >= min_vol)
break;
if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
*selector = val;
return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo),
val << LDO_VOL_CONTR_SHIFT(ldo));
}
static struct regulator_ops lp3971_ldo_ops = {
.list_voltage = lp3971_ldo_list_voltage,
.is_enabled = lp3971_ldo_is_enabled,
.enable = lp3971_ldo_enable,
.disable = lp3971_ldo_disable,
.get_voltage = lp3971_ldo_get_voltage,
.set_voltage = lp3971_ldo_set_voltage,
};
static int lp3971_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
{
return 1000 * buck_voltage_map[index];
}
static int lp3971_dcdc_is_enabled(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
u16 mask = 1 << (buck * 2);
u16 val;
val = lp3971_reg_read(lp3971, LP3971_BUCK_VOL_ENABLE_REG);
return (val & mask) != 0;
}
static int lp3971_dcdc_enable(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
u16 mask = 1 << (buck * 2);
return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, mask);
}
static int lp3971_dcdc_disable(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
u16 mask = 1 << (buck * 2);
return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, 0);
}
static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
u16 reg;
int val;
reg = lp3971_reg_read(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck));
reg &= BUCK_TARGET_VOL_MASK;
if (reg <= BUCK_TARGET_VOL_MAX_IDX)
val = 1000 * buck_voltage_map[reg];
else {
val = 0;
dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
}
return val;
}
static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV,
unsigned int *selector)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
const int *vol_map = buck_voltage_map;
u16 val;
int ret;
if (min_vol < vol_map[BUCK_TARGET_VOL_MIN_IDX] ||
min_vol > vol_map[BUCK_TARGET_VOL_MAX_IDX])
return -EINVAL;
for (val = BUCK_TARGET_VOL_MIN_IDX; val <= BUCK_TARGET_VOL_MAX_IDX;
val++)
if (vol_map[val] >= min_vol)
break;
if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
*selector = val;
ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
BUCK_TARGET_VOL_MASK, val);
if (ret)
return ret;
ret = lp3971_set_bits(lp3971, LP3971_BUCK_VOL_CHANGE_REG,
BUCK_VOL_CHANGE_FLAG_MASK << BUCK_VOL_CHANGE_SHIFT(buck),
BUCK_VOL_CHANGE_FLAG_GO << BUCK_VOL_CHANGE_SHIFT(buck));
if (ret)
return ret;
return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_CHANGE_REG,
BUCK_VOL_CHANGE_FLAG_MASK << BUCK_VOL_CHANGE_SHIFT(buck),
0 << BUCK_VOL_CHANGE_SHIFT(buck));
}
static struct regulator_ops lp3971_dcdc_ops = {
.list_voltage = lp3971_dcdc_list_voltage,
.is_enabled = lp3971_dcdc_is_enabled,
.enable = lp3971_dcdc_enable,
.disable = lp3971_dcdc_disable,
.get_voltage = lp3971_dcdc_get_voltage,
.set_voltage = lp3971_dcdc_set_voltage,
};
static struct regulator_desc regulators[] = {
{
.name = "LDO1",
.id = LP3971_LDO1,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "LDO2",
.id = LP3971_LDO2,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "LDO3",
.id = LP3971_LDO3,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "LDO4",
.id = LP3971_LDO4,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo45_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "LDO5",
.id = LP3971_LDO5,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo45_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "DCDC1",
.id = LP3971_DCDC1,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "DCDC2",
.id = LP3971_DCDC2,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
{
.name = "DCDC3",
.id = LP3971_DCDC3,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
};
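/*
 * Illustrative only: a board-code sketch for handing this driver its
 * platform data. The constraint values and the "board_regulators" /
 * "lp3971_pdata" names are invented for the example.
 *
 * static struct regulator_init_data lp3971_buck2_data = {
 * .constraints = {
 * .min_uV = 1000000,
 * .max_uV = 1400000,
 * .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
 * },
 * };
 * static struct lp3971_regulator_subdev board_regulators[] = {
 * { .id = LP3971_DCDC2, .initdata = &lp3971_buck2_data },
 * };
 * static struct lp3971_platform_data lp3971_pdata = {
 * .num_regulators = ARRAY_SIZE(board_regulators),
 * .regulators = board_regulators,
 * };
 */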
static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
u16 *dest)
{
int ret;
if (count != 1)
return -EIO;
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret < 0)
return -EIO;
*dest = ret;
return 0;
}
static int lp3971_i2c_write(struct i2c_client *i2c, char reg, int count,
const u16 *src)
{
if (count != 1)
return -EIO;
return i2c_smbus_write_byte_data(i2c, reg, *src);
}
static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg)
{
u16 val = 0;
mutex_lock(&lp3971->io_lock);
lp3971_i2c_read(lp3971->i2c, reg, 1, &val);
dev_dbg(lp3971->dev, "reg read 0x%02x -> 0x%02x\n", (int)reg,
(unsigned)val&0xff);
mutex_unlock(&lp3971->io_lock);
return val & 0xff;
}
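/*
 * Read-modify-write helper: callers must pass @val already shifted into
 * position inside @mask. The io_lock makes the read and the write-back
 * atomic with respect to other register accesses from this driver.
 */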
static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
{
u16 tmp;
int ret;
mutex_lock(&lp3971->io_lock);
ret = lp3971_i2c_read(lp3971->i2c, reg, 1, &tmp);
tmp = (tmp & ~mask) | val;
if (ret == 0) {
ret = lp3971_i2c_write(lp3971->i2c, reg, 1, &tmp);
dev_dbg(lp3971->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
(unsigned)val&0xff);
}
mutex_unlock(&lp3971->io_lock);
return ret;
}
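/*
 * Register one regulator_dev per entry in the platform data, unwinding
 * the already-registered ones if any registration fails.
 */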
static int __devinit setup_regulators(struct lp3971 *lp3971,
struct lp3971_platform_data *pdata)
{
int i, err;
lp3971->num_regulators = pdata->num_regulators;
lp3971->rdev = kcalloc(pdata->num_regulators,
sizeof(struct regulator_dev *), GFP_KERNEL);
if (!lp3971->rdev) {
err = -ENOMEM;
goto err_nomem;
}
/* Instantiate the regulators */
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
lp3971->rdev[i] = regulator_register(&regulators[reg->id],
lp3971->dev, reg->initdata, lp3971, NULL);
if (IS_ERR(lp3971->rdev[i])) {
err = PTR_ERR(lp3971->rdev[i]);
dev_err(lp3971->dev, "regulator init failed: %d\n",
err);
goto error;
}
}
return 0;
error:
while (--i >= 0)
regulator_unregister(lp3971->rdev[i]);
kfree(lp3971->rdev);
lp3971->rdev = NULL;
err_nomem:
return err;
}
static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lp3971 *lp3971;
struct lp3971_platform_data *pdata = i2c->dev.platform_data;
int ret;
u16 val;
if (!pdata) {
dev_dbg(&i2c->dev, "No platform init data supplied\n");
return -ENODEV;
}
lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
if (lp3971 == NULL)
return -ENOMEM;
lp3971->i2c = i2c;
lp3971->dev = &i2c->dev;
mutex_init(&lp3971->io_lock);
/* Detect LP3971 */
ret = lp3971_i2c_read(i2c, LP3971_SYS_CONTROL1_REG, 1, &val);
if (ret == 0 && (val & SYS_CONTROL1_INIT_MASK) != SYS_CONTROL1_INIT_VAL)
ret = -ENODEV;
if (ret < 0) {
dev_err(&i2c->dev, "failed to detect device\n");
goto err_detect;
}
ret = setup_regulators(lp3971, pdata);
if (ret < 0)
goto err_detect;
i2c_set_clientdata(i2c, lp3971);
return 0;
err_detect:
kfree(lp3971);
return ret;
}
static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
{
struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
int i;
for (i = 0; i < lp3971->num_regulators; i++)
regulator_unregister(lp3971->rdev[i]);
kfree(lp3971->rdev);
kfree(lp3971);
return 0;
}
static const struct i2c_device_id lp3971_i2c_id[] = {
{ "lp3971", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp3971_i2c_id);
static struct i2c_driver lp3971_i2c_driver = {
.driver = {
.name = "LP3971",
.owner = THIS_MODULE,
},
.probe = lp3971_i2c_probe,
.remove = __devexit_p(lp3971_i2c_remove),
.id_table = lp3971_i2c_id,
};
static int __init lp3971_module_init(void)
{
int ret;
ret = i2c_add_driver(&lp3971_i2c_driver);
if (ret != 0)
pr_err("Failed to register I2C driver: %d\n", ret);
return ret;
}
module_init(lp3971_module_init);
static void __exit lp3971_module_exit(void)
{
i2c_del_driver(&lp3971_i2c_driver);
}
module_exit(lp3971_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marek Szyprowski <m.szyprowski@samsung.com>");
MODULE_DESCRIPTION("LP3971 PMIC driver");
| gpl-2.0 |
zarboz/nvidia_shield | drivers/message/i2o/i2o_proc.c | 4896 | 53362 | /*
* procfs handler for Linux I2O subsystem
*
* (c) Copyright 1999 Deepak Saxena
*
* Originally written by Deepak Saxena(deepak@plexity.net)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This is an initial test release. The code is based on the design of the
* ide procfs system (drivers/block/ide-proc.c). Some code taken from
* i2o-core module by Alan Cox.
*
* DISCLAIMER: This code is still under development/test and may cause
* your system to behave unpredictably. Use at your own discretion.
*
*
* Fixes/additions:
* Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI),
* Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI)
* University of Helsinki, Department of Computer Science
* LAN entries
* Markus Lidel <Markus.Lidel@shadowconnect.com>
* Changes for new I2O API
*/
#define OSM_NAME "proc-osm"
#define OSM_VERSION "1.316"
#define OSM_DESCRIPTION "I2O ProcFS OSM"
#define I2O_MAX_MODULES 4
// FIXME!
#define FMT_U64_HEX "0x%08x%08x"
#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/i2o.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/* Structure used to define /proc entries */
typedef struct _i2o_proc_entry_t {
char *name; /* entry name */
umode_t mode; /* mode */
const struct file_operations *fops; /* open function */
} i2o_proc_entry;
/* global I2O /proc/i2o entry */
static struct proc_dir_entry *i2o_proc_dir_root;
/* proc OSM driver struct */
static struct i2o_driver i2o_proc_driver = {
.name = OSM_NAME,
};
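/**
 * print_serial_number - pretty-print an I2O serial number field
 * @seq: seq_file to print into
 * @serialno: raw serial number data, starting with the SNFormat byte
 * @max_len: clamp used when the ASCII form carries its own length byte
 *
 * Decode the format byte and render the serial number accordingly.
 */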
static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
{
int i;
/* 19990419 -sralston
* The I2O v1.5 (and v2.0 so far) "official specification"
* got serial numbers WRONG!
* Apparently, and despite what Section 3.4.4 says and
* Figure 3-35 shows (pg 3-39 in the pdf doc),
* the convention / consensus seems to be:
* + First byte is SNFormat
* + Second byte is SNLen (but only if SNFormat==7 (?))
* + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
*/
switch (serialno[0]) {
case I2O_SNFORMAT_BINARY: /* Binary */
seq_printf(seq, "0x");
for (i = 0; i < serialno[1]; i++) {
seq_printf(seq, "%02X", serialno[2 + i]);
}
break;
case I2O_SNFORMAT_ASCII: /* ASCII */
if (serialno[1] < ' ') { /* printable or SNLen? */
/* sanity */
max_len =
(max_len < serialno[1]) ? max_len : serialno[1];
serialno[1 + max_len] = '\0';
/* just print it */
seq_printf(seq, "%s", &serialno[2]);
} else {
/* print chars for specified length */
for (i = 0; i < serialno[1]; i++) {
seq_printf(seq, "%c", serialno[2 + i]);
}
}
break;
case I2O_SNFORMAT_UNICODE: /* UNICODE */
seq_printf(seq, "UNICODE Format. Can't Display\n");
break;
case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
break;
case I2O_SNFORMAT_WAN: /* WAN MAC Address */
/* FIXME: Figure out what a WAN access address looks like?? */
seq_printf(seq, "WAN Access Address");
break;
/* plus new in v2.0 */
case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
/* FIXME: Figure out what a LAN-64 address really looks like?? */
seq_printf(seq,
"LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
serialno[8], serialno[9], &serialno[2]);
break;
case I2O_SNFORMAT_DDM: /* I2O DDM */
seq_printf(seq,
"DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
*(u16 *) & serialno[2],
*(u16 *) & serialno[4], *(u16 *) & serialno[6]);
break;
case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */
case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */
/* FIXME: Figure if this is even close?? */
seq_printf(seq,
"IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
*(u32 *) & serialno[2],
*(u32 *) & serialno[6],
*(u32 *) & serialno[10], *(u32 *) & serialno[14]);
break;
case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */
case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */
default:
seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]);
break;
}
return 0;
}
/**
* i2o_get_class_name - do i2o class name lookup
* @class: class number
*
* Return a descriptive string for an i2o class.
*/
static const char *i2o_get_class_name(int class)
{
int idx = 16;
static char *i2o_class_name[] = {
"Executive",
"Device Driver Module",
"Block Device",
"Tape Device",
"LAN Interface",
"WAN Interface",
"Fibre Channel Port",
"Fibre Channel Device",
"SCSI Device",
"ATE Port",
"ATE Device",
"Floppy Controller",
"Floppy Device",
"Secondary Bus Port",
"Peer Transport Agent",
"Peer Transport",
"Unknown"
};
switch (class & 0xfff) {
case I2O_CLASS_EXECUTIVE:
idx = 0;
break;
case I2O_CLASS_DDM:
idx = 1;
break;
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
idx = 2;
break;
case I2O_CLASS_SEQUENTIAL_STORAGE:
idx = 3;
break;
case I2O_CLASS_LAN:
idx = 4;
break;
case I2O_CLASS_WAN:
idx = 5;
break;
case I2O_CLASS_FIBRE_CHANNEL_PORT:
idx = 6;
break;
case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
idx = 7;
break;
case I2O_CLASS_SCSI_PERIPHERAL:
idx = 8;
break;
case I2O_CLASS_ATE_PORT:
idx = 9;
break;
case I2O_CLASS_ATE_PERIPHERAL:
idx = 10;
break;
case I2O_CLASS_FLOPPY_CONTROLLER:
idx = 11;
break;
case I2O_CLASS_FLOPPY_DEVICE:
idx = 12;
break;
case I2O_CLASS_BUS_ADAPTER:
idx = 13;
break;
case I2O_CLASS_PEER_TRANSPORT_AGENT:
idx = 14;
break;
case I2O_CLASS_PEER_TRANSPORT:
idx = 15;
break;
}
return i2o_class_name[idx];
}
#define SCSI_TABLE_SIZE 13
static char *scsi_devices[] = {
"Direct-Access Read/Write",
"Sequential-Access Storage",
"Printer",
"Processor",
"WORM Device",
"CD-ROM Device",
"Scanner Device",
"Optical Memory Device",
"Medium Changer Device",
"Communications Device",
"Graphics Art Pre-Press Device",
"Graphics Art Pre-Press Device",
"Array Controller Device"
};
/*
 * Copy at most n characters into a scratch buffer and return it. The
 * buffer is static on purpose: the original automatic array meant
 * returning a pointer to dead stack memory.
 */
static char *chtostr(u8 * chars, int n)
{
static char tmp[256];
tmp[0] = 0;
return strncat(tmp, (char *)chars, n);
}
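/*
 * Translate a parameter-query failure code into a human-readable line in
 * the seq_file, so every /proc reader sees why a group could not be
 * fetched.
 */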
static int i2o_report_query_status(struct seq_file *seq, int block_status,
char *group)
{
switch (block_status) {
case -ETIMEDOUT:
return seq_printf(seq, "Timeout reading group %s.\n", group);
case -ENOMEM:
return seq_printf(seq, "No free memory to read the table.\n");
case -I2O_PARAMS_STATUS_INVALID_GROUP_ID:
return seq_printf(seq, "Group %s not supported.\n", group);
default:
return seq_printf(seq,
"Error reading group %s. BlockStatus 0x%02X\n",
group, -block_status);
}
}
static char *bus_strings[] = {
"Local Bus",
"ISA",
"EISA",
"MCA",
"PCI",
"PCMCIA",
"NUBUS",
"CARDBUS"
};
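/*
 * Dump the controller's Hardware Resource Table: one entry per adapter,
 * with bus-specific detail for each bus type the table can describe.
 */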
static int i2o_seq_show_hrt(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt;
u32 bus;
int i;
if (hrt->hrt_version) {
seq_printf(seq,
"HRT table for controller is too new a version.\n");
return 0;
}
seq_printf(seq, "HRT has %d entries of %d bytes each.\n",
hrt->num_entries, hrt->entry_len << 2);
for (i = 0; i < hrt->num_entries; i++) {
seq_printf(seq, "Entry %d:\n", i);
seq_printf(seq, " Adapter ID: %0#10x\n",
hrt->hrt_entry[i].adapter_id);
seq_printf(seq, " Controlling tid: %0#6x\n",
hrt->hrt_entry[i].parent_tid);
if (hrt->hrt_entry[i].bus_type != 0x80) {
bus = hrt->hrt_entry[i].bus_type;
seq_printf(seq, " %s Information\n",
bus_strings[bus]);
switch (bus) {
case I2O_BUS_LOCAL:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.local_bus.
LbBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x\n",
hrt->hrt_entry[i].bus.local_bus.
LbBaseMemoryAddress);
break;
case I2O_BUS_ISA:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.isa_bus.
IsaBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x,",
hrt->hrt_entry[i].bus.isa_bus.
IsaBaseMemoryAddress);
seq_printf(seq, " CSN: %0#4x,",
hrt->hrt_entry[i].bus.isa_bus.CSN);
break;
case I2O_BUS_EISA:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.eisa_bus.
EisaBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x,",
hrt->hrt_entry[i].bus.eisa_bus.
EisaBaseMemoryAddress);
seq_printf(seq, " Slot: %0#4x,",
hrt->hrt_entry[i].bus.eisa_bus.
EisaSlotNumber);
break;
case I2O_BUS_MCA:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.mca_bus.
McaBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x,",
hrt->hrt_entry[i].bus.mca_bus.
McaBaseMemoryAddress);
seq_printf(seq, " Slot: %0#4x,",
hrt->hrt_entry[i].bus.mca_bus.
McaSlotNumber);
break;
case I2O_BUS_PCI:
seq_printf(seq, " Bus: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
PciBusNumber);
seq_printf(seq, " Dev: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
PciDeviceNumber);
seq_printf(seq, " Func: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
PciFunctionNumber);
seq_printf(seq, " Vendor: %0#6x",
hrt->hrt_entry[i].bus.pci_bus.
PciVendorID);
seq_printf(seq, " Device: %0#6x\n",
hrt->hrt_entry[i].bus.pci_bus.
PciDeviceID);
break;
default:
seq_printf(seq, " Unsupported Bus Type\n");
}
} else
seq_printf(seq, " Unknown Bus Type\n");
}
return 0;
}
static int i2o_seq_show_lct(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
i2o_lct *lct = (i2o_lct *) c->lct;
int entries;
int i;
#define BUS_TABLE_SIZE 3
static char *bus_ports[] = {
"Generic Bus",
"SCSI Bus",
"Fibre Channel Bus"
};
entries = (lct->table_size - 3) / 9;
seq_printf(seq, "LCT contains %d %s\n", entries,
entries == 1 ? "entry" : "entries");
if (lct->boot_tid)
seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid);
seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind);
for (i = 0; i < entries; i++) {
seq_printf(seq, "Entry %d\n", i);
seq_printf(seq, " Class, SubClass : %s",
i2o_get_class_name(lct->lct_entry[i].class_id));
/*
* Classes which we'll print subclass info for
*/
switch (lct->lct_entry[i].class_id & 0xFFF) {
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
switch (lct->lct_entry[i].sub_class) {
case 0x00:
seq_printf(seq, ", Direct-Access Read/Write");
break;
case 0x04:
seq_printf(seq, ", WORM Drive");
break;
case 0x05:
seq_printf(seq, ", CD-ROM Drive");
break;
case 0x07:
seq_printf(seq, ", Optical Memory Device");
break;
default:
seq_printf(seq, ", Unknown (0x%02x)",
lct->lct_entry[i].sub_class);
break;
}
break;
case I2O_CLASS_LAN:
switch (lct->lct_entry[i].sub_class & 0xFF) {
case 0x30:
seq_printf(seq, ", Ethernet");
break;
case 0x40:
seq_printf(seq, ", 100base VG");
break;
case 0x50:
seq_printf(seq, ", IEEE 802.5/Token-Ring");
break;
case 0x60:
seq_printf(seq, ", ANSI X3T9.5 FDDI");
break;
case 0x70:
seq_printf(seq, ", Fibre Channel");
break;
default:
seq_printf(seq, ", Unknown Sub-Class (0x%02x)",
lct->lct_entry[i].sub_class & 0xFF);
break;
}
break;
case I2O_CLASS_SCSI_PERIPHERAL:
if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
seq_printf(seq, ", %s",
scsi_devices[lct->lct_entry[i].
sub_class]);
else
seq_printf(seq, ", Unknown Device Type");
break;
case I2O_CLASS_BUS_ADAPTER:
if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
seq_printf(seq, ", %s",
bus_ports[lct->lct_entry[i].
sub_class]);
else
seq_printf(seq, ", Unknown Bus Type");
break;
}
seq_printf(seq, "\n");
seq_printf(seq, " Local TID : 0x%03x\n",
lct->lct_entry[i].tid);
seq_printf(seq, " User TID : 0x%03x\n",
lct->lct_entry[i].user_tid);
seq_printf(seq, " Parent TID : 0x%03x\n",
lct->lct_entry[i].parent_tid);
seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n",
lct->lct_entry[i].identity_tag[0],
lct->lct_entry[i].identity_tag[1],
lct->lct_entry[i].identity_tag[2],
lct->lct_entry[i].identity_tag[3],
lct->lct_entry[i].identity_tag[4],
lct->lct_entry[i].identity_tag[5],
lct->lct_entry[i].identity_tag[6],
lct->lct_entry[i].identity_tag[7]);
seq_printf(seq, " Change Indicator : %0#10x\n",
lct->lct_entry[i].change_ind);
seq_printf(seq, " Event Capab Mask : %0#10x\n",
lct->lct_entry[i].device_flags);
}
return 0;
}
static int i2o_seq_show_status(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
char prodstr[25];
int version;
i2o_status_block *sb = c->status_block.virt;
i2o_status_get(c); // reread the status block
seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id);
version = sb->i2o_version;
/* FIXME for Spec 2.0
if (version == 0x02) {
seq_printf(seq, "Lowest I2O version supported: ");
switch(workspace[2]) {
case 0x00:
seq_printf(seq, "1.0\n");
break;
case 0x01:
seq_printf(seq, "1.5\n");
break;
case 0x02:
seq_printf(seq, "2.0\n");
break;
}
seq_printf(seq, "Highest I2O version supported: ");
switch(workspace[3]) {
case 0x00:
seq_printf(seq, "1.0\n");
break;
case 0x01:
seq_printf(seq, "1.5\n");
break;
case 0x02:
seq_printf(seq, "2.0\n");
break;
}
}
*/
seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id);
seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id);
seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number);
seq_printf(seq, "I2O version : ");
switch (version) {
case 0x00:
seq_printf(seq, "1.0\n");
break;
case 0x01:
seq_printf(seq, "1.5\n");
break;
case 0x02:
seq_printf(seq, "2.0\n");
break;
default:
seq_printf(seq, "Unknown version\n");
}
seq_printf(seq, "IOP State : ");
switch (sb->iop_state) {
case 0x01:
seq_printf(seq, "INIT\n");
break;
case 0x02:
seq_printf(seq, "RESET\n");
break;
case 0x04:
seq_printf(seq, "HOLD\n");
break;
case 0x05:
seq_printf(seq, "READY\n");
break;
case 0x08:
seq_printf(seq, "OPERATIONAL\n");
break;
case 0x10:
seq_printf(seq, "FAILED\n");
break;
case 0x11:
seq_printf(seq, "FAULTED\n");
break;
default:
seq_printf(seq, "Unknown\n");
break;
}
seq_printf(seq, "Messenger Type : ");
switch (sb->msg_type) {
case 0x00:
seq_printf(seq, "Memory mapped\n");
break;
case 0x01:
seq_printf(seq, "Memory mapped only\n");
break;
case 0x02:
seq_printf(seq, "Remote only\n");
break;
case 0x03:
seq_printf(seq, "Memory mapped and remote\n");
break;
default:
seq_printf(seq, "Unknown\n");
}
seq_printf(seq, "Inbound Frame Size : %d bytes\n",
sb->inbound_frame_size << 2);
seq_printf(seq, "Max Inbound Frames : %d\n",
sb->max_inbound_frames);
seq_printf(seq, "Current Inbound Frames : %d\n",
sb->cur_inbound_frames);
seq_printf(seq, "Max Outbound Frames : %d\n",
sb->max_outbound_frames);
/* Spec doesn't say if NULL terminated or not... */
memcpy(prodstr, sb->product_id, 24);
prodstr[24] = '\0';
seq_printf(seq, "Product ID : %s\n", prodstr);
seq_printf(seq, "Expected LCT Size : %d bytes\n",
sb->expected_lct_size);
seq_printf(seq, "IOP Capabilities\n");
seq_printf(seq, " Context Field Size Support : ");
switch (sb->iop_capabilities & 0x0000003) {
case 0:
seq_printf(seq, "Supports only 32-bit context fields\n");
break;
case 1:
seq_printf(seq, "Supports only 64-bit context fields\n");
break;
case 2:
seq_printf(seq, "Supports 32-bit and 64-bit context fields, "
"but not concurrently\n");
break;
case 3:
seq_printf(seq, "Supports 32-bit and 64-bit context fields "
"concurrently\n");
break;
default:
seq_printf(seq, "0x%08x\n", sb->iop_capabilities);
}
seq_printf(seq, " Current Context Field Size : ");
switch (sb->iop_capabilities & 0x0000000C) {
case 0:
seq_printf(seq, "not configured\n");
break;
case 4:
seq_printf(seq, "Supports only 32-bit context fields\n");
break;
case 8:
seq_printf(seq, "Supports only 64-bit context fields\n");
break;
case 12:
seq_printf(seq, "Supports both 32-bit or 64-bit context fields "
"concurrently\n");
break;
default:
seq_printf(seq, "\n");
}
seq_printf(seq, " Inbound Peer Support : %s\n",
(sb->
iop_capabilities & 0x00000010) ? "Supported" :
"Not supported");
seq_printf(seq, " Outbound Peer Support : %s\n",
(sb->
iop_capabilities & 0x00000020) ? "Supported" :
"Not supported");
seq_printf(seq, " Peer to Peer Support : %s\n",
(sb->
iop_capabilities & 0x00000040) ? "Supported" :
"Not supported");
seq_printf(seq, "Desired private memory size : %d kB\n",
sb->desired_mem_size >> 10);
seq_printf(seq, "Allocated private memory size : %d kB\n",
sb->current_mem_size >> 10);
seq_printf(seq, "Private memory base address : %0#10x\n",
sb->current_mem_base);
seq_printf(seq, "Desired private I/O size : %d kB\n",
sb->desired_io_size >> 10);
seq_printf(seq, "Allocated private I/O size : %d kB\n",
sb->current_io_size >> 10);
seq_printf(seq, "Private I/O base address : %0#10x\n",
sb->current_io_base);
return 0;
}
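/*
 * Dump executive group 0x0000 (IOP Hardware): vendor/product IDs, CPU
 * type, memory sizes and the capability bits.
 */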
static int i2o_seq_show_hw(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
static u32 work32[5];
static u8 *work8 = (u8 *) work32;
static u16 *work16 = (u16 *) work32;
int token;
u32 hwcap;
static char *cpu_table[] = {
"Intel 80960 series",
"AMD2900 series",
"Motorola 68000 series",
"ARM series",
"MIPS series",
"Sparc series",
"PowerPC series",
"Intel x86 series"
};
token =
i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token, "0x0000 IOP Hardware");
return 0;
}
seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]);
seq_printf(seq, "Product ID : %0#6x\n", work16[1]);
seq_printf(seq, "CPU : ");
if (work8[16] >= ARRAY_SIZE(cpu_table))
seq_printf(seq, "Unknown\n");
else
seq_printf(seq, "%s\n", cpu_table[work8[16]]);
/* Anyone using ProcessorVersion? */
seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10);
seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10);
hwcap = work32[3];
seq_printf(seq, "Capabilities : 0x%08x\n", hwcap);
seq_printf(seq, " [%s] Self booting\n",
(hwcap & 0x00000001) ? "+" : "-");
seq_printf(seq, " [%s] Upgradable IRTOS\n",
(hwcap & 0x00000002) ? "+" : "-");
seq_printf(seq, " [%s] Supports downloading DDMs\n",
(hwcap & 0x00000004) ? "+" : "-");
seq_printf(seq, " [%s] Supports installing DDMs\n",
(hwcap & 0x00000008) ? "+" : "-");
seq_printf(seq, " [%s] Battery-backed RAM\n",
(hwcap & 0x00000010) ? "+" : "-");
return 0;
}
/* Executive group 0003h - Executing DDM List (table) */
static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
int token;
int i;
typedef struct _i2o_exec_execute_ddm_table {
u16 ddm_tid;
u8 module_type;
u8 reserved;
u16 i2o_vendor_id;
u16 module_id;
u8 module_name_version[28];
u32 data_size;
u32 code_size;
} i2o_exec_execute_ddm_table;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES];
} *result;
i2o_exec_execute_ddm_table ddm_table;
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return -ENOMEM;
token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1,
NULL, 0, result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0x0003 Executing DDM List");
goto out;
}
seq_printf(seq,
"Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n");
/* copy the row inside the loop; the old loop header fetched one row past the last valid entry */
for (i = 0; i < result->row_count; i++) {
ddm_table = result->ddm_table[i];
seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF);
switch (ddm_table.module_type) {
case 0x01:
seq_printf(seq, "Downloaded DDM ");
break;
case 0x22:
seq_printf(seq, "Embedded DDM ");
break;
default:
seq_printf(seq, " ");
}
seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
seq_printf(seq, "%-#8x", ddm_table.module_id);
seq_printf(seq, "%-29s",
chtostr(ddm_table.module_name_version, 28));
seq_printf(seq, "%9d ", ddm_table.data_size);
seq_printf(seq, "%8d", ddm_table.code_size);
seq_printf(seq, "\n");
}
out:
kfree(result);
return 0;
}
/* Executive group 0004h - Driver Store (scalar) */
static int i2o_seq_show_driver_store(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
u32 work32[8];
int token;
token =
i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token, "0x0004 Driver Store");
return 0;
}
seq_printf(seq, "Module limit : %d\n"
"Module count : %d\n"
"Current space : %d kB\n"
"Free space : %d kB\n",
work32[0], work32[1], work32[2] >> 10, work32[3] >> 10);
return 0;
}
/* Executive group 0005h - Driver Store Table (table) */
static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
{
typedef struct _i2o_driver_store {
u16 stored_ddm_index;
u8 module_type;
u8 reserved;
u16 i2o_vendor_id;
u16 module_id;
u8 module_name_version[28];
u8 date[8];
u32 module_size;
u32 mpb_size;
u32 module_flags;
} i2o_driver_store_table;
struct i2o_controller *c = (struct i2o_controller *)seq->private;
int token;
int i;
typedef struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_driver_store_table dst[I2O_MAX_MODULES];
} i2o_driver_result_table;
i2o_driver_result_table *result;
i2o_driver_store_table *dst;
result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
if (result == NULL)
return -ENOMEM;
token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1,
NULL, 0, result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0x0005 DRIVER STORE TABLE");
kfree(result);
return 0;
}
seq_printf(seq,
"# Module_type Vendor Mod_id Module_name Vrs"
"Date Mod_size Par_size Flags\n");
for (i = 0, dst = &result->dst[0]; i < result->row_count;
dst = &result->dst[++i]) {
seq_printf(seq, "%-3d", dst->stored_ddm_index);
switch (dst->module_type) {
case 0x01:
seq_printf(seq, "Downloaded DDM ");
break;
case 0x22:
seq_printf(seq, "Embedded DDM ");
break;
default:
seq_printf(seq, " ");
}
seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
seq_printf(seq, "%-#8x", dst->module_id);
seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
seq_printf(seq, "%-9s", chtostr(dst->date, 8));
seq_printf(seq, "%8d ", dst->module_size);
seq_printf(seq, "%8d ", dst->mpb_size);
seq_printf(seq, "0x%04x", dst->module_flags);
seq_printf(seq, "\n");
}
kfree(result);
return 0;
}
/* Generic group F000h - Params Descriptor (table) */
static int i2o_seq_show_groups(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
u8 properties;
typedef struct _i2o_group_info {
u16 group_number;
u16 field_count;
u16 row_count;
u8 properties;
u8 reserved;
} i2o_group_info;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_group_info group[256];
} *result;
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return -ENOMEM;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0,
result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF000 Params Descriptor");
goto out;
}
seq_printf(seq,
"# Group FieldCount RowCount Type Add Del Clear\n");
for (i = 0; i < result->row_count; i++) {
seq_printf(seq, "%-3d", i);
seq_printf(seq, "0x%04X ", result->group[i].group_number);
seq_printf(seq, "%10d ", result->group[i].field_count);
seq_printf(seq, "%8d ", result->group[i].row_count);
properties = result->group[i].properties;
if (properties & 0x1)
seq_printf(seq, "Table ");
else
seq_printf(seq, "Scalar ");
if (properties & 0x2)
seq_printf(seq, " + ");
else
seq_printf(seq, " - ");
if (properties & 0x4)
seq_printf(seq, " + ");
else
seq_printf(seq, " - ");
if (properties & 0x8)
seq_printf(seq, " + ");
else
seq_printf(seq, " - ");
seq_printf(seq, "\n");
}
if (result->more_flag)
seq_printf(seq, "There is more...\n");
out:
kfree(result);
return 0;
}
/* Generic group F001h - Physical Device Table (table) */
static int i2o_seq_show_phys_device(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
u32 adapter_id[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF001 Physical Device Table");
return 0;
}
if (result.row_count)
seq_printf(seq, "# AdapterId\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%-2d", i);
seq_printf(seq, "%#7x\n", result.adapter_id[i]);
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F002h - Claimed Table (table) */
static int i2o_seq_show_claimed(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
u16 claimed_tid[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF002 Claimed Table");
return 0;
}
if (result.row_count)
seq_printf(seq, "# ClaimedTid\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%-2d", i);
seq_printf(seq, "%#7x\n", result.claimed_tid[i]);
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F003h - User Table (table) */
static int i2o_seq_show_users(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
typedef struct _i2o_user_table {
u16 instance;
u16 user_tid;
u8 claim_type;
u8 reserved1;
u16 reserved2;
} i2o_user_table;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_user_table user[64];
} *result;
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return -ENOMEM;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0,
result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF003 User Table");
goto out;
}
seq_printf(seq, "# Instance UserTid ClaimType\n");
for (i = 0; i < result->row_count; i++) {
seq_printf(seq, "%-3d", i);
seq_printf(seq, "%#8x ", result->user[i].instance);
seq_printf(seq, "%#7x ", result->user[i].user_tid);
seq_printf(seq, "%#9x\n", result->user[i].claim_type);
}
if (result->more_flag)
seq_printf(seq, "There is more...\n");
out:
kfree(result);
return 0;
}
/* Generic group F005h - Private message extensions (table) (optional) */
static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
typedef struct _i2o_private {
u16 ext_instance;
u16 organization_id;
u16 x_function_code;
} i2o_private;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_private extension[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF005 Private Message Extensions (optional)");
return 0;
}
seq_printf(seq, "Instance# OrgId FunctionCode\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%0#9x ", result.extension[i].ext_instance);
seq_printf(seq, "%0#6x ", result.extension[i].organization_id);
seq_printf(seq, "%0#6x", result.extension[i].x_function_code);
seq_printf(seq, "\n");
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F006h - Authorized User Table (table) */
static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
u32 alternate_tid[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF006 Autohorized User Table");
return 0;
}
if (result.row_count)
seq_printf(seq, "# AlternateTid\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%-2d", i);
seq_printf(seq, "%#7x ", result.alternate_tid[i]);
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F100h - Device Identity (scalar) */
static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
// == (allow) 512d bytes (max)
static u16 *work16 = (u16 *) work32;
int token;
token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF100 Device Identity");
return 0;
}
seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
seq_printf(seq, "Vendor info : %s\n",
chtostr((u8 *) (work32 + 2), 16));
seq_printf(seq, "Product info : %s\n",
chtostr((u8 *) (work32 + 6), 16));
seq_printf(seq, "Description : %s\n",
chtostr((u8 *) (work32 + 10), 16));
seq_printf(seq, "Product rev. : %s\n",
chtostr((u8 *) (work32 + 14), 8));
seq_printf(seq, "Serial number : ");
print_serial_number(seq, (u8 *) (work32 + 16),
/* allow for SNLen plus
* possible trailing '\0'
*/
sizeof(work32) - (16 * sizeof(u32)) - 2);
seq_printf(seq, "\n");
return 0;
}
static int i2o_seq_show_dev_name(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
seq_printf(seq, "%s\n", dev_name(&d->device));
return 0;
}
/* Generic group F101h - DDM Identity (scalar) */
static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
struct {
u16 ddm_tid;
u8 module_name[24];
u8 module_rev[8];
u8 sn_format;
u8 serial_number[12];
u8 pad[256]; // allow up to 256 byte (max) serial number
} result;
token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF101 DDM Identity");
return 0;
}
seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
seq_printf(seq, "Module name : %s\n",
chtostr(result.module_name, 24));
seq_printf(seq, "Module revision : %s\n",
chtostr(result.module_rev, 8));
seq_printf(seq, "Serial number : ");
print_serial_number(seq, result.serial_number, sizeof(result) - 36);
/* allow for SNLen plus possible trailing '\0' */
seq_printf(seq, "\n");
return 0;
}
/* Generic group F102h - User Information (scalar) */
static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
struct {
u8 device_name[64];
u8 service_name[64];
u8 physical_location[64];
u8 instance_number[4];
} result;
token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF102 User Information");
return 0;
}
seq_printf(seq, "Device name : %s\n",
chtostr(result.device_name, 64));
seq_printf(seq, "Service name : %s\n",
chtostr(result.service_name, 64));
seq_printf(seq, "Physical name : %s\n",
chtostr(result.physical_location, 64));
seq_printf(seq, "Instance number : %s\n",
chtostr(result.instance_number, 4));
return 0;
}
/* Generic group F103h - SGL Operating Limits (scalar) */
static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
static u32 work32[12];
static u16 *work16 = (u16 *) work32;
static u8 *work8 = (u8 *) work32;
int token;
token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF103 SGL Operating Limits");
return 0;
}
seq_printf(seq, "SGL chain size : %d\n", work32[0]);
seq_printf(seq, "Max SGL chain size : %d\n", work32[1]);
seq_printf(seq, "SGL chain size target : %d\n", work32[2]);
seq_printf(seq, "SGL frag count : %d\n", work16[6]);
seq_printf(seq, "Max SGL frag count : %d\n", work16[7]);
seq_printf(seq, "SGL frag count target : %d\n", work16[8]);
/* FIXME
if (d->i2oversion == 0x02)
{
*/
seq_printf(seq, "SGL data alignment : %d\n", work16[8]);
seq_printf(seq, "SGL addr limit : %d\n", work8[20]);
seq_printf(seq, "SGL addr sizes supported : ");
if (work8[21] & 0x01)
seq_printf(seq, "32 bit ");
if (work8[21] & 0x02)
seq_printf(seq, "64 bit ");
if (work8[21] & 0x04)
seq_printf(seq, "96 bit ");
if (work8[21] & 0x08)
seq_printf(seq, "128 bit ");
seq_printf(seq, "\n");
/*
}
*/
return 0;
}
/* Generic group F200h - Sensors (scalar) */
static int i2o_seq_show_sensors(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
struct {
u16 sensor_instance;
u8 component;
u16 component_instance;
u8 sensor_class;
u8 sensor_type;
u8 scaling_exponent;
u32 actual_reading;
u32 minimum_reading;
u32 low2lowcat_treshold;
u32 lowcat2low_treshold;
u32 lowwarn2low_treshold;
u32 low2lowwarn_treshold;
u32 norm2lowwarn_treshold;
u32 lowwarn2norm_treshold;
u32 nominal_reading;
u32 hiwarn2norm_treshold;
u32 norm2hiwarn_treshold;
u32 high2hiwarn_treshold;
u32 hiwarn2high_treshold;
u32 hicat2high_treshold;
u32 hi2hicat_treshold;
u32 maximum_reading;
u8 sensor_state;
u16 event_enable;
} result;
token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF200 Sensors (optional)");
return 0;
}
seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance);
seq_printf(seq, "Component : %d = ", result.component);
switch (result.component) {
case 0:
seq_printf(seq, "Other");
break;
case 1:
seq_printf(seq, "Planar logic Board");
break;
case 2:
seq_printf(seq, "CPU");
break;
case 3:
seq_printf(seq, "Chassis");
break;
case 4:
seq_printf(seq, "Power Supply");
break;
case 5:
seq_printf(seq, "Storage");
break;
case 6:
seq_printf(seq, "External");
break;
}
seq_printf(seq, "\n");
seq_printf(seq, "Component instance : %d\n",
result.component_instance);
seq_printf(seq, "Sensor class : %s\n",
result.sensor_class ? "Analog" : "Digital");
seq_printf(seq, "Sensor type : %d = ", result.sensor_type);
switch (result.sensor_type) {
case 0:
seq_printf(seq, "Other\n");
break;
case 1:
seq_printf(seq, "Thermal\n");
break;
case 2:
seq_printf(seq, "DC voltage (DC volts)\n");
break;
case 3:
seq_printf(seq, "AC voltage (AC volts)\n");
break;
case 4:
seq_printf(seq, "DC current (DC amps)\n");
break;
case 5:
seq_printf(seq, "AC current (AC volts)\n");
break;
case 6:
seq_printf(seq, "Door open\n");
break;
case 7:
seq_printf(seq, "Fan operational\n");
break;
}
seq_printf(seq, "Scaling exponent : %d\n",
result.scaling_exponent);
seq_printf(seq, "Actual reading : %d\n", result.actual_reading);
seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading);
seq_printf(seq, "Low2LowCat treshold : %d\n",
result.low2lowcat_treshold);
seq_printf(seq, "LowCat2Low treshold : %d\n",
result.lowcat2low_treshold);
seq_printf(seq, "LowWarn2Low treshold : %d\n",
result.lowwarn2low_treshold);
seq_printf(seq, "Low2LowWarn treshold : %d\n",
result.low2lowwarn_treshold);
seq_printf(seq, "Norm2LowWarn treshold : %d\n",
result.norm2lowwarn_treshold);
seq_printf(seq, "LowWarn2Norm treshold : %d\n",
result.lowwarn2norm_treshold);
seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading);
seq_printf(seq, "HiWarn2Norm treshold : %d\n",
result.hiwarn2norm_treshold);
seq_printf(seq, "Norm2HiWarn treshold : %d\n",
result.norm2hiwarn_treshold);
seq_printf(seq, "High2HiWarn treshold : %d\n",
result.high2hiwarn_treshold);
seq_printf(seq, "HiWarn2High treshold : %d\n",
result.hiwarn2high_treshold);
seq_printf(seq, "HiCat2High treshold : %d\n",
result.hicat2high_treshold);
seq_printf(seq, "High2HiCat treshold : %d\n",
result.hi2hicat_treshold);
seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading);
seq_printf(seq, "Sensor state : %d = ", result.sensor_state);
switch (result.sensor_state) {
case 0:
seq_printf(seq, "Normal\n");
break;
case 1:
seq_printf(seq, "Abnormal\n");
break;
case 2:
seq_printf(seq, "Unknown\n");
break;
case 3:
seq_printf(seq, "Low Catastrophic (LoCat)\n");
break;
case 4:
seq_printf(seq, "Low (Low)\n");
break;
case 5:
seq_printf(seq, "Low Warning (LoWarn)\n");
break;
case 6:
seq_printf(seq, "High Warning (HiWarn)\n");
break;
case 7:
seq_printf(seq, "High (High)\n");
break;
case 8:
seq_printf(seq, "High Catastrophic (HiCat)\n");
break;
}
seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable);
seq_printf(seq, " [%s] Operational state change. \n",
(result.event_enable & 0x01) ? "+" : "-");
seq_printf(seq, " [%s] Low catastrophic. \n",
(result.event_enable & 0x02) ? "+" : "-");
seq_printf(seq, " [%s] Low reading. \n",
(result.event_enable & 0x04) ? "+" : "-");
seq_printf(seq, " [%s] Low warning. \n",
(result.event_enable & 0x08) ? "+" : "-");
seq_printf(seq,
" [%s] Change back to normal from out of range state. \n",
(result.event_enable & 0x10) ? "+" : "-");
seq_printf(seq, " [%s] High warning. \n",
(result.event_enable & 0x20) ? "+" : "-");
seq_printf(seq, " [%s] High reading. \n",
(result.event_enable & 0x40) ? "+" : "-");
seq_printf(seq, " [%s] High catastrophic. \n",
(result.event_enable & 0x80) ? "+" : "-");
return 0;
}
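/*
 * Thin single_open() wrappers: each /proc file binds one of the show
 * routines above to the controller or device stashed in the proc dir
 * entry's data pointer.
 */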
static int i2o_seq_open_hrt(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_hrt, PDE(inode)->data);
};
static int i2o_seq_open_lct(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_lct, PDE(inode)->data);
};
static int i2o_seq_open_status(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_status, PDE(inode)->data);
};
static int i2o_seq_open_hw(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_hw, PDE(inode)->data);
};
static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data);
};
static int i2o_seq_open_driver_store(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data);
};
static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data);
};
static int i2o_seq_open_groups(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_groups, PDE(inode)->data);
};
static int i2o_seq_open_phys_device(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data);
};
static int i2o_seq_open_claimed(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_claimed, PDE(inode)->data);
};
static int i2o_seq_open_users(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_users, PDE(inode)->data);
};
static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data);
};
static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_authorized_users,
PDE(inode)->data);
};
static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data);
};
static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data);
};
static int i2o_seq_open_uinfo(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data);
};
static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data);
};
static int i2o_seq_open_sensors(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_sensors, PDE(inode)->data);
};
static int i2o_seq_open_dev_name(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data);
};
static const struct file_operations i2o_seq_fops_lct = {
.open = i2o_seq_open_lct,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_hrt = {
.open = i2o_seq_open_hrt,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_status = {
.open = i2o_seq_open_status,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_hw = {
.open = i2o_seq_open_hw,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_ddm_table = {
.open = i2o_seq_open_ddm_table,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_driver_store = {
.open = i2o_seq_open_driver_store,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_drivers_stored = {
.open = i2o_seq_open_drivers_stored,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_groups = {
.open = i2o_seq_open_groups,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_phys_device = {
.open = i2o_seq_open_phys_device,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_claimed = {
.open = i2o_seq_open_claimed,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_users = {
.open = i2o_seq_open_users,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_priv_msgs = {
.open = i2o_seq_open_priv_msgs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_authorized_users = {
.open = i2o_seq_open_authorized_users,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_dev_name = {
.open = i2o_seq_open_dev_name,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_dev_identity = {
.open = i2o_seq_open_dev_identity,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_ddm_identity = {
.open = i2o_seq_open_ddm_identity,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_uinfo = {
.open = i2o_seq_open_uinfo,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_sgl_limits = {
.open = i2o_seq_open_sgl_limits,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_sensors = {
.open = i2o_seq_open_sensors,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* IOP specific entries...write field just in case someone
* ever wants one.
*/
static i2o_proc_entry i2o_proc_generic_iop_entries[] = {
{"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt},
{"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct},
{"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status},
{"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw},
{"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table},
{"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store},
{"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored},
{NULL, 0, NULL}
};
/*
* Device specific entries
*/
static i2o_proc_entry generic_dev_entries[] = {
{"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups},
{"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device},
{"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed},
{"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users},
{"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs},
{"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users},
{"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity},
{"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity},
{"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo},
{"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits},
{"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors},
{NULL, 0, NULL}
};
/*
* Storage unit specific entries (SCSI Periph, BS) with device names
*/
static i2o_proc_entry rbs_dev_entries[] = {
{"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name},
{NULL, 0, NULL}
};
/**
* i2o_proc_create_entries - Creates proc dir entries
* @dir: proc dir entry under which the entries should be placed
* @i2o_pe: pointer to the entries which should be added
* @data: pointer to I2O controller or device
*
* Create proc dir entries for an I2O controller or I2O device.
*
* Returns 0 on success or negative error code on failure.
*/
static int i2o_proc_create_entries(struct proc_dir_entry *dir,
i2o_proc_entry * i2o_pe, void *data)
{
struct proc_dir_entry *tmp;
while (i2o_pe->name) {
tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
i2o_pe->fops, data);
if (!tmp)
return -1;
i2o_pe++;
}
return 0;
}
/**
* i2o_proc_subdir_remove - Remove child entries from a proc entry
* @dir: proc dir entry from which the children should be removed
*
* Iterate over each i2o proc entry under dir and remove it. If the child
* also has entries, remove them too.
*/
static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
{
struct proc_dir_entry *pe, *tmp;
pe = dir->subdir;
while (pe) {
tmp = pe->next;
i2o_proc_subdir_remove(pe);
remove_proc_entry(pe->name, dir);
pe = tmp;
}
};
/**
* i2o_proc_device_add - Add an I2O device to the proc dir
* @dir: proc dir entry to which the device should be added
* @dev: I2O device which should be added
*
* Add an I2O device to the proc dir entry dir and create the entries for
* the device depending on the class of the I2O device.
*/
static void i2o_proc_device_add(struct proc_dir_entry *dir,
struct i2o_device *dev)
{
char buff[10];
struct proc_dir_entry *devdir;
i2o_proc_entry *i2o_pe = NULL;
sprintf(buff, "%03x", dev->lct_data.tid);
osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);
devdir = proc_mkdir(buff, dir);
if (!devdir) {
osm_warn("Could not allocate procdir!\n");
return;
}
devdir->data = dev;
i2o_proc_create_entries(devdir, generic_dev_entries, dev);
/* Inform core that we want updates about this device's status */
switch (dev->lct_data.class_id) {
case I2O_CLASS_SCSI_PERIPHERAL:
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
i2o_pe = rbs_dev_entries;
break;
default:
break;
}
if (i2o_pe)
i2o_proc_create_entries(devdir, i2o_pe, dev);
}
/**
* i2o_proc_iop_add - Add an I2O controller to the i2o proc tree
* @dir: parent proc dir entry
* @c: I2O controller which should be added
*
* Add the entries to the parent proc dir entry. Also each device is added
* to the controller's proc dir entry.
*
* Returns 0 on success or negative error code on failure.
*/
static int i2o_proc_iop_add(struct proc_dir_entry *dir,
struct i2o_controller *c)
{
struct proc_dir_entry *iopdir;
struct i2o_device *dev;
osm_debug("adding IOP /proc/i2o/%s\n", c->name);
iopdir = proc_mkdir(c->name, dir);
if (!iopdir)
return -1;
iopdir->data = c;
i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);
list_for_each_entry(dev, &c->devices, list)
i2o_proc_device_add(iopdir, dev);
return 0;
}
/**
* i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
* @dir: parent proc dir entry
* @c: I2O controller which should be removed
*
* Iterate over each i2o proc entry and search for controller c. If it is
* found, remove it from the tree.
*/
static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
struct i2o_controller *c)
{
struct proc_dir_entry *pe, *tmp;
pe = dir->subdir;
while (pe) {
tmp = pe->next;
if (pe->data == c) {
i2o_proc_subdir_remove(pe);
remove_proc_entry(pe->name, dir);
}
osm_debug("removing IOP /proc/i2o/%s\n", c->name);
pe = tmp;
}
}
/**
* i2o_proc_fs_create - Create the i2o proc fs.
*
* Iterate over each I2O controller and create the entries for it.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_proc_fs_create(void)
{
struct i2o_controller *c;
i2o_proc_dir_root = proc_mkdir("i2o", NULL);
if (!i2o_proc_dir_root)
return -1;
list_for_each_entry(c, &i2o_controllers, list)
i2o_proc_iop_add(i2o_proc_dir_root, c);
return 0;
};
/**
* i2o_proc_fs_destroy - Clean up all the i2o proc entries
*
* Iterate over each I2O controller and remove the entries for it.
*
* Returns 0 on success or negative error code on failure.
*/
static int __exit i2o_proc_fs_destroy(void)
{
struct i2o_controller *c;
list_for_each_entry(c, &i2o_controllers, list)
i2o_proc_iop_remove(i2o_proc_dir_root, c);
remove_proc_entry("i2o", NULL);
return 0;
};
/**
* i2o_proc_init - Init function for procfs
*
* Registers Proc OSM and creates procfs entries.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_proc_init(void)
{
int rc;
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
rc = i2o_driver_register(&i2o_proc_driver);
if (rc)
return rc;
rc = i2o_proc_fs_create();
if (rc) {
i2o_driver_unregister(&i2o_proc_driver);
return rc;
}
return 0;
};
/**
* i2o_proc_exit - Exit function for procfs
*
* Unregisters Proc OSM and removes procfs entries.
*/
static void __exit i2o_proc_exit(void)
{
i2o_driver_unregister(&i2o_proc_driver);
i2o_proc_fs_destroy();
};
MODULE_AUTHOR("Deepak Saxena");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);
module_init(i2o_proc_init);
module_exit(i2o_proc_exit);
| gpl-2.0 |
jcadduono/idleKernel-note3 | drivers/video/mbx/mbxfb.c | 4896 | 26101 | /*
* linux/drivers/video/mbx/mbxfb.c
*
* Copyright (C) 2006-2007 8D Technologies inc
* Raphael Assenat <raph@8d.com>
* - Added video overlay support
* - Various improvements
*
* Copyright (C) 2006 Compulab, Ltd.
* Mike Rapoport <mike@compulab.co.il>
* - Creation of driver
*
* Based on pxafb.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Intel 2700G (Marathon) Graphics Accelerator Frame Buffer Driver
*
*/
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <video/mbxfb.h>
#include "regs.h"
#include "reg_bits.h"
static void __iomem *virt_base_2700;
#define write_reg(val, reg) do { writel((val), (reg)); } while(0)
/* Without this delay, the graphics appears somehow scaled and
* there is a lot of jitter in scanlines. This delay is probably
* needed only after setting some specific register(s) somewhere,
* not all over the place... */
#define write_reg_dly(val, reg) do { writel((val), reg); udelay(1000); } while(0)
#define MIN_XRES 16
#define MIN_YRES 16
#define MAX_XRES 2048
#define MAX_YRES 2048
#define MAX_PALETTES 16
/* FIXME: take care of different chip revisions with different sizes
of ODFB */
#define MEMORY_OFFSET 0x60000
struct mbxfb_info {
struct device *dev;
struct resource *fb_res;
struct resource *fb_req;
struct resource *reg_res;
struct resource *reg_req;
void __iomem *fb_virt_addr;
unsigned long fb_phys_addr;
void __iomem *reg_virt_addr;
unsigned long reg_phys_addr;
int (*platform_probe) (struct fb_info * fb);
int (*platform_remove) (struct fb_info * fb);
u32 pseudo_palette[MAX_PALETTES];
#ifdef CONFIG_FB_MBX_DEBUG
void *debugfs_data;
#endif
};
static struct fb_var_screeninfo mbxfb_default __devinitdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
.bits_per_pixel = 16,
.red = {11, 5, 0},
.green = {5, 6, 0},
.blue = {0, 5, 0},
.activate = FB_ACTIVATE_TEST,
.height = -1,
.width = -1,
.pixclock = 40000,
.left_margin = 48,
.right_margin = 16,
.upper_margin = 33,
.lower_margin = 10,
.hsync_len = 96,
.vsync_len = 2,
.vmode = FB_VMODE_NONINTERLACED,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct fb_fix_screeninfo mbxfb_fix __devinitdata = {
.id = "MBX",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 0,
.ypanstep = 0,
.ywrapstep = 0,
.accel = FB_ACCEL_NONE,
};
struct pixclock_div {
u8 m;
u8 n;
u8 p;
};
static unsigned int mbxfb_get_pixclock(unsigned int pixclock_ps,
struct pixclock_div *div)
{
u8 m, n, p;
unsigned int err = 0;
unsigned int min_err = ~0x0;
unsigned int clk;
unsigned int best_clk = 0;
unsigned int ref_clk = 13000; /* FIXME: take from platform data */
unsigned int pixclock;
/* convert pixclock to KHz */
pixclock = PICOS2KHZ(pixclock_ps);
/* PLL output freq = (ref_clk * M) / (N * 2^P)
*
* M: 1 to 63
* N: 1 to 7
* P: 0 to 7
*/
/* RAPH: When N==1, the resulting pixel clock appears to
* get divided by 2. Starting the following loop at 2 avoids
* N==1 and works around this. Is this a bug with my chip
* revision or something I don't understand? */
for (m = 1; m < 64; m++) {
for (n = 2; n < 8; n++) {
for (p = 0; p < 8; p++) {
clk = (ref_clk * m) / (n * (1 << p));
err = (clk > pixclock) ? (clk - pixclock) :
(pixclock - clk);
if (err < min_err) {
min_err = err;
best_clk = clk;
div->m = m;
div->n = n;
div->p = p;
}
}
}
}
return KHZ2PICOS(best_clk);
}
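/*
 * Worked example (illustrative, not from the original driver): with
 * ref_clk = 13000 kHz, a request for a ~100000 kHz pixel clock is best
 * served by M=0x17 (23), N=3, P=0, since (13000 * 23) / (3 * 2^0) is
 * about 99667 kHz. These are the same divider values quoted for COREPLL
 * in enable_clocks() below.
 */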
static int mbxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int trans, struct fb_info *info)
{
u32 val, ret = 1;
if (regno < MAX_PALETTES) {
u32 *pal = info->pseudo_palette;
val = (red & 0xf800) | ((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
pal[regno] = val;
ret = 0;
}
return ret;
}
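/*
 * Worked example (illustrative): fb colour components are 16-bit, so
 * full-scale green (red = 0, green = 0xffff, blue = 0) packs to
 * (0xffff & 0xfc00) >> 5 = 0x07e0 above: green occupies bits 10..5 of
 * the RGB565 pseudo-palette entry, red keeps bits 15..11 and blue lands
 * in bits 4..0.
 */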
static int mbxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct pixclock_div div;
var->pixclock = mbxfb_get_pixclock(var->pixclock, &div);
if (var->xres < MIN_XRES)
var->xres = MIN_XRES;
if (var->yres < MIN_YRES)
var->yres = MIN_YRES;
if (var->xres > MAX_XRES)
return -EINVAL;
if (var->yres > MAX_YRES)
return -EINVAL;
var->xres_virtual = max(var->xres_virtual, var->xres);
var->yres_virtual = max(var->yres_virtual, var->yres);
switch (var->bits_per_pixel) {
/* 8 bits-per-pixel is not supported yet */
case 8:
return -EINVAL;
case 16:
var->green.length = (var->green.length == 5) ? 5 : 6;
var->red.length = 5;
var->blue.length = 5;
var->transp.length = 6 - var->green.length;
var->blue.offset = 0;
var->green.offset = 5;
var->red.offset = 5 + var->green.length;
var->transp.offset = (5 + var->red.offset) & 15;
break;
case 24: /* RGB 888 */
case 32: /* RGBA 8888 */
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.length = var->bits_per_pixel - 24;
var->transp.offset = (var->transp.length) ? 24 : 0;
break;
}
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
return 0;
}
static int mbxfb_set_par(struct fb_info *info)
{
struct fb_var_screeninfo *var = &info->var;
struct pixclock_div div;
ushort hbps, ht, hfps, has;
ushort vbps, vt, vfps, vas;
u32 gsctrl = readl(GSCTRL);
u32 gsadr = readl(GSADR);
info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
/* setup color mode */
gsctrl &= ~(FMsk(GSCTRL_GPIXFMT));
/* FIXME: add *WORKING* support for 8-bits per color */
if (info->var.bits_per_pixel == 8) {
return -EINVAL;
} else {
fb_dealloc_cmap(&info->cmap);
gsctrl &= ~GSCTRL_LUT_EN;
info->fix.visual = FB_VISUAL_TRUECOLOR;
switch (info->var.bits_per_pixel) {
case 16:
if (info->var.green.length == 5)
gsctrl |= GSCTRL_GPIXFMT_ARGB1555;
else
gsctrl |= GSCTRL_GPIXFMT_RGB565;
break;
case 24:
gsctrl |= GSCTRL_GPIXFMT_RGB888;
break;
case 32:
gsctrl |= GSCTRL_GPIXFMT_ARGB8888;
break;
}
}
/* setup resolution */
gsctrl &= ~(FMsk(GSCTRL_GSWIDTH) | FMsk(GSCTRL_GSHEIGHT));
gsctrl |= Gsctrl_Width(info->var.xres) |
Gsctrl_Height(info->var.yres);
write_reg_dly(gsctrl, GSCTRL);
gsadr &= ~(FMsk(GSADR_SRCSTRIDE));
gsadr |= Gsadr_Srcstride(info->var.xres * info->var.bits_per_pixel /
(8 * 16) - 1);
write_reg_dly(gsadr, GSADR);
/* setup timings */
var->pixclock = mbxfb_get_pixclock(info->var.pixclock, &div);
write_reg_dly((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) |
Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL);
hbps = var->hsync_len;
has = hbps + var->left_margin;
hfps = has + var->xres;
ht = hfps + var->right_margin;
vbps = var->vsync_len;
vas = vbps + var->upper_margin;
vfps = vas + var->yres;
vt = vfps + var->lower_margin;
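/*
 * Worked example (illustrative): with the mbxfb_default mode above
 * (640x480, hsync_len 96, left 48, right 16, vsync_len 2, upper 33,
 * lower 10) this gives hbps=96, has=144, hfps=784, ht=800 and
 * vbps=2, vas=35, vfps=515, vt=525 - the classic 800x525 totals of
 * the 640x480@60 VGA mode.
 */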
write_reg_dly((Dht01_Hbps(hbps) | Dht01_Ht(ht)), DHT01);
write_reg_dly((Dht02_Hlbs(has) | Dht02_Has(has)), DHT02);
write_reg_dly((Dht03_Hfps(hfps) | Dht03_Hrbs(hfps)), DHT03);
write_reg_dly((Dhdet_Hdes(has) | Dhdet_Hdef(hfps)), DHDET);
write_reg_dly((Dvt01_Vbps(vbps) | Dvt01_Vt(vt)), DVT01);
write_reg_dly((Dvt02_Vtbs(vas) | Dvt02_Vas(vas)), DVT02);
write_reg_dly((Dvt03_Vfps(vfps) | Dvt03_Vbbs(vfps)), DVT03);
write_reg_dly((Dvdet_Vdes(vas) | Dvdet_Vdef(vfps)), DVDET);
write_reg_dly((Dvectrl_Vevent(vfps) | Dvectrl_Vfetch(vbps)), DVECTRL);
write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
write_reg_dly(DINTRE_VEVENT0_EN, DINTRE);
return 0;
}
static int mbxfb_blank(int blank, struct fb_info *info)
{
switch (blank) {
case FB_BLANK_POWERDOWN:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_NORMAL:
write_reg_dly((readl(DSCTRL) & ~DSCTRL_SYNCGEN_EN), DSCTRL);
write_reg_dly((readl(PIXCLK) & ~PIXCLK_EN), PIXCLK);
write_reg_dly((readl(VOVRCLK) & ~VOVRCLK_EN), VOVRCLK);
break;
case FB_BLANK_UNBLANK:
write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
write_reg_dly((readl(PIXCLK) | PIXCLK_EN), PIXCLK);
break;
}
return 0;
}
static int mbxfb_setupOverlay(struct mbxfb_overlaySetup *set)
{
u32 vsctrl, vscadr, vsadr;
u32 sssize, spoctrl, shctrl;
u32 vubase, vvbase;
u32 vovrclk;
if (set->scaled_width==0 || set->scaled_height==0)
return -EINVAL;
/* read registers which have reserved bits
* so we can write them back as-is. */
vovrclk = readl(VOVRCLK);
vsctrl = readl(VSCTRL);
vscadr = readl(VSCADR);
vubase = readl(VUBASE);
vvbase = readl(VVBASE);
shctrl = readl(SHCTRL);
spoctrl = readl(SPOCTRL);
sssize = readl(SSSIZE);
vsctrl &= ~( FMsk(VSCTRL_VSWIDTH) |
FMsk(VSCTRL_VSHEIGHT) |
FMsk(VSCTRL_VPIXFMT) |
VSCTRL_GAMMA_EN | VSCTRL_CSC_EN |
VSCTRL_COSITED );
vsctrl |= Vsctrl_Width(set->width) | Vsctrl_Height(set->height) |
VSCTRL_CSC_EN;
vscadr &= ~(VSCADR_STR_EN | FMsk(VSCADR_VBASE_ADR) );
vubase &= ~(VUBASE_UVHALFSTR | FMsk(VUBASE_UBASE_ADR));
vvbase &= ~(FMsk(VVBASE_VBASE_ADR));
switch (set->fmt) {
case MBXFB_FMT_YUV16:
vsctrl |= VSCTRL_VPIXFMT_YUV12;
set->Y_stride = ((set->width) + 0xf ) & ~0xf;
break;
case MBXFB_FMT_YUV12:
vsctrl |= VSCTRL_VPIXFMT_YUV12;
set->Y_stride = ((set->width) + 0xf ) & ~0xf;
vubase |= VUBASE_UVHALFSTR;
break;
case MBXFB_FMT_UY0VY1:
vsctrl |= VSCTRL_VPIXFMT_UY0VY1;
set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
break;
case MBXFB_FMT_VY0UY1:
vsctrl |= VSCTRL_VPIXFMT_VY0UY1;
set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
break;
case MBXFB_FMT_Y0UY1V:
vsctrl |= VSCTRL_VPIXFMT_Y0UY1V;
set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
break;
case MBXFB_FMT_Y0VY1U:
vsctrl |= VSCTRL_VPIXFMT_Y0VY1U;
set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
break;
default:
return -EINVAL;
}
/* VSCTRL contains the bits which set the Video Pixel Format.
* When switching from a packed to a planar format, if we wrote
* VSCTRL first, VVBASE and VUBASE would be zero unless we set
* them here (and then the chip hangs and only a reset seems
* to fix it).
*
* Of course, the values calculated here have no meaning for
* packed formats.
*/
set->UV_stride = ((set->width/2) + 0x7 ) & ~0x7;
set->U_offset = set->height * set->Y_stride;
set->V_offset = set->U_offset +
set->height * set->UV_stride;
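/* Worked example (illustrative): for a 100-pixel-wide planar overlay,
 * Y_stride rounds up to 112 ((100 + 15) & ~15) and UV_stride to 56
 * ((50 + 7) & ~7); the U plane then starts height * 112 bytes into the
 * buffer and the V plane a further height * 56 bytes after the U plane. */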
vubase |= Vubase_Ubase_Adr(
(0x60000 + set->mem_offset + set->U_offset)>>3);
vvbase |= Vvbase_Vbase_Adr(
(0x60000 + set->mem_offset + set->V_offset)>>3);
vscadr |= Vscadr_Vbase_Adr((0x60000 + set->mem_offset)>>4);
if (set->enable)
vscadr |= VSCADR_STR_EN;
vsadr = Vsadr_Srcstride((set->Y_stride)/16-1) |
Vsadr_Xstart(set->x) | Vsadr_Ystart(set->y);
sssize &= ~(FMsk(SSSIZE_SC_WIDTH) | FMsk(SSSIZE_SC_HEIGHT));
/* |= (not =) so the reserved bits read back above are preserved */
sssize |= Sssize_Sc_Width(set->scaled_width-1) |
Sssize_Sc_Height(set->scaled_height-1);
spoctrl &= ~(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP |
SPOCTRL_HV_SC_OR | SPOCTRL_VS_UR_C |
FMsk(SPOCTRL_VPITCH));
spoctrl |= Spoctrl_Vpitch((set->height<<11)/set->scaled_height);
/* Bypass horiz/vert scaler when same size */
if (set->scaled_width == set->width)
spoctrl |= SPOCTRL_H_SC_BP;
if (set->scaled_height == set->height)
spoctrl |= SPOCTRL_V_SC_BP;
shctrl &= ~(FMsk(SHCTRL_HPITCH) | SHCTRL_HDECIM);
shctrl |= Shctrl_Hpitch((set->width<<11)/set->scaled_width);
/* Video plane registers */
write_reg(vsctrl, VSCTRL);
write_reg(vscadr, VSCADR);
write_reg(vubase, VUBASE);
write_reg(vvbase, VVBASE);
write_reg(vsadr, VSADR);
/* Video scaler registers */
write_reg(sssize, SSSIZE);
write_reg(spoctrl, SPOCTRL);
write_reg(shctrl, SHCTRL);
/* Clock */
if (set->enable)
vovrclk |= 1;
else
vovrclk &= ~1;
write_reg(vovrclk, VOVRCLK);
return 0;
}
static int mbxfb_ioctl_planeorder(struct mbxfb_planeorder *porder)
{
unsigned long gscadr, vscadr;
if (porder->bottom == porder->top)
return -EINVAL;
gscadr = readl(GSCADR);
vscadr = readl(VSCADR);
gscadr &= ~(FMsk(GSCADR_BLEND_POS));
vscadr &= ~(FMsk(VSCADR_BLEND_POS));
switch (porder->bottom) {
case MBXFB_PLANE_GRAPHICS:
gscadr |= GSCADR_BLEND_GFX;
break;
case MBXFB_PLANE_VIDEO:
vscadr |= VSCADR_BLEND_GFX;
break;
default:
return -EINVAL;
}
switch (porder->top) {
case MBXFB_PLANE_GRAPHICS:
gscadr |= GSCADR_BLEND_VID;
break;
case MBXFB_PLANE_VIDEO:
vscadr |= VSCADR_BLEND_VID;
break;
default:
return -EINVAL;
}
write_reg_dly(vscadr, VSCADR);
write_reg_dly(gscadr, GSCADR);
return 0;
}
static int mbxfb_ioctl_alphactl(struct mbxfb_alphaCtl *alpha)
{
unsigned long vscadr, vbbase, vcmsk;
unsigned long gscadr, gbbase, gdrctrl;
vbbase = Vbbase_Glalpha(alpha->overlay_global_alpha) |
Vbbase_Colkey(alpha->overlay_colorkey);
gbbase = Gbbase_Glalpha(alpha->graphics_global_alpha) |
Gbbase_Colkey(alpha->graphics_colorkey);
vcmsk = readl(VCMSK);
vcmsk &= ~(FMsk(VCMSK_COLKEY_M));
vcmsk |= Vcmsk_colkey_m(alpha->overlay_colorkey_mask);
gdrctrl = readl(GDRCTRL);
gdrctrl &= ~(FMsk(GDRCTRL_COLKEYM));
gdrctrl |= Gdrctrl_Colkeym(alpha->graphics_colorkey_mask);
vscadr = readl(VSCADR);
vscadr &= ~(FMsk(VSCADR_BLEND_M) | VSCADR_COLKEYSRC | VSCADR_COLKEY_EN);
gscadr = readl(GSCADR);
gscadr &= ~(FMsk(GSCADR_BLEND_M) | GSCADR_COLKEY_EN | GSCADR_COLKEYSRC);
switch (alpha->overlay_colorkey_mode) {
case MBXFB_COLORKEY_DISABLED:
break;
case MBXFB_COLORKEY_PREVIOUS:
vscadr |= VSCADR_COLKEY_EN;
break;
case MBXFB_COLORKEY_CURRENT:
vscadr |= VSCADR_COLKEY_EN | VSCADR_COLKEYSRC;
break;
default:
return -EINVAL;
}
switch (alpha->overlay_blend_mode) {
case MBXFB_ALPHABLEND_NONE:
vscadr |= VSCADR_BLEND_NONE;
break;
case MBXFB_ALPHABLEND_GLOBAL:
vscadr |= VSCADR_BLEND_GLOB;
break;
case MBXFB_ALPHABLEND_PIXEL:
vscadr |= VSCADR_BLEND_PIX;
break;
default:
return -EINVAL;
}
switch (alpha->graphics_colorkey_mode) {
case MBXFB_COLORKEY_DISABLED:
break;
case MBXFB_COLORKEY_PREVIOUS:
gscadr |= GSCADR_COLKEY_EN;
break;
case MBXFB_COLORKEY_CURRENT:
gscadr |= GSCADR_COLKEY_EN | GSCADR_COLKEYSRC;
break;
default:
return -EINVAL;
}
switch (alpha->graphics_blend_mode) {
case MBXFB_ALPHABLEND_NONE:
gscadr |= GSCADR_BLEND_NONE;
break;
case MBXFB_ALPHABLEND_GLOBAL:
gscadr |= GSCADR_BLEND_GLOB;
break;
case MBXFB_ALPHABLEND_PIXEL:
gscadr |= GSCADR_BLEND_PIX;
break;
default:
return -EINVAL;
}
write_reg_dly(vbbase, VBBASE);
write_reg_dly(gbbase, GBBASE);
write_reg_dly(vcmsk, VCMSK);
write_reg_dly(gdrctrl, GDRCTRL);
write_reg_dly(gscadr, GSCADR);
write_reg_dly(vscadr, VSCADR);
return 0;
}
static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct mbxfb_overlaySetup setup;
struct mbxfb_planeorder porder;
struct mbxfb_alphaCtl alpha;
struct mbxfb_reg reg;
int res;
__u32 tmp;
switch (cmd)
{
case MBXFB_IOCX_OVERLAY:
if (copy_from_user(&setup, (void __user*)arg,
sizeof(struct mbxfb_overlaySetup)))
return -EFAULT;
res = mbxfb_setupOverlay(&setup);
if (res)
return res;
if (copy_to_user((void __user*)arg, &setup,
sizeof(struct mbxfb_overlaySetup)))
return -EFAULT;
return 0;
case MBXFB_IOCS_PLANEORDER:
if (copy_from_user(&porder, (void __user*)arg,
sizeof(struct mbxfb_planeorder)))
return -EFAULT;
return mbxfb_ioctl_planeorder(&porder);
case MBXFB_IOCS_ALPHA:
if (copy_from_user(&alpha, (void __user*)arg,
sizeof(struct mbxfb_alphaCtl)))
return -EFAULT;
return mbxfb_ioctl_alphactl(&alpha);
case MBXFB_IOCS_REG:
if (copy_from_user(&reg, (void __user*)arg,
sizeof(struct mbxfb_reg)))
return -EFAULT;
if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */
return -EINVAL;
tmp = readl(virt_base_2700 + reg.addr);
tmp &= ~reg.mask;
tmp |= reg.val & reg.mask;
writel(tmp, virt_base_2700 + reg.addr);
return 0;
case MBXFB_IOCX_REG:
if (copy_from_user(&reg, (void __user*)arg,
sizeof(struct mbxfb_reg)))
return -EFAULT;
if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */
return -EINVAL;
reg.val = readl(virt_base_2700 + reg.addr);
if (copy_to_user((void __user*)arg, &reg,
sizeof(struct mbxfb_reg)))
return -EFAULT;
return 0;
}
return -EINVAL;
}
static struct fb_ops mbxfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = mbxfb_check_var,
.fb_set_par = mbxfb_set_par,
.fb_setcolreg = mbxfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_blank = mbxfb_blank,
.fb_ioctl = mbxfb_ioctl,
};
/*
Enable external SDRAM controller. Assume that all clocks are active
by now.
*/
static void __devinit setup_memc(struct fb_info *fbi)
{
unsigned long tmp;
int i;
/* FIXME: use platform specific parameters */
/* setup SDRAM controller */
write_reg_dly((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS |
LMCFG_LMA_TS),
LMCFG);
write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR);
/* setup SDRAM timings */
write_reg_dly((Lmtim_Tras(7) | Lmtim_Trp(3) | Lmtim_Trcd(3) |
Lmtim_Trc(9) | Lmtim_Tdpl(2)),
LMTIM);
/* setup SDRAM refresh rate */
write_reg_dly(0xc2b, LMREFRESH);
/* setup SDRAM type parameters */
write_reg_dly((LMTYPE_CASLAT_3 | LMTYPE_BKSZ_2 | LMTYPE_ROWSZ_11 |
LMTYPE_COLSZ_8),
LMTYPE);
/* enable memory controller */
write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR);
/* perform dummy reads */
for ( i = 0; i < 16; i++ ) {
tmp = readl(fbi->screen_base);
}
}
static void enable_clocks(struct fb_info *fbi)
{
/* enable clocks */
write_reg_dly(SYSCLKSRC_PLL_2, SYSCLKSRC);
write_reg_dly(PIXCLKSRC_PLL_1, PIXCLKSRC);
write_reg_dly(0x00000000, CLKSLEEP);
/* PLL output = (Frefclk * M) / (N * 2^P)
*
* M: 0x17, N: 0x3, P: 0x0 == 100 MHz
* M: 0xb, N: 0x1, P: 0x1 == 71 MHz
*/
write_reg_dly((Core_Pll_M(0xb) | Core_Pll_N(0x1) | Core_Pll_P(0x1) |
CORE_PLL_EN),
COREPLL);
write_reg_dly((Disp_Pll_M(0x1b) | Disp_Pll_N(0x7) | Disp_Pll_P(0x1) |
DISP_PLL_EN),
DISPPLL);
write_reg_dly(0x00000000, VOVRCLK);
write_reg_dly(PIXCLK_EN, PIXCLK);
write_reg_dly(MEMCLK_EN, MEMCLK);
write_reg_dly(0x00000001, M24CLK);
write_reg_dly(0x00000001, MBXCLK);
write_reg_dly(SDCLK_EN, SDCLK);
write_reg_dly(0x00000001, PIXCLKDIV);
}
static void __devinit setup_graphics(struct fb_info *fbi)
{
unsigned long gsctrl;
unsigned long vscadr;
gsctrl = GSCTRL_GAMMA_EN | Gsctrl_Width(fbi->var.xres) |
Gsctrl_Height(fbi->var.yres);
switch (fbi->var.bits_per_pixel) {
case 16:
if (fbi->var.green.length == 5)
gsctrl |= GSCTRL_GPIXFMT_ARGB1555;
else
gsctrl |= GSCTRL_GPIXFMT_RGB565;
break;
case 24:
gsctrl |= GSCTRL_GPIXFMT_RGB888;
break;
case 32:
gsctrl |= GSCTRL_GPIXFMT_ARGB8888;
break;
}
write_reg_dly(gsctrl, GSCTRL);
write_reg_dly(0x00000000, GBBASE);
write_reg_dly(0x00ffffff, GDRCTRL);
write_reg_dly((GSCADR_STR_EN | Gscadr_Gbase_Adr(0x6000)), GSCADR);
write_reg_dly(0x00000000, GPLUT);
vscadr = readl(VSCADR);
vscadr &= ~(FMsk(VSCADR_BLEND_POS) | FMsk(VSCADR_BLEND_M));
vscadr |= VSCADR_BLEND_VID | VSCADR_BLEND_NONE;
write_reg_dly(vscadr, VSCADR);
}
static void __devinit setup_display(struct fb_info *fbi)
{
unsigned long dsctrl = 0;
dsctrl = DSCTRL_BLNK_POL;
if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
dsctrl |= DSCTRL_HS_POL;
if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
dsctrl |= DSCTRL_VS_POL;
write_reg_dly(dsctrl, DSCTRL);
write_reg_dly(0xd0303010, DMCTRL);
write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
}
static void __devinit enable_controller(struct fb_info *fbi)
{
u32 svctrl, shctrl;
write_reg_dly(SYSRST_RST, SYSRST);
/* setup a timeout, raise drive strength */
write_reg_dly(0xffffff0c, SYSCFG);
enable_clocks(fbi);
setup_memc(fbi);
setup_graphics(fbi);
setup_display(fbi);
shctrl = readl(SHCTRL);
shctrl &= ~(FMsk(SHCTRL_HINITIAL));
shctrl |= Shctrl_Hinitial(4<<11);
writel(shctrl, SHCTRL);
svctrl = Svctrl_Initial1(1<<10) | Svctrl_Initial2(1<<10);
writel(svctrl, SVCTRL);
writel(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP | SPOCTRL_VORDER_4TAP
, SPOCTRL);
/* Those coefficients are good for scaling up. For scaling
* down, the application has to calculate them. */
write_reg(0xff000100, VSCOEFF0);
write_reg(0xfdfcfdfe, VSCOEFF1);
write_reg(0x170d0500, VSCOEFF2);
write_reg(0x3d372d22, VSCOEFF3);
write_reg(0x00000040, VSCOEFF4);
write_reg(0xff010100, HSCOEFF0);
write_reg(0x00000000, HSCOEFF1);
write_reg(0x02010000, HSCOEFF2);
write_reg(0x01020302, HSCOEFF3);
write_reg(0xf9fbfe00, HSCOEFF4);
write_reg(0xfbf7f6f7, HSCOEFF5);
write_reg(0x1c110700, HSCOEFF6);
write_reg(0x3e393127, HSCOEFF7);
write_reg(0x00000040, HSCOEFF8);
}
#ifdef CONFIG_PM
/*
* Power management hooks. Note that we won't be called from IRQ context,
* unlike the blank functions above, so we may sleep.
*/
static int mbxfb_suspend(struct platform_device *dev, pm_message_t state)
{
/* make frame buffer memory enter self-refresh mode */
write_reg_dly(LMPWR_MC_PWR_SRM, LMPWR);
while (readl(LMPWRSTAT) != LMPWRSTAT_MC_PWR_SRM)
; /* empty statement */
/* reset the device, since its initial state is 'mostly sleeping' */
write_reg_dly(SYSRST_RST, SYSRST);
return 0;
}
static int mbxfb_resume(struct platform_device *dev)
{
struct fb_info *fbi = platform_get_drvdata(dev);
enable_clocks(fbi);
/* setup_graphics(fbi); */
/* setup_display(fbi); */
write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
return 0;
}
#else
#define mbxfb_suspend NULL
#define mbxfb_resume NULL
#endif
/* debugfs entries */
#ifndef CONFIG_FB_MBX_DEBUG
#define mbxfb_debugfs_init(x) do {} while(0)
#define mbxfb_debugfs_remove(x) do {} while(0)
#endif
#define res_size(_r) (((_r)->end - (_r)->start) + 1)
static int __devinit mbxfb_probe(struct platform_device *dev)
{
int ret;
struct fb_info *fbi;
struct mbxfb_info *mfbi;
struct mbxfb_platform_data *pdata;
dev_dbg(&dev->dev, "mbxfb_probe\n");
pdata = dev->dev.platform_data;
if (!pdata) {
dev_err(&dev->dev, "platform data is required\n");
return -EINVAL;
}
fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev);
if (fbi == NULL) {
dev_err(&dev->dev, "framebuffer_alloc failed\n");
return -ENOMEM;
}
mfbi = fbi->par;
fbi->pseudo_palette = mfbi->pseudo_palette;
if (pdata->probe)
mfbi->platform_probe = pdata->probe;
if (pdata->remove)
mfbi->platform_remove = pdata->remove;
mfbi->fb_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
mfbi->reg_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
if (!mfbi->fb_res || !mfbi->reg_res) {
dev_err(&dev->dev, "no resources found\n");
ret = -ENODEV;
goto err1;
}
mfbi->fb_req = request_mem_region(mfbi->fb_res->start,
res_size(mfbi->fb_res), dev->name);
if (mfbi->fb_req == NULL) {
dev_err(&dev->dev, "failed to claim framebuffer memory\n");
ret = -EINVAL;
goto err1;
}
mfbi->fb_phys_addr = mfbi->fb_res->start;
mfbi->reg_req = request_mem_region(mfbi->reg_res->start,
res_size(mfbi->reg_res), dev->name);
if (mfbi->reg_req == NULL) {
dev_err(&dev->dev, "failed to claim Marathon registers\n");
ret = -EINVAL;
goto err2;
}
mfbi->reg_phys_addr = mfbi->reg_res->start;
mfbi->reg_virt_addr = ioremap_nocache(mfbi->reg_phys_addr,
res_size(mfbi->reg_req));
if (!mfbi->reg_virt_addr) {
dev_err(&dev->dev, "failed to ioremap Marathon registers\n");
ret = -EINVAL;
goto err3;
}
virt_base_2700 = mfbi->reg_virt_addr;
mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
if (!mfbi->fb_virt_addr) {
dev_err(&dev->dev, "failed to ioremap frame buffer\n");
ret = -EINVAL;
goto err4;
}
fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000);
fbi->screen_size = pdata->memsize;
fbi->fbops = &mbxfb_ops;
fbi->var = mbxfb_default;
fbi->fix = mbxfb_fix;
fbi->fix.smem_start = mfbi->fb_phys_addr + 0x60000;
fbi->fix.smem_len = pdata->memsize;
fbi->fix.line_length = mbxfb_default.xres_virtual *
mbxfb_default.bits_per_pixel / 8;
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret < 0) {
dev_err(&dev->dev, "fb_alloc_cmap failed\n");
ret = -EINVAL;
goto err5;
}
platform_set_drvdata(dev, fbi);
printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node);
if (mfbi->platform_probe)
mfbi->platform_probe(fbi);
enable_controller(fbi);
mbxfb_debugfs_init(fbi);
ret = register_framebuffer(fbi);
if (ret < 0) {
dev_err(&dev->dev, "register_framebuffer failed\n");
ret = -EINVAL;
goto err6;
}
return 0;
err6:
fb_dealloc_cmap(&fbi->cmap);
err5:
iounmap(mfbi->fb_virt_addr);
err4:
iounmap(mfbi->reg_virt_addr);
err3:
release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res));
err2:
release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res));
err1:
framebuffer_release(fbi);
return ret;
}
static int __devexit mbxfb_remove(struct platform_device *dev)
{
struct fb_info *fbi = platform_get_drvdata(dev);
write_reg_dly(SYSRST_RST, SYSRST);
mbxfb_debugfs_remove(fbi);
if (fbi) {
struct mbxfb_info *mfbi = fbi->par;
unregister_framebuffer(fbi);
if (mfbi) {
if (mfbi->platform_remove)
mfbi->platform_remove(fbi);
if (mfbi->fb_virt_addr)
iounmap(mfbi->fb_virt_addr);
if (mfbi->reg_virt_addr)
iounmap(mfbi->reg_virt_addr);
if (mfbi->reg_req)
release_mem_region(mfbi->reg_req->start,
res_size(mfbi->reg_req));
if (mfbi->fb_req)
release_mem_region(mfbi->fb_req->start,
res_size(mfbi->fb_req));
}
framebuffer_release(fbi);
}
return 0;
}
static struct platform_driver mbxfb_driver = {
.probe = mbxfb_probe,
.remove = mbxfb_remove,
.suspend = mbxfb_suspend,
.resume = mbxfb_resume,
.driver = {
.name = "mbx-fb",
},
};
module_platform_driver(mbxfb_driver);
MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device");
MODULE_AUTHOR("Mike Rapoport, Compulab");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Klozz/android_kernel_motorola_msm8226-old | drivers/staging/comedi/drivers/amplc_pci230.c | 4896 | 94645 | /*
comedi/drivers/amplc_pci230.c
Driver for Amplicon PCI230 and PCI260 Multifunction I/O boards.
Copyright (C) 2001 Allan Willcox <allanwillcox@ozemail.com.au>
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: amplc_pci230
Description: Amplicon PCI230, PCI260 Multifunction I/O boards
Author: Allan Willcox <allanwillcox@ozemail.com.au>,
Steve D Sharples <steve.sharples@nottingham.ac.uk>,
Ian Abbott <abbotti@mev.co.uk>
Updated: Wed, 22 Oct 2008 12:34:49 +0100
Devices: [Amplicon] PCI230 (pci230 or amplc_pci230),
PCI230+ (pci230+ or amplc_pci230),
PCI260 (pci260 or amplc_pci230), PCI260+ (pci260+ or amplc_pci230)
Status: works
Configuration options:
[0] - PCI bus of device (optional).
[1] - PCI slot of device (optional).
If bus/slot is not specified, the first available PCI device
will be used.
Configuring a "amplc_pci230" will match any supported card and it will
choose the best match, picking the "+" models if possible. Configuring
a "pci230" will match a PCI230 or PCI230+ card and it will be treated as
a PCI230. Configuring a "pci260" will match a PCI260 or PCI260+ card
and it will be treated as a PCI260. Configuring a "pci230+" will match
a PCI230+ card. Configuring a "pci260+" will match a PCI260+ card.
Subdevices:
PCI230(+) PCI260(+)
--------- ---------
Subdevices 3 1
0 AI AI
1 AO
2 DIO
AI Subdevice:
The AI subdevice has 16 single-ended channels or 8 differential
channels.
The PCI230 and PCI260 cards have 12-bit resolution. The PCI230+ and
PCI260+ cards have 16-bit resolution.
For differential mode, use inputs 2N and 2N+1 for channel N (e.g. use
inputs 14 and 15 for channel 7). If the card is physically a PCI230
or PCI260 then it actually uses a "pseudo-differential" mode where the
inputs are sampled a few microseconds apart. The PCI230+ and PCI260+
use true differential sampling. Another difference is that if the
card is physically a PCI230 or PCI260, the inverting input is 2N,
whereas for a PCI230+ or PCI260+ the inverting input is 2N+1. So if a
PCI230 is physically replaced by a PCI230+ (or a PCI260 with a
PCI260+) and differential mode is used, the differential inputs need
to be physically swapped on the connector.
The following input ranges are supported:
0 => [-10, +10] V
1 => [-5, +5] V
2 => [-2.5, +2.5] V
3 => [-1.25, +1.25] V
4 => [0, 10] V
5 => [0, 5] V
6 => [0, 2.5] V
AI Commands:
+=========+==============+===========+============+==========+
|start_src|scan_begin_src|convert_src|scan_end_src| stop_src |
+=========+==============+===========+============+==========+
|TRIG_NOW | TRIG_FOLLOW |TRIG_TIMER | TRIG_COUNT |TRIG_NONE |
|TRIG_INT | |TRIG_EXT(3)| |TRIG_COUNT|
| | |TRIG_INT | | |
| |--------------|-----------| | |
| | TRIG_TIMER(1)|TRIG_TIMER | | |
| | TRIG_EXT(2) | | | |
| | TRIG_INT | | | |
+---------+--------------+-----------+------------+----------+
Note 1: If AI command and AO command are used simultaneously, only
one may have scan_begin_src == TRIG_TIMER.
Note 2: For PCI230 and PCI230+, scan_begin_src == TRIG_EXT uses
DIO channel 16 (pin 49) which will need to be configured as
a digital input. For PCI260+, the EXTTRIG/EXTCONVCLK input
(pin 17) is used instead. For PCI230, scan_begin_src ==
TRIG_EXT is not supported. The trigger is a rising edge
on the input.
Note 3: For convert_src == TRIG_EXT, the EXTTRIG/EXTCONVCLK input
(pin 25 on PCI230(+), pin 17 on PCI260(+)) is used. The
convert_arg value is interpreted as follows:
convert_arg == (CR_EDGE | 0) => rising edge
convert_arg == (CR_EDGE | CR_INVERT | 0) => falling edge
convert_arg == 0 => falling edge (backwards compatibility)
convert_arg == 1 => rising edge (backwards compatibility)
All entries in the channel list must use the same analogue reference.
If the analogue reference is not AREF_DIFF (not differential) each
pair of channel numbers (0 and 1, 2 and 3, etc.) must use the same
input range. The input ranges used in the sequence must be all
bipolar (ranges 0 to 3) or all unipolar (ranges 4 to 6). The channel
sequence must consist of 1 or more identical subsequences. Within the
subsequence, channels must be in ascending order with no repeated
channels. For example, the following sequences are valid: 0 1 2 3
(single valid subsequence), 0 2 3 5 0 2 3 5 (repeated valid
subsequence), 1 1 1 1 (repeated valid subsequence). The following
sequences are invalid: 0 3 2 1 (invalid subsequence), 0 2 3 5 0 2 3
(incompletely repeated subsequence). Some versions of the PCI230+ and
PCI260+ have a bug that requires a subsequence longer than one entry
long to include channel 0.
AO Subdevice:
The AO subdevice has 2 channels with 12-bit resolution.
The following output ranges are supported:
0 => [0, 10] V
1 => [-10, +10] V
AO Commands:
+=========+==============+===========+============+==========+
|start_src|scan_begin_src|convert_src|scan_end_src| stop_src |
+=========+==============+===========+============+==========+
|TRIG_INT | TRIG_TIMER(1)| TRIG_NOW | TRIG_COUNT |TRIG_NONE |
| | TRIG_EXT(2) | | |TRIG_COUNT|
| | TRIG_INT | | | |
+---------+--------------+-----------+------------+----------+
Note 1: If AI command and AO command are used simultaneously, only
one may have scan_begin_src == TRIG_TIMER.
Note 2: scan_begin_src == TRIG_EXT is only supported if the card is
configured as a PCI230+ and is only supported on later
versions of the card. As a card configured as a PCI230+ is
not guaranteed to support external triggering, please consider
this support to be a bonus. It uses the EXTTRIG/EXTCONVCLK
input (PCI230+ pin 25). Triggering will be on the rising edge
unless the CR_INVERT flag is set in scan_begin_arg.
The channels in the channel sequence must be in ascending order with
no repeats. All entries in the channel sequence must use the same
output range.
DIO Subdevice:
The DIO subdevice is a 8255 chip providing 24 DIO channels. The DIO
channels are configurable as inputs or outputs in four groups:
Port A - channels 0 to 7
Port B - channels 8 to 15
Port CL - channels 16 to 19
Port CH - channels 20 to 23
Only mode 0 of the 8255 chip is supported.
Bit 0 of port C (DIO channel 16) is also used as an external scan
trigger input for AI commands on PCI230 and PCI230+, so would need to
be configured as an input to use it for that purpose.
*/
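/*
 * Illustrative user-space sketch (not part of the driver; the device path
 * and subdevice number are assumptions): a timed AI command obeying the
 * rules above, set up with comedilib. Channels 0..3 are in ascending
 * order and share bipolar range 1 ([-5, +5] V), so the sequence is valid.
 *
 *	comedi_t *it = comedi_open("/dev/comedi0");
 *	unsigned int chans[4] = {
 *		CR_PACK(0, 1, AREF_GROUND), CR_PACK(1, 1, AREF_GROUND),
 *		CR_PACK(2, 1, AREF_GROUND), CR_PACK(3, 1, AREF_GROUND)
 *	};
 *	comedi_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.subdev = 0;			// AI subdevice
 *	cmd.start_src = TRIG_NOW;
 *	cmd.scan_begin_src = TRIG_FOLLOW;
 *	cmd.convert_src = TRIG_TIMER;
 *	cmd.convert_arg = 10000;	// 10 us between conversions
 *	cmd.scan_end_src = TRIG_COUNT;
 *	cmd.scan_end_arg = 4;
 *	cmd.stop_src = TRIG_COUNT;
 *	cmd.stop_arg = 1000;		// acquire 1000 scans, then stop
 *	cmd.chanlist = chans;
 *	cmd.chanlist_len = 4;
 *	comedi_command(it, &cmd);	// then read() samples from the fd
 */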
/*
Extra triggered scan functionality, interrupt bug-fix added by Steve Sharples.
Support for PCI230+/260+, more triggered scan functionality, and workarounds
for (or detection of) various hardware problems added by Ian Abbott.
*/
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "comedi_pci.h"
#include "8253.h"
#include "8255.h"
/* PCI230 PCI configuration register information */
#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_DEVICE_ID_PCI230 0x0000
#define PCI_DEVICE_ID_PCI260 0x0006
#define PCI_DEVICE_ID_INVALID 0xffff
#define PCI230_IO1_SIZE 32 /* Size of I/O space 1 */
#define PCI230_IO2_SIZE 16 /* Size of I/O space 2 */
/* PCI230 i/o space 1 registers. */
#define PCI230_PPI_X_BASE 0x00 /* User PPI (82C55) base */
#define PCI230_PPI_X_A 0x00 /* User PPI (82C55) port A */
#define PCI230_PPI_X_B 0x01 /* User PPI (82C55) port B */
#define PCI230_PPI_X_C 0x02 /* User PPI (82C55) port C */
#define PCI230_PPI_X_CMD 0x03 /* User PPI (82C55) control word */
#define PCI230_Z2_CT_BASE 0x14 /* 82C54 counter/timer base */
#define PCI230_Z2_CT0 0x14 /* 82C54 counter/timer 0 */
#define PCI230_Z2_CT1 0x15 /* 82C54 counter/timer 1 */
#define PCI230_Z2_CT2 0x16 /* 82C54 counter/timer 2 */
#define PCI230_Z2_CTC 0x17 /* 82C54 counter/timer control word */
#define PCI230_ZCLK_SCE 0x1A /* Group Z Clock Configuration */
#define PCI230_ZGAT_SCE 0x1D /* Group Z Gate Configuration */
#define PCI230_INT_SCE 0x1E /* Interrupt source mask (w) */
#define PCI230_INT_STAT 0x1E /* Interrupt status (r) */
/* PCI230 i/o space 2 registers. */
#define PCI230_DACCON 0x00 /* DAC control */
#define PCI230_DACOUT1 0x02 /* DAC channel 0 (w) */
#define PCI230_DACOUT2 0x04 /* DAC channel 1 (w) (not FIFO mode) */
#define PCI230_ADCDATA 0x08 /* ADC data (r) */
#define PCI230_ADCSWTRIG 0x08 /* ADC software trigger (w) */
#define PCI230_ADCCON 0x0A /* ADC control */
#define PCI230_ADCEN 0x0C /* ADC channel enable bits */
#define PCI230_ADCG 0x0E /* ADC gain control bits */
/* PCI230+ i/o space 2 additional registers. */
#define PCI230P_ADCTRIG 0x10 /* ADC start acquisition trigger */
#define PCI230P_ADCTH 0x12 /* ADC analog trigger threshold */
#define PCI230P_ADCFFTH 0x14 /* ADC FIFO interrupt threshold */
#define PCI230P_ADCFFLEV 0x16 /* ADC FIFO level (r) */
#define PCI230P_ADCPTSC 0x18 /* ADC pre-trigger sample count (r) */
#define PCI230P_ADCHYST 0x1A /* ADC analog trigger hysteresis */
#define PCI230P_EXTFUNC 0x1C /* Extended functions */
#define PCI230P_HWVER 0x1E /* Hardware version (r) */
/* PCI230+ hardware version 2 onwards. */
#define PCI230P2_DACDATA 0x02 /* DAC data (FIFO mode) (w) */
#define PCI230P2_DACSWTRIG 0x02 /* DAC soft trigger (FIFO mode) (r) */
#define PCI230P2_DACEN 0x06 /* DAC channel enable (FIFO mode) */
/* Convertor related constants. */
#define PCI230_DAC_SETTLE 5 /* Analogue output settling time in µs */
/* (DAC itself is 1µs nominally). */
#define PCI230_ADC_SETTLE 1 /* Analogue input settling time in µs */
/* (ADC itself is 1.6µs nominally but we poll
* anyway). */
#define PCI230_MUX_SETTLE 10 /* ADC MUX settling time in µs */
/* - 10µs for se, 20µs de. */
/* DACCON read-write values. */
#define PCI230_DAC_OR_UNI (0<<0) /* Output range unipolar */
#define PCI230_DAC_OR_BIP (1<<0) /* Output range bipolar */
#define PCI230_DAC_OR_MASK (1<<0)
/* The following applies only if DAC FIFO support is enabled in the EXTFUNC
* register (and only for PCI230+ hardware version 2 onwards). */
#define PCI230P2_DAC_FIFO_EN (1<<8) /* FIFO enable */
/* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards). */
#define PCI230P2_DAC_TRIG_NONE (0<<2) /* No trigger */
#define PCI230P2_DAC_TRIG_SW (1<<2) /* Software trigger */
#define PCI230P2_DAC_TRIG_EXTP (2<<2) /* EXTTRIG +ve edge trigger */
#define PCI230P2_DAC_TRIG_EXTN (3<<2) /* EXTTRIG -ve edge trigger */
#define PCI230P2_DAC_TRIG_Z2CT0 (4<<2) /* CT0-OUT +ve edge trigger */
#define PCI230P2_DAC_TRIG_Z2CT1 (5<<2) /* CT1-OUT +ve edge trigger */
#define PCI230P2_DAC_TRIG_Z2CT2 (6<<2) /* CT2-OUT +ve edge trigger */
#define PCI230P2_DAC_TRIG_MASK (7<<2)
#define PCI230P2_DAC_FIFO_WRAP (1<<7) /* FIFO wraparound mode */
#define PCI230P2_DAC_INT_FIFO_EMPTY (0<<9) /* FIFO interrupt empty */
#define PCI230P2_DAC_INT_FIFO_NEMPTY (1<<9)
#define PCI230P2_DAC_INT_FIFO_NHALF (2<<9) /* FIFO intr not half full */
#define PCI230P2_DAC_INT_FIFO_HALF (3<<9)
#define PCI230P2_DAC_INT_FIFO_NFULL (4<<9) /* FIFO interrupt not full */
#define PCI230P2_DAC_INT_FIFO_FULL (5<<9)
#define PCI230P2_DAC_INT_FIFO_MASK (7<<9)
/* DACCON read-only values. */
#define PCI230_DAC_BUSY (1<<1) /* DAC busy. */
/* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards). */
#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED (1<<5) /* Underrun error */
#define PCI230P2_DAC_FIFO_EMPTY (1<<13) /* FIFO empty */
#define PCI230P2_DAC_FIFO_FULL (1<<14) /* FIFO full */
#define PCI230P2_DAC_FIFO_HALF (1<<15) /* FIFO half full */
/* DACCON write-only, transient values. */
/* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards). */
#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR (1<<5) /* Clear underrun */
#define PCI230P2_DAC_FIFO_RESET (1<<12) /* FIFO reset */
/* PCI230+ hardware version 2 DAC FIFO levels. */
#define PCI230P2_DAC_FIFOLEVEL_HALF 512
#define PCI230P2_DAC_FIFOLEVEL_FULL 1024
/* Free space in DAC FIFO. */
#define PCI230P2_DAC_FIFOROOM_EMPTY PCI230P2_DAC_FIFOLEVEL_FULL
#define PCI230P2_DAC_FIFOROOM_ONETOHALF \
(PCI230P2_DAC_FIFOLEVEL_FULL - PCI230P2_DAC_FIFOLEVEL_HALF)
#define PCI230P2_DAC_FIFOROOM_HALFTOFULL 1
#define PCI230P2_DAC_FIFOROOM_FULL 0
/* ADCCON read/write values. */
#define PCI230_ADC_TRIG_NONE (0<<0) /* No trigger */
#define PCI230_ADC_TRIG_SW (1<<0) /* Software trigger */
#define PCI230_ADC_TRIG_EXTP (2<<0) /* EXTTRIG +ve edge trigger */
#define PCI230_ADC_TRIG_EXTN (3<<0) /* EXTTRIG -ve edge trigger */
#define PCI230_ADC_TRIG_Z2CT0 (4<<0) /* CT0-OUT +ve edge trigger */
#define PCI230_ADC_TRIG_Z2CT1 (5<<0) /* CT1-OUT +ve edge trigger */
#define PCI230_ADC_TRIG_Z2CT2 (6<<0) /* CT2-OUT +ve edge trigger */
#define PCI230_ADC_TRIG_MASK (7<<0)
#define PCI230_ADC_IR_UNI (0<<3) /* Input range unipolar */
#define PCI230_ADC_IR_BIP (1<<3) /* Input range bipolar */
#define PCI230_ADC_IR_MASK (1<<3)
#define PCI230_ADC_IM_SE (0<<4) /* Input mode single ended */
#define PCI230_ADC_IM_DIF (1<<4) /* Input mode differential */
#define PCI230_ADC_IM_MASK (1<<4)
#define PCI230_ADC_FIFO_EN (1<<8) /* FIFO enable */
#define PCI230_ADC_INT_FIFO_EMPTY (0<<9)
#define PCI230_ADC_INT_FIFO_NEMPTY (1<<9) /* FIFO interrupt not empty */
#define PCI230_ADC_INT_FIFO_NHALF (2<<9)
#define PCI230_ADC_INT_FIFO_HALF (3<<9) /* FIFO interrupt half full */
#define PCI230_ADC_INT_FIFO_NFULL (4<<9)
#define PCI230_ADC_INT_FIFO_FULL (5<<9) /* FIFO interrupt full */
#define PCI230P_ADC_INT_FIFO_THRESH (7<<9) /* FIFO interrupt threshold */
#define PCI230_ADC_INT_FIFO_MASK (7<<9)
/* ADCCON write-only, transient values. */
#define PCI230_ADC_FIFO_RESET (1<<12) /* FIFO reset */
#define PCI230_ADC_GLOB_RESET (1<<13) /* Global reset */
/* ADCCON read-only values. */
#define PCI230_ADC_BUSY (1<<15) /* ADC busy */
#define PCI230_ADC_FIFO_EMPTY (1<<12) /* FIFO empty */
#define PCI230_ADC_FIFO_FULL (1<<13) /* FIFO full */
#define PCI230_ADC_FIFO_HALF (1<<14) /* FIFO half full */
#define PCI230_ADC_FIFO_FULL_LATCHED (1<<5) /* Indicates overrun occurred */
/* PCI230 ADC FIFO levels. */
#define PCI230_ADC_FIFOLEVEL_HALFFULL 2049 /* Value for FIFO half full */
#define PCI230_ADC_FIFOLEVEL_FULL 4096 /* FIFO size */
/* Value to write to ADCSWTRIG to trigger ADC conversion in software trigger
* mode. Can be anything. */
#define PCI230_ADC_CONV 0xffff
/* PCI230+ EXTFUNC values. */
#define PCI230P_EXTFUNC_GAT_EXTTRIG (1<<0)
/* Route EXTTRIG pin to external gate inputs. */
/* PCI230+ hardware version 2 values. */
#define PCI230P2_EXTFUNC_DACFIFO (1<<1)
/* Allow DAC FIFO to be enabled. */
/*
* Counter/timer clock input configuration sources.
*/
#define CLK_CLK 0 /* reserved (channel-specific clock) */
#define CLK_10MHZ 1 /* internal 10 MHz clock */
#define CLK_1MHZ 2 /* internal 1 MHz clock */
#define CLK_100KHZ 3 /* internal 100 kHz clock */
#define CLK_10KHZ 4 /* internal 10 kHz clock */
#define CLK_1KHZ 5 /* internal 1 kHz clock */
#define CLK_OUTNM1 6 /* output of channel-1 modulo total */
#define CLK_EXT 7 /* external clock */
/* Macro to construct clock input configuration register value. */
#define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
/* Timebases in ns. */
#define TIMEBASE_10MHZ 100
#define TIMEBASE_1MHZ 1000
#define TIMEBASE_100KHZ 10000
#define TIMEBASE_10KHZ 100000
#define TIMEBASE_1KHZ 1000000
/*
* Counter/timer gate input configuration sources.
*/
#define GAT_VCC 0 /* VCC (i.e. enabled) */
#define GAT_GND 1 /* GND (i.e. disabled) */
#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
/* Macro to construct gate input configuration register value. */
#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
/*
* Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260:
*
* Channel's Channel's
* clock input gate input
* Channel CLK_OUTNM1 GAT_NOUTNM2
* ------- ---------- -----------
* Z2-CT0 Z2-CT2-OUT /Z2-CT1-OUT
* Z2-CT1 Z2-CT0-OUT /Z2-CT2-OUT
* Z2-CT2 Z2-CT1-OUT /Z2-CT0-OUT
*/
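/*
 * Illustrative sketch (not from the original source): using the
 * CLK_CONFIG()/GAT_CONFIG() macros above, clocking Z2-CT2 from the
 * internal 10 MHz source with its gate held high would look like
 *
 *	outb(CLK_CONFIG(2, CLK_10MHZ), devpriv->iobase1 + PCI230_ZCLK_SCE);
 *	outb(GAT_CONFIG(2, GAT_VCC), devpriv->iobase1 + PCI230_ZGAT_SCE);
 *
 * The driver itself does comparable writes when it sets up the
 * counter/timers (cf. pci230_ct_setup_ns_mode(), declared below).
 */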
/* Interrupt enables/status register values. */
#define PCI230_INT_DISABLE 0
#define PCI230_INT_PPI_C0 (1<<0)
#define PCI230_INT_PPI_C3 (1<<1)
#define PCI230_INT_ADC (1<<2)
#define PCI230_INT_ZCLK_CT1 (1<<5)
/* For PCI230+ hardware version 2 when DAC FIFO enabled. */
#define PCI230P2_INT_DAC (1<<4)
#define PCI230_TEST_BIT(val, n) (((val) >> (n)) & 1)
/* Assumes bits numbered with zero offset, i.e. 0-15 */
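/* Example (illustrative): PCI230_TEST_BIT(PCI230_INT_ADC, 2) == 1 because
 * PCI230_INT_ADC is (1<<2); testing any other bit position yields 0. */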
/* (Potentially) shared resources and their owners */
enum {
RES_Z2CT0, /* Z2-CT0 */
RES_Z2CT1, /* Z2-CT1 */
RES_Z2CT2, /* Z2-CT2 */
NUM_RESOURCES /* Number of (potentially) shared resources. */
};
enum {
OWNER_NONE, /* Not owned */
OWNER_AICMD, /* Owned by AI command */
OWNER_AOCMD /* Owned by AO command */
};
/*
* Handy macros.
*/
/* Combine old and new bits. */
#define COMBINE(old, new, mask) (((old) & ~(mask)) | ((new) & (mask)))
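/* Worked example (illustrative): COMBINE(0x1234, 0x00ff, 0x00f0) keeps
 * 0x1204 from the old value, takes 0x00f0 from the new one, and yields
 * 0x12f4. */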
/* A generic null function pointer value. */
#define NULLFUNC 0
/* Current CPU. XXX should this be hard_smp_processor_id()? */
#define THISCPU smp_processor_id()
/* State flags for atomic bit operations */
#define AI_CMD_STARTED 0
#define AO_CMD_STARTED 1
/*
* Board descriptions for the two boards supported.
*/
struct pci230_board {
const char *name;
unsigned short id;
int ai_chans;
int ai_bits;
int ao_chans;
int ao_bits;
int have_dio;
unsigned int min_hwver; /* Minimum hardware version supported. */
};
static const struct pci230_board pci230_boards[] = {
{
.name = "pci230+",
.id = PCI_DEVICE_ID_PCI230,
.ai_chans = 16,
.ai_bits = 16,
.ao_chans = 2,
.ao_bits = 12,
.have_dio = 1,
.min_hwver = 1,
},
{
.name = "pci260+",
.id = PCI_DEVICE_ID_PCI260,
.ai_chans = 16,
.ai_bits = 16,
.ao_chans = 0,
.ao_bits = 0,
.have_dio = 0,
.min_hwver = 1,
},
{
.name = "pci230",
.id = PCI_DEVICE_ID_PCI230,
.ai_chans = 16,
.ai_bits = 12,
.ao_chans = 2,
.ao_bits = 12,
.have_dio = 1,
},
{
.name = "pci260",
.id = PCI_DEVICE_ID_PCI260,
.ai_chans = 16,
.ai_bits = 12,
.ao_chans = 0,
.ao_bits = 0,
.have_dio = 0,
},
{
.name = "amplc_pci230", /* Wildcard matches any above */
.id = PCI_DEVICE_ID_INVALID,
},
};
static DEFINE_PCI_DEVICE_TABLE(pci230_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
{0}
};
MODULE_DEVICE_TABLE(pci, pci230_pci_table);
/*
* Useful for shorthand access to the particular board structure
*/
#define n_pci230_boards ARRAY_SIZE(pci230_boards)
#define thisboard ((const struct pci230_board *)dev->board_ptr)
/* this structure is for data unique to this hardware driver. If
several hardware drivers keep similar information in this structure,
feel free to suggest moving the variable to the struct comedi_device struct. */
struct pci230_private {
struct pci_dev *pci_dev;
spinlock_t isr_spinlock; /* Interrupt spin lock */
spinlock_t res_spinlock; /* Shared resources spin lock */
spinlock_t ai_stop_spinlock; /* Spin lock for stopping AI command */
spinlock_t ao_stop_spinlock; /* Spin lock for stopping AO command */
unsigned long state; /* State flags */
unsigned long iobase1; /* PCI230's I/O space 1 */
unsigned int ao_readback[2]; /* Used for AO readback */
unsigned int ai_scan_count; /* Number of analogue input scans
* remaining. */
unsigned int ai_scan_pos; /* Current position within analogue
* input scan */
unsigned int ao_scan_count; /* Number of analogue output scans
* remaining. */
int intr_cpuid; /* ID of CPU running interrupt routine. */
unsigned short hwver; /* Hardware version (for '+' models). */
unsigned short adccon; /* ADCCON register value. */
unsigned short daccon; /* DACCON register value. */
unsigned short adcfifothresh; /* ADC FIFO programmable interrupt
* level threshold (PCI230+/260+). */
unsigned short adcg; /* ADCG register value. */
unsigned char int_en; /* Interrupt enables bits. */
unsigned char ai_continuous; /* Flag set when cmd->stop_src ==
* TRIG_NONE - user chooses to stop
* continuous conversion by
* cancellation. */
unsigned char ao_continuous; /* Flag set when cmd->stop_src ==
* TRIG_NONE - user chooses to stop
* continuous conversion by
* cancellation. */
unsigned char ai_bipolar; /* Set if bipolar input range so we
* know to mangle it. */
unsigned char ao_bipolar; /* Set if bipolar output range so we
* know to mangle it. */
unsigned char ier; /* Copy of interrupt enables/status register. */
unsigned char intr_running; /* Flag set in interrupt routine. */
unsigned char res_owner[NUM_RESOURCES]; /* Shared resource owners. */
};
#define devpriv ((struct pci230_private *)dev->private)
/* PCI230 clock source periods in ns */
static const unsigned int pci230_timebase[8] = {
[CLK_10MHZ] = TIMEBASE_10MHZ,
[CLK_1MHZ] = TIMEBASE_1MHZ,
[CLK_100KHZ] = TIMEBASE_100KHZ,
[CLK_10KHZ] = TIMEBASE_10KHZ,
[CLK_1KHZ] = TIMEBASE_1KHZ,
};
/* PCI230 analogue input range table */
static const struct comedi_lrange pci230_ai_range = { 7, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5)
}
};
/* PCI230 analogue gain bits for each input range. */
static const unsigned char pci230_ai_gain[7] = { 0, 1, 2, 3, 1, 2, 3 };
/* PCI230 adccon bipolar flag for each analogue input range. */
static const unsigned char pci230_ai_bipolar[7] = { 1, 1, 1, 1, 0, 0, 0 };
/* PCI230 analogue output range table */
static const struct comedi_lrange pci230_ao_range = { 2, {
UNI_RANGE(10),
BIP_RANGE(10)
}
};
/* PCI230 daccon bipolar flag for each analogue output range. */
static const unsigned char pci230_ao_bipolar[2] = { 0, 1 };
/*
* The struct comedi_driver structure tells the Comedi core module
* which functions to call to configure/deconfigure (attach/detach)
* the board, and also about the kernel module that contains
* the device code.
*/
static int pci230_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pci230_detach(struct comedi_device *dev);
static struct comedi_driver driver_amplc_pci230 = {
.driver_name = "amplc_pci230",
.module = THIS_MODULE,
.attach = pci230_attach,
.detach = pci230_detach,
.board_name = &pci230_boards[0].name,
.offset = sizeof(pci230_boards[0]),
.num_names = ARRAY_SIZE(pci230_boards),
};
static int __devinit driver_amplc_pci230_pci_probe(struct pci_dev *dev,
const struct pci_device_id
*ent)
{
return comedi_pci_auto_config(dev, driver_amplc_pci230.driver_name);
}
static void __devexit driver_amplc_pci230_pci_remove(struct pci_dev *dev)
{
comedi_pci_auto_unconfig(dev);
}
static struct pci_driver driver_amplc_pci230_pci_driver = {
.id_table = pci230_pci_table,
.probe = &driver_amplc_pci230_pci_probe,
.remove = __devexit_p(&driver_amplc_pci230_pci_remove)
};
static int __init driver_amplc_pci230_init_module(void)
{
int retval;
retval = comedi_driver_register(&driver_amplc_pci230);
if (retval < 0)
return retval;
driver_amplc_pci230_pci_driver.name =
(char *)driver_amplc_pci230.driver_name;
return pci_register_driver(&driver_amplc_pci230_pci_driver);
}
static void __exit driver_amplc_pci230_cleanup_module(void)
{
pci_unregister_driver(&driver_amplc_pci230_pci_driver);
comedi_driver_unregister(&driver_amplc_pci230);
}
module_init(driver_amplc_pci230_init_module);
module_exit(driver_amplc_pci230_cleanup_module);
static int pci230_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data);
static int pci230_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data);
static int pci230_ao_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data);
static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
unsigned int mode, uint64_t ns,
unsigned int round);
static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int round);
static void pci230_cancel_ct(struct comedi_device *dev, unsigned int ct);
static irqreturn_t pci230_interrupt(int irq, void *d);
static int pci230_ao_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd);
static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
static int pci230_ao_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void pci230_ao_stop(struct comedi_device *dev,
struct comedi_subdevice *s);
static void pci230_handle_ao_nofifo(struct comedi_device *dev,
struct comedi_subdevice *s);
static int pci230_handle_ao_fifo(struct comedi_device *dev,
struct comedi_subdevice *s);
static int pci230_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd);
static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
static int pci230_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void pci230_ai_stop(struct comedi_device *dev,
struct comedi_subdevice *s);
static void pci230_handle_ai(struct comedi_device *dev,
struct comedi_subdevice *s);
static short pci230_ai_read(struct comedi_device *dev)
{
/* Read sample. */
short data = (short)inw(dev->iobase + PCI230_ADCDATA);
/* PCI230 is 12 bit - stored in upper bits of 16 bit register (lower
* four bits reserved for expansion). */
/* PCI230+ is 16 bit AI. */
data = data >> (16 - thisboard->ai_bits);
/* If a bipolar range was specified, mangle it (twos
* complement->straight binary). */
if (devpriv->ai_bipolar)
data ^= 1 << (thisboard->ai_bits - 1);
return data;
}
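/*
 * Worked example (illustrative): on a 16-bit PCI230+ with a bipolar input
 * range, a raw two's-complement sample of 0x8000 (negative full scale)
 * XORed with 1 << 15 becomes 0x0000, and 0x7fff (positive full scale)
 * becomes 0xffff - the straight-binary encoding Comedi expects.
 * pci230_ao_mangle_datum() below applies the inverse transform on output.
 */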
static inline unsigned short pci230_ao_mangle_datum(struct comedi_device *dev,
short datum)
{
/* If a bipolar range was specified, mangle it (straight binary->twos
* complement). */
if (devpriv->ao_bipolar)
datum ^= 1 << (thisboard->ao_bits - 1);
/* PCI230 is 12 bit - stored in upper bits of 16 bit register (lower
* four bits reserved for expansion). */
/* PCI230+ is also 12 bit AO. */
datum <<= (16 - thisboard->ao_bits);
return (unsigned short)datum;
}
static inline void pci230_ao_write_nofifo(struct comedi_device *dev,
short datum, unsigned int chan)
{
/* Store unmangled datum to be read back later. */
devpriv->ao_readback[chan] = datum;
/* Write mangled datum to appropriate DACOUT register. */
outw(pci230_ao_mangle_datum(dev, datum), dev->iobase + (((chan) == 0)
? PCI230_DACOUT1
:
PCI230_DACOUT2));
}
static inline void pci230_ao_write_fifo(struct comedi_device *dev, short datum,
unsigned int chan)
{
/* Store unmangled datum to be read back later. */
devpriv->ao_readback[chan] = datum;
/* Write mangled datum to appropriate DACDATA register. */
outw(pci230_ao_mangle_datum(dev, datum),
dev->iobase + PCI230P2_DACDATA);
}
/*
* Attach is called by the Comedi core to configure the driver
* for a particular board. If you specified a board_name array
* in the driver structure, dev->board_ptr contains that
* address.
*/
static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned long iobase1, iobase2;
/* PCI230's I/O spaces 1 and 2 respectively. */
struct pci_dev *pci_dev = NULL;
int i = 0, irq_hdl, rc;
printk("comedi%d: amplc_pci230: attach %s %d,%d\n", dev->minor,
thisboard->name, it->options[0], it->options[1]);
/* Allocate the private structure area using alloc_private().
* Macro defined in comedidev.h - memsets struct fields to 0. */
if ((alloc_private(dev, sizeof(struct pci230_private))) < 0)
return -ENOMEM;
spin_lock_init(&devpriv->isr_spinlock);
spin_lock_init(&devpriv->res_spinlock);
spin_lock_init(&devpriv->ai_stop_spinlock);
spin_lock_init(&devpriv->ao_stop_spinlock);
/* Find card */
for_each_pci_dev(pci_dev) {
if (it->options[0] || it->options[1]) {
/* Match against bus/slot options. */
if (it->options[0] != pci_dev->bus->number ||
it->options[1] != PCI_SLOT(pci_dev->devfn))
continue;
}
if (pci_dev->vendor != PCI_VENDOR_ID_AMPLICON)
continue;
if (thisboard->id == PCI_DEVICE_ID_INVALID) {
/* The name was specified as "amplc_pci230" which is
* used to match any supported device. Replace the
* current dev->board_ptr with one that matches the
* PCI device ID. */
for (i = 0; i < n_pci230_boards; i++) {
if (pci_dev->device == pci230_boards[i].id) {
if (pci230_boards[i].min_hwver > 0) {
/* Check for a '+' model.
* First check length of
* registers. */
if (pci_resource_len(pci_dev, 3)
< 32) {
/* Not a '+' model. */
continue;
}
/* TODO: temporarily enable the
* PCI device and read the
* hardware version register.
* For now assume it's okay. */
}
/* Change board_ptr to matched board */
dev->board_ptr = &pci230_boards[i];
break;
}
}
if (i < n_pci230_boards)
break;
} else {
/* The name was specified as a specific device name.
* The current dev->board_ptr is correct. Check
* whether it matches the PCI device ID. */
if (thisboard->id == pci_dev->device) {
/* Check minimum hardware version. */
if (thisboard->min_hwver > 0) {
/* Looking for a '+' model. First
* check length of registers. */
if (pci_resource_len(pci_dev, 3) < 32) {
/* Not a '+' model. */
continue;
}
/* TODO: temporarily enable the PCI
* device and read the hardware version
* register. For now, assume it's
* okay. */
break;
} else {
break;
}
}
}
}
if (!pci_dev) {
printk("comedi%d: No %s card found\n", dev->minor,
thisboard->name);
return -EIO;
}
devpriv->pci_dev = pci_dev;
/*
* Initialize dev->board_name.
*/
dev->board_name = thisboard->name;
/* Enable PCI device and reserve I/O spaces. */
if (comedi_pci_enable(pci_dev, "amplc_pci230") < 0) {
printk("comedi%d: failed to enable PCI device "
"and request regions\n", dev->minor);
return -EIO;
}
/* Read base addresses of the PCI230's two I/O regions from PCI
* configuration register. */
iobase1 = pci_resource_start(pci_dev, 2);
iobase2 = pci_resource_start(pci_dev, 3);
printk("comedi%d: %s I/O region 1 0x%04lx I/O region 2 0x%04lx\n",
dev->minor, dev->board_name, iobase1, iobase2);
devpriv->iobase1 = iobase1;
dev->iobase = iobase2;
/* Read bits of DACCON register - only the output range. */
devpriv->daccon = inw(dev->iobase + PCI230_DACCON) & PCI230_DAC_OR_MASK;
/* Read hardware version register and set extended function register
* if they exist. */
if (pci_resource_len(pci_dev, 3) >= 32) {
unsigned short extfunc = 0;
devpriv->hwver = inw(dev->iobase + PCI230P_HWVER);
if (devpriv->hwver < thisboard->min_hwver) {
printk("comedi%d: %s - bad hardware version "
"- got %u, need %u\n", dev->minor,
dev->board_name, devpriv->hwver,
thisboard->min_hwver);
return -EIO;
}
if (devpriv->hwver > 0) {
if (!thisboard->have_dio) {
/* No DIO ports. Route counters' external gates
* to the EXTTRIG signal (PCI260+ pin 17).
* (Otherwise, they would be routed to DIO
* inputs PC0, PC1 and PC2 which don't exist
* on PCI260[+].) */
extfunc |= PCI230P_EXTFUNC_GAT_EXTTRIG;
}
if ((thisboard->ao_chans > 0)
&& (devpriv->hwver >= 2)) {
/* Enable DAC FIFO functionality. */
extfunc |= PCI230P2_EXTFUNC_DACFIFO;
}
}
outw(extfunc, dev->iobase + PCI230P_EXTFUNC);
if ((extfunc & PCI230P2_EXTFUNC_DACFIFO) != 0) {
/* Temporarily enable DAC FIFO, reset it and disable
* FIFO wraparound. */
outw(devpriv->daccon | PCI230P2_DAC_FIFO_EN
| PCI230P2_DAC_FIFO_RESET,
dev->iobase + PCI230_DACCON);
/* Clear DAC FIFO channel enable register. */
outw(0, dev->iobase + PCI230P2_DACEN);
/* Disable DAC FIFO. */
outw(devpriv->daccon, dev->iobase + PCI230_DACCON);
}
}
/* Disable board's interrupts. */
outb(0, devpriv->iobase1 + PCI230_INT_SCE);
/* Set ADC to a reasonable state. */
devpriv->adcg = 0;
devpriv->adccon = PCI230_ADC_TRIG_NONE | PCI230_ADC_IM_SE
| PCI230_ADC_IR_BIP;
outw(1 << 0, dev->iobase + PCI230_ADCEN);
outw(devpriv->adcg, dev->iobase + PCI230_ADCG);
outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
dev->iobase + PCI230_ADCCON);
/* Register the interrupt handler. */
irq_hdl = request_irq(devpriv->pci_dev->irq, pci230_interrupt,
IRQF_SHARED, "amplc_pci230", dev);
if (irq_hdl < 0) {
printk("comedi%d: unable to register irq, "
"commands will not be available %d\n", dev->minor,
devpriv->pci_dev->irq);
} else {
dev->irq = devpriv->pci_dev->irq;
printk("comedi%d: registered irq %u\n", dev->minor,
devpriv->pci_dev->irq);
}
/*
* Allocate the subdevice structures. alloc_subdevice() is a
* convenient macro defined in comedidev.h.
*/
if (alloc_subdevices(dev, 3) < 0)
return -ENOMEM;
s = dev->subdevices + 0;
/* analog input subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
s->n_chan = thisboard->ai_chans;
s->maxdata = (1 << thisboard->ai_bits) - 1;
s->range_table = &pci230_ai_range;
s->insn_read = &pci230_ai_rinsn;
s->len_chanlist = 256; /* but there are restrictions. */
/* Only register commands if the interrupt handler is installed. */
if (irq_hdl == 0) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmd = &pci230_ai_cmd;
s->do_cmdtest = &pci230_ai_cmdtest;
s->cancel = pci230_ai_cancel;
}
s = dev->subdevices + 1;
/* analog output subdevice */
if (thisboard->ao_chans > 0) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = thisboard->ao_chans;
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = &pci230_ao_range;
s->insn_write = &pci230_ao_winsn;
s->insn_read = &pci230_ao_rinsn;
s->len_chanlist = thisboard->ao_chans;
/* Only register commands if the interrupt handler is
* installed. */
if (irq_hdl == 0) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
s->do_cmd = &pci230_ao_cmd;
s->do_cmdtest = &pci230_ao_cmdtest;
s->cancel = pci230_ao_cancel;
}
} else {
s->type = COMEDI_SUBD_UNUSED;
}
s = dev->subdevices + 2;
/* digital i/o subdevice */
if (thisboard->have_dio) {
rc = subdev_8255_init(dev, s, NULL,
(devpriv->iobase1 + PCI230_PPI_X_BASE));
if (rc < 0)
return rc;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
printk("comedi%d: attached\n", dev->minor);
return 1;
}
/*
* _detach is called to deconfigure a device. It should deallocate
* resources.
* This function is also called when _attach() fails, so it should be
* careful not to release resources that were not necessarily
* allocated by _attach(). dev->private and dev->subdevices are
* deallocated automatically by the core.
*/
static int pci230_detach(struct comedi_device *dev)
{
printk("comedi%d: amplc_pci230: remove\n", dev->minor);
if (dev->subdevices && thisboard->have_dio)
/* Clean up dio subdevice. */
subdev_8255_cleanup(dev, dev->subdevices + 2);
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
if (devpriv->pci_dev) {
if (dev->iobase)
comedi_pci_disable(devpriv->pci_dev);
pci_dev_put(devpriv->pci_dev);
}
}
return 0;
}
static int get_resources(struct comedi_device *dev, unsigned int res_mask,
unsigned char owner)
{
int ok;
unsigned int i;
unsigned int b;
unsigned int claimed;
unsigned long irqflags;
ok = 1;
claimed = 0;
spin_lock_irqsave(&devpriv->res_spinlock, irqflags);
for (b = 1, i = 0; (i < NUM_RESOURCES)
&& (res_mask != 0); b <<= 1, i++) {
if ((res_mask & b) != 0) {
res_mask &= ~b;
if (devpriv->res_owner[i] == OWNER_NONE) {
devpriv->res_owner[i] = owner;
claimed |= b;
} else if (devpriv->res_owner[i] != owner) {
for (b = 1, i = 0; claimed != 0; b <<= 1, i++) {
if ((claimed & b) != 0) {
devpriv->res_owner[i]
= OWNER_NONE;
claimed &= ~b;
}
}
ok = 0;
break;
}
}
}
spin_unlock_irqrestore(&devpriv->res_spinlock, irqflags);
return ok;
}
static inline int get_one_resource(struct comedi_device *dev,
unsigned int resource, unsigned char owner)
{
return get_resources(dev, (1U << resource), owner);
}
static void put_resources(struct comedi_device *dev, unsigned int res_mask,
unsigned char owner)
{
unsigned int i;
unsigned int b;
unsigned long irqflags;
spin_lock_irqsave(&devpriv->res_spinlock, irqflags);
for (b = 1, i = 0; (i < NUM_RESOURCES)
&& (res_mask != 0); b <<= 1, i++) {
if ((res_mask & b) != 0) {
res_mask &= ~b;
if (devpriv->res_owner[i] == owner)
devpriv->res_owner[i] = OWNER_NONE;
}
}
spin_unlock_irqrestore(&devpriv->res_spinlock, irqflags);
}
static inline void put_one_resource(struct comedi_device *dev,
unsigned int resource, unsigned char owner)
{
put_resources(dev, (1U << resource), owner);
}
static inline void put_all_resources(struct comedi_device *dev,
unsigned char owner)
{
put_resources(dev, (1U << NUM_RESOURCES) - 1, owner);
}
/*
* COMEDI_SUBD_AI instruction;
*/
static int pci230_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
unsigned int n, i;
unsigned int chan, range, aref;
unsigned int gainshift;
unsigned int status;
unsigned short adccon, adcen;
/* Unpack channel and range. */
chan = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
aref = CR_AREF(insn->chanspec);
if (aref == AREF_DIFF) {
/* Differential. */
if (chan >= s->n_chan / 2) {
DPRINTK("comedi%d: amplc_pci230: ai_rinsn: "
"differential channel number out of range "
"0 to %u\n", dev->minor, (s->n_chan / 2) - 1);
return -EINVAL;
}
}
/* Use Z2-CT2 as a conversion trigger instead of the built-in
* software trigger, as otherwise triggering of differential channels
* doesn't work properly for some versions of PCI230/260. Also set
* FIFO mode because the ADC busy bit only works for software triggers.
*/
adccon = PCI230_ADC_TRIG_Z2CT2 | PCI230_ADC_FIFO_EN;
/* Set Z2-CT2 output low to avoid any false triggers. */
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2, I8254_MODE0);
devpriv->ai_bipolar = pci230_ai_bipolar[range];
if (aref == AREF_DIFF) {
/* Differential. */
gainshift = chan * 2;
if (devpriv->hwver == 0) {
/* Original PCI230/260 expects both inputs of the
* differential channel to be enabled. */
adcen = 3 << gainshift;
} else {
/* PCI230+/260+ expects only one input of the
* differential channel to be enabled. */
adcen = 1 << gainshift;
}
adccon |= PCI230_ADC_IM_DIF;
} else {
/* Single ended. */
adcen = 1 << chan;
gainshift = chan & ~1;
adccon |= PCI230_ADC_IM_SE;
}
devpriv->adcg = (devpriv->adcg & ~(3 << gainshift))
| (pci230_ai_gain[range] << gainshift);
if (devpriv->ai_bipolar)
adccon |= PCI230_ADC_IR_BIP;
else
adccon |= PCI230_ADC_IR_UNI;
/* Enable only this channel in the scan list - otherwise by default
* we'll get one sample from each channel. */
outw(adcen, dev->iobase + PCI230_ADCEN);
/* Set gain for channel. */
outw(devpriv->adcg, dev->iobase + PCI230_ADCG);
/* Specify uni/bip, se/diff, conversion source, and reset FIFO. */
devpriv->adccon = adccon;
outw(adccon | PCI230_ADC_FIFO_RESET, dev->iobase + PCI230_ADCCON);
/* Convert n samples */
for (n = 0; n < insn->n; n++) {
/* Trigger conversion by toggling Z2-CT2 output (finish with
* output high). */
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2,
I8254_MODE0);
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2,
I8254_MODE1);
#define TIMEOUT 100
/* wait for conversion to end */
for (i = 0; i < TIMEOUT; i++) {
status = inw(dev->iobase + PCI230_ADCCON);
if (!(status & PCI230_ADC_FIFO_EMPTY))
break;
udelay(1);
}
if (i == TIMEOUT) {
/* Conversion never completed within the timeout. */
printk("comedi%d: amplc_pci230: ai_rinsn: timeout waiting "
"for conversion\n", dev->minor);
return -ETIMEDOUT;
}
/* read data */
data[n] = pci230_ai_read(dev);
}
/* return the number of samples read/written */
return n;
}
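#if 0
/*
 * Editor's illustrative sketch, not part of the original driver and not
 * compiled in: the user-space view of the instruction above.  A single
 * synchronous comedilib read on channel 2, range 0, ground reference
 * arrives here as pci230_ai_rinsn() with insn->n == 1.  The subdevice
 * number and variable names are hypothetical.
 */
void example_single_read(comedi_t *device)
{
lsampl_t sample;
comedi_data_read(device, 0 /* AI subdevice */, 2, 0, AREF_GROUND,
&sample);
}
#endif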
/*
* COMEDI_SUBD_AO instructions;
*/
static int pci230_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
int i;
int chan, range;
/* Unpack channel and range. */
chan = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
/* Set range - see analogue output range table; 0 => unipolar 10V,
* 1 => bipolar +/-10V range scale */
devpriv->ao_bipolar = pci230_ao_bipolar[range];
outw(range, dev->iobase + PCI230_DACCON);
/* Writing a list of values to an AO channel is probably not
* very useful, but that's how the interface is defined. */
for (i = 0; i < insn->n; i++) {
/* Write value to DAC and store it. */
pci230_ao_write_nofifo(dev, data[i], chan);
}
/* return the number of samples read/written */
return i;
}
/* AO subdevices should have a read insn as well as a write insn.
* Usually this means copying a value stored in devpriv. */
static int pci230_ao_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
int i;
int chan = CR_CHAN(insn->chanspec);
for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
return i;
}
static int pci230_ao_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
unsigned int tmp;
/* cmdtest tests a particular command to see if it is valid.
 * Using the cmdtest ioctl, a user can create a valid cmd
 * and then have it executed by the cmd ioctl.
 *
 * cmdtest returns 1, 2, 3, 4, 5 or 0, depending on which tests
 * the command passes. */
/* Step 1: make sure trigger sources are trivially valid.
* "invalid source" returned by comedilib to user mode process
* if this fails. */
tmp = cmd->start_src;
cmd->start_src &= TRIG_INT;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
if ((thisboard->min_hwver > 0) && (devpriv->hwver >= 2)) {
/*
* For PCI230+ hardware version 2 onwards, allow external
* trigger from EXTTRIG/EXTCONVCLK input (PCI230+ pin 25).
*
* FIXME: The permitted scan_begin_src values shouldn't depend
* on devpriv->hwver (the detected card's actual hardware
* version). They should only depend on thisboard->min_hwver
* (the static capabilities of the configured card). To fix
* it, a new card model, e.g. "pci230+2" would have to be
* defined with min_hwver set to 2. It doesn't seem worth it
* for this alone. At the moment, please consider
* scan_begin_src==TRIG_EXT support to be a bonus rather than a
* guarantee!
*/
cmd->scan_begin_src &= TRIG_TIMER | TRIG_INT | TRIG_EXT;
} else {
cmd->scan_begin_src &= TRIG_TIMER | TRIG_INT;
}
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_NOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* Step 2: make sure trigger sources are unique and mutually compatible
* "source conflict" returned by comedilib to user mode process
* if this fails. */
/* these tests are true if more than one _src bit is set */
if ((cmd->start_src & (cmd->start_src - 1)) != 0)
err++;
if ((cmd->scan_begin_src & (cmd->scan_begin_src - 1)) != 0)
err++;
if ((cmd->convert_src & (cmd->convert_src - 1)) != 0)
err++;
if ((cmd->scan_end_src & (cmd->scan_end_src - 1)) != 0)
err++;
if ((cmd->stop_src & (cmd->stop_src - 1)) != 0)
err++;
if (err)
return 2;
/* Step 3: make sure arguments are trivially compatible.
* "invalid argument" returned by comedilib to user mode process
* if this fails. */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
#define MAX_SPEED_AO 8000 /* 8000 ns => 125 kHz */
#define MIN_SPEED_AO 4294967295u /* 4294967295 ns = approx. 4.29 s */
/*- Comedi limit due to unsigned int cmd. Driver limit
 * = 2^16 (16-bit counter) * 1000000 ns (1 kHz onboard
 * clock) = 65.536 s */
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
if (cmd->scan_begin_arg < MAX_SPEED_AO) {
cmd->scan_begin_arg = MAX_SPEED_AO;
err++;
}
if (cmd->scan_begin_arg > MIN_SPEED_AO) {
cmd->scan_begin_arg = MIN_SPEED_AO;
err++;
}
break;
case TRIG_EXT:
/* External trigger - for PCI230+ hardware version 2 onwards. */
/* Trigger number must be 0. */
if ((cmd->scan_begin_arg & ~CR_FLAGS_MASK) != 0) {
cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0,
~CR_FLAGS_MASK);
err++;
}
/* The only flags allowed are CR_EDGE and CR_INVERT. The
* CR_EDGE flag is ignored. */
if ((cmd->scan_begin_arg
& (CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT))) != 0) {
cmd->scan_begin_arg =
COMBINE(cmd->scan_begin_arg, 0,
CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT));
err++;
}
break;
default:
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
err++;
}
break;
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_NONE) {
/* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
/* Step 4: fix up any arguments.
* "argument conflict" returned by comedilib to user mode process
* if this fails. */
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
pci230_ns_to_single_timer(&cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
if (tmp != cmd->scan_begin_arg)
err++;
}
if (err)
return 4;
/* Step 5: check channel list if it exists. */
if (cmd->chanlist && cmd->chanlist_len > 0) {
enum {
seq_err = (1 << 0),
range_err = (1 << 1)
};
unsigned int errors;
unsigned int n;
unsigned int chan, prev_chan;
unsigned int range, first_range;
prev_chan = CR_CHAN(cmd->chanlist[0]);
first_range = CR_RANGE(cmd->chanlist[0]);
errors = 0;
for (n = 1; n < cmd->chanlist_len; n++) {
chan = CR_CHAN(cmd->chanlist[n]);
range = CR_RANGE(cmd->chanlist[n]);
/* Channel numbers must strictly increase. */
if (chan < prev_chan)
errors |= seq_err;
/* Ranges must be the same. */
if (range != first_range)
errors |= range_err;
prev_chan = chan;
}
if (errors != 0) {
err++;
if ((errors & seq_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: "
"channel numbers must increase\n",
dev->minor);
}
if ((errors & range_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ao_cmdtest: "
"channels must have the same range\n",
dev->minor);
}
}
}
if (err)
return 5;
return 0;
}
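#if 0
/*
 * Editor's illustrative sketch, not part of the original driver and not
 * compiled in: the step 2 tests above (and in pci230_ai_cmdtest() below)
 * use the classic "clear the lowest set bit" idiom.  x & (x - 1) is zero
 * if and only if at most one bit of x is set, so a non-zero result means
 * the user requested more than one trigger source at once.
 */
static int example_more_than_one_bit_set(unsigned int src)
{
return (src & (src - 1)) != 0; /* e.g. TRIG_TIMER | TRIG_INT -> true */
}
#endif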
static int pci230_ao_inttrig_scan_begin(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
unsigned long irqflags;
if (trig_num != 0)
return -EINVAL;
spin_lock_irqsave(&devpriv->ao_stop_spinlock, irqflags);
if (test_bit(AO_CMD_STARTED, &devpriv->state)) {
/* Perform scan. */
if (devpriv->hwver < 2) {
/* Not using DAC FIFO. */
spin_unlock_irqrestore(&devpriv->ao_stop_spinlock,
irqflags);
pci230_handle_ao_nofifo(dev, s);
comedi_event(dev, s);
} else {
/* Using DAC FIFO. */
/* Read DACSWTRIG register to trigger conversion. */
inw(dev->iobase + PCI230P2_DACSWTRIG);
spin_unlock_irqrestore(&devpriv->ao_stop_spinlock,
irqflags);
}
/* Delay. Should driver be responsible for this? */
/* XXX TODO: See if DAC busy bit can be used. */
udelay(8);
}
return 1;
}
static void pci230_ao_start(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned long irqflags;
set_bit(AO_CMD_STARTED, &devpriv->state);
if (!devpriv->ao_continuous && (devpriv->ao_scan_count == 0)) {
/* An empty acquisition! */
async->events |= COMEDI_CB_EOA;
pci230_ao_stop(dev, s);
comedi_event(dev, s);
} else {
if (devpriv->hwver >= 2) {
/* Using DAC FIFO. */
unsigned short scantrig;
int run;
/* Preload FIFO data. */
run = pci230_handle_ao_fifo(dev, s);
comedi_event(dev, s);
if (!run) {
/* Stopped. */
return;
}
/* Set scan trigger source. */
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
scantrig = PCI230P2_DAC_TRIG_Z2CT1;
break;
case TRIG_EXT:
/* Trigger on EXTTRIG/EXTCONVCLK pin. */
if ((cmd->scan_begin_arg & CR_INVERT) == 0) {
/* +ve edge */
scantrig = PCI230P2_DAC_TRIG_EXTP;
} else {
/* -ve edge */
scantrig = PCI230P2_DAC_TRIG_EXTN;
}
break;
case TRIG_INT:
scantrig = PCI230P2_DAC_TRIG_SW;
break;
default:
/* Shouldn't get here. */
scantrig = PCI230P2_DAC_TRIG_NONE;
break;
}
devpriv->daccon = (devpriv->daccon
& ~PCI230P2_DAC_TRIG_MASK) |
scantrig;
outw(devpriv->daccon, dev->iobase + PCI230_DACCON);
}
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
if (devpriv->hwver < 2) {
/* Not using DAC FIFO. */
/* Enable CT1 timer interrupt. */
spin_lock_irqsave(&devpriv->isr_spinlock,
irqflags);
devpriv->int_en |= PCI230_INT_ZCLK_CT1;
devpriv->ier |= PCI230_INT_ZCLK_CT1;
outb(devpriv->ier,
devpriv->iobase1 + PCI230_INT_SCE);
spin_unlock_irqrestore(&devpriv->isr_spinlock,
irqflags);
}
/* Set CT1 gate high to start counting. */
outb(GAT_CONFIG(1, GAT_VCC),
devpriv->iobase1 + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
async->inttrig = pci230_ao_inttrig_scan_begin;
break;
}
if (devpriv->hwver >= 2) {
/* Using DAC FIFO. Enable DAC FIFO interrupt. */
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
devpriv->int_en |= PCI230P2_INT_DAC;
devpriv->ier |= PCI230P2_INT_DAC;
outb(devpriv->ier, devpriv->iobase1 + PCI230_INT_SCE);
spin_unlock_irqrestore(&devpriv->isr_spinlock,
irqflags);
}
}
}
static int pci230_ao_inttrig_start(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
if (trig_num != 0)
return -EINVAL;
s->async->inttrig = NULLFUNC;
pci230_ao_start(dev, s);
return 1;
}
static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
unsigned short daccon;
unsigned int range;
/* Get the command. */
struct comedi_cmd *cmd = &s->async->cmd;
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Claim Z2-CT1. */
if (!get_one_resource(dev, RES_Z2CT1, OWNER_AOCMD))
return -EBUSY;
}
/* Get number of scans required. */
if (cmd->stop_src == TRIG_COUNT) {
devpriv->ao_scan_count = cmd->stop_arg;
devpriv->ao_continuous = 0;
} else {
/* TRIG_NONE, user calls cancel. */
devpriv->ao_scan_count = 0;
devpriv->ao_continuous = 1;
}
/* Set range - see analogue output range table; 0 => unipolar 10V,
* 1 => bipolar +/-10V range scale */
range = CR_RANGE(cmd->chanlist[0]);
devpriv->ao_bipolar = pci230_ao_bipolar[range];
daccon = devpriv->ao_bipolar ? PCI230_DAC_OR_BIP : PCI230_DAC_OR_UNI;
/* Use DAC FIFO for hardware version 2 onwards. */
if (devpriv->hwver >= 2) {
unsigned short dacen;
unsigned int i;
dacen = 0;
for (i = 0; i < cmd->chanlist_len; i++)
dacen |= 1 << CR_CHAN(cmd->chanlist[i]);
/* Set channel scan list. */
outw(dacen, dev->iobase + PCI230P2_DACEN);
/*
* Enable DAC FIFO.
* Set DAC scan source to 'none'.
* Set DAC FIFO interrupt trigger level to 'not half full'.
* Reset DAC FIFO and clear underrun.
*
* N.B. DAC FIFO interrupts are currently disabled.
*/
daccon |= PCI230P2_DAC_FIFO_EN | PCI230P2_DAC_FIFO_RESET
| PCI230P2_DAC_FIFO_UNDERRUN_CLEAR
| PCI230P2_DAC_TRIG_NONE | PCI230P2_DAC_INT_FIFO_NHALF;
}
/* Set DACCON. */
outw(daccon, dev->iobase + PCI230_DACCON);
/* Preserve most of DACCON apart from write-only, transient bits. */
devpriv->daccon = daccon
& ~(PCI230P2_DAC_FIFO_RESET | PCI230P2_DAC_FIFO_UNDERRUN_CLEAR);
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Set the counter timer 1 to the specified scan frequency. */
/* cmd->scan_begin_arg is sampling period in ns */
/* gate it off for now. */
outb(GAT_CONFIG(1, GAT_GND),
devpriv->iobase1 + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
}
/* N.B. cmd->start_src == TRIG_INT */
s->async->inttrig = pci230_ao_inttrig_start;
return 0;
}
static int pci230_ai_check_scan_period(struct comedi_cmd *cmd)
{
unsigned int min_scan_period, chanlist_len;
int err = 0;
chanlist_len = cmd->chanlist_len;
if (cmd->chanlist_len == 0)
chanlist_len = 1;
min_scan_period = chanlist_len * cmd->convert_arg;
if ((min_scan_period < chanlist_len)
|| (min_scan_period < cmd->convert_arg)) {
/* Arithmetic overflow. */
min_scan_period = UINT_MAX;
err++;
}
if (cmd->scan_begin_arg < min_scan_period) {
cmd->scan_begin_arg = min_scan_period;
err++;
}
return !err;
}
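#if 0
/*
 * Editor's illustrative sketch, not part of the original driver and not
 * compiled in: a worked example of the check above.  With an 8-channel
 * list and a 10 us conversion period, the scan period must be at least
 * 8 * 10000 ns = 80000 ns, so a requested scan_begin_arg of 50000 ns
 * would be rounded up to 80000 ns and reported as an error.
 */
static unsigned int example_min_scan_period(void)
{
unsigned int chanlist_len = 8;
unsigned int convert_arg = 10000; /* ns per conversion */
return chanlist_len * convert_arg; /* 80000 ns minimum scan period */
}
#endif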
static int pci230_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0;
unsigned int tmp;
/* cmdtest tests a particular command to see if it is valid.
 * Using the cmdtest ioctl, a user can create a valid cmd
 * and then have it executed by the cmd ioctl.
 *
 * cmdtest returns 1, 2, 3, 4, 5 or 0, depending on which tests
 * the command passes. */
/* Step 1: make sure trigger sources are trivially valid.
* "invalid source" returned by comedilib to user mode process
* if this fails. */
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW | TRIG_INT;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
tmp = cmd->scan_begin_src;
/* Unfortunately, we cannot trigger a scan off an external source
* on the PCI260 board, since it uses the PPIC0 (DIO) input, which
* isn't present on the PCI260. For PCI260+ we can use the
* EXTTRIG/EXTCONVCLK input on pin 17 instead. */
if ((thisboard->have_dio) || (thisboard->min_hwver > 0)) {
cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_INT
| TRIG_EXT;
} else {
cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_INT;
}
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_TIMER | TRIG_INT | TRIG_EXT;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/* Step 2: make sure trigger sources are unique and mutually compatible
* "source conflict" returned by comedilib to user mode process
* if this fails. */
/* these tests are true if more than one _src bit is set */
if ((cmd->start_src & (cmd->start_src - 1)) != 0)
err++;
if ((cmd->scan_begin_src & (cmd->scan_begin_src - 1)) != 0)
err++;
if ((cmd->convert_src & (cmd->convert_src - 1)) != 0)
err++;
if ((cmd->scan_end_src & (cmd->scan_end_src - 1)) != 0)
err++;
if ((cmd->stop_src & (cmd->stop_src - 1)) != 0)
err++;
/* If scan_begin_src is not TRIG_FOLLOW, then a monostable will be
* set up to generate a fixed number of timed conversion pulses. */
if ((cmd->scan_begin_src != TRIG_FOLLOW)
&& (cmd->convert_src != TRIG_TIMER))
err++;
if (err)
return 2;
/* Step 3: make sure arguments are trivially compatible.
* "invalid argument" returned by comedilib to user mode process
* if this fails. */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
#define MAX_SPEED_AI_SE 3200 /* PCI230 SE: 3200 ns => 312.5 kHz */
#define MAX_SPEED_AI_DIFF 8000 /* PCI230 DIFF: 8000 ns => 125 kHz */
#define MAX_SPEED_AI_PLUS 4000 /* PCI230+: 4000 ns => 250 kHz */
#define MIN_SPEED_AI 4294967295u /* 4294967295 ns = approx. 4.29 s */
/*- Comedi limit due to unsigned int cmd. Driver limit
 * = 2^16 (16-bit counter) * 1000000 ns (1 kHz onboard
 * clock) = 65.536 s */
if (cmd->convert_src == TRIG_TIMER) {
unsigned int max_speed_ai;
if (devpriv->hwver == 0) {
/* PCI230 or PCI260. Max speed depends whether
* single-ended or pseudo-differential. */
if (cmd->chanlist && (cmd->chanlist_len > 0)) {
/* Peek analogue reference of first channel. */
if (CR_AREF(cmd->chanlist[0]) == AREF_DIFF)
max_speed_ai = MAX_SPEED_AI_DIFF;
else
max_speed_ai = MAX_SPEED_AI_SE;
} else {
/* No channel list. Assume single-ended. */
max_speed_ai = MAX_SPEED_AI_SE;
}
} else {
/* PCI230+ or PCI260+. */
max_speed_ai = MAX_SPEED_AI_PLUS;
}
if (cmd->convert_arg < max_speed_ai) {
cmd->convert_arg = max_speed_ai;
err++;
}
if (cmd->convert_arg > MIN_SPEED_AI) {
cmd->convert_arg = MIN_SPEED_AI;
err++;
}
} else if (cmd->convert_src == TRIG_EXT) {
/*
* external trigger
*
* convert_arg == (CR_EDGE | 0)
* => trigger on +ve edge.
* convert_arg == (CR_EDGE | CR_INVERT | 0)
* => trigger on -ve edge.
*/
if ((cmd->convert_arg & CR_FLAGS_MASK) != 0) {
/* Trigger number must be 0. */
if ((cmd->convert_arg & ~CR_FLAGS_MASK) != 0) {
cmd->convert_arg = COMBINE(cmd->convert_arg, 0,
~CR_FLAGS_MASK);
err++;
}
/* The only flags allowed are CR_INVERT and CR_EDGE.
* CR_EDGE is required. */
if ((cmd->convert_arg & (CR_FLAGS_MASK & ~CR_INVERT))
!= CR_EDGE) {
/* Set CR_EDGE, preserve CR_INVERT. */
cmd->convert_arg =
COMBINE(cmd->convert_arg, (CR_EDGE | 0),
CR_FLAGS_MASK & ~CR_INVERT);
err++;
}
} else {
/* Backwards compatibility with previous versions. */
/* convert_arg == 0 => trigger on -ve edge. */
/* convert_arg == 1 => trigger on +ve edge. */
if (cmd->convert_arg > 1) {
/* Default to trigger on +ve edge. */
cmd->convert_arg = 1;
err++;
}
}
} else {
if (cmd->convert_arg != 0) {
cmd->convert_arg = 0;
err++;
}
}
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_NONE) {
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (cmd->scan_begin_src == TRIG_EXT) {
/* external "trigger" to begin each scan
* scan_begin_arg==0 => use PPC0 input -> gate of CT0 -> gate
* of CT2 (sample convert trigger is CT2) */
if ((cmd->scan_begin_arg & ~CR_FLAGS_MASK) != 0) {
cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0,
~CR_FLAGS_MASK);
err++;
}
/* The only flag allowed is CR_EDGE, which is ignored. */
if ((cmd->scan_begin_arg & CR_FLAGS_MASK & ~CR_EDGE) != 0) {
cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0,
CR_FLAGS_MASK & ~CR_EDGE);
err++;
}
} else if (cmd->scan_begin_src == TRIG_TIMER) {
/* N.B. cmd->convert_arg is also TRIG_TIMER */
if (!pci230_ai_check_scan_period(cmd))
err++;
} else {
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
err++;
}
}
if (err)
return 3;
/* Step 4: fix up any arguments.
* "argument conflict" returned by comedilib to user mode process
* if this fails. */
if (cmd->convert_src == TRIG_TIMER) {
tmp = cmd->convert_arg;
pci230_ns_to_single_timer(&cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
if (tmp != cmd->convert_arg)
err++;
}
if (cmd->scan_begin_src == TRIG_TIMER) {
/* N.B. cmd->convert_arg is also TRIG_TIMER */
tmp = cmd->scan_begin_arg;
pci230_ns_to_single_timer(&cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
if (!pci230_ai_check_scan_period(cmd)) {
/* Was below minimum required. Round up. */
pci230_ns_to_single_timer(&cmd->scan_begin_arg,
TRIG_ROUND_UP);
pci230_ai_check_scan_period(cmd);
}
if (tmp != cmd->scan_begin_arg)
err++;
}
if (err)
return 4;
/* Step 5: check channel list if it exists. */
if (cmd->chanlist && cmd->chanlist_len > 0) {
enum {
seq_err = 1 << 0,
rangepair_err = 1 << 1,
polarity_err = 1 << 2,
aref_err = 1 << 3,
diffchan_err = 1 << 4,
buggy_chan0_err = 1 << 5
};
unsigned int errors;
unsigned int chan, prev_chan;
unsigned int range, prev_range;
unsigned int polarity, prev_polarity;
unsigned int aref, prev_aref;
unsigned int subseq_len;
unsigned int n;
subseq_len = 0;
errors = 0;
prev_chan = prev_aref = prev_range = prev_polarity = 0;
for (n = 0; n < cmd->chanlist_len; n++) {
chan = CR_CHAN(cmd->chanlist[n]);
range = CR_RANGE(cmd->chanlist[n]);
aref = CR_AREF(cmd->chanlist[n]);
polarity = pci230_ai_bipolar[range];
/* Only the first half of the channels are available if
* differential. (These are remapped in software. In
* hardware, only the even channels are available.) */
if ((aref == AREF_DIFF)
&& (chan >= (s->n_chan / 2))) {
errors |= diffchan_err;
}
if (n > 0) {
/* Channel numbers must strictly increase or
* subsequence must repeat exactly. */
if ((chan <= prev_chan)
&& (subseq_len == 0)) {
subseq_len = n;
}
if ((subseq_len > 0)
&& (cmd->chanlist[n] !=
cmd->chanlist[n % subseq_len])) {
errors |= seq_err;
}
/* Channels must have same AREF. */
if (aref != prev_aref)
errors |= aref_err;
/* Channel ranges must have same polarity. */
if (polarity != prev_polarity)
errors |= polarity_err;
/* Single-ended channel pairs must have same
* range. */
if ((aref != AREF_DIFF)
&& (((chan ^ prev_chan) & ~1) == 0)
&& (range != prev_range)) {
errors |= rangepair_err;
}
}
prev_chan = chan;
prev_range = range;
prev_aref = aref;
prev_polarity = polarity;
}
if (subseq_len == 0) {
/* Subsequence is whole sequence. */
subseq_len = n;
}
/* If channel list is a repeating subsequence, need a whole
* number of repeats. */
if ((n % subseq_len) != 0)
errors |= seq_err;
if ((devpriv->hwver > 0) && (devpriv->hwver < 4)) {
/*
* Buggy PCI230+ or PCI260+ requires channel 0 to be
* (first) in the sequence if the sequence contains
* more than one channel. Hardware versions 1 and 2
* have the bug. There is no hardware version 3.
*
* Actually, there are two firmwares that report
* themselves as hardware version 1 (the boards
* have different ADC chips with slightly different
* timing requirements, which was supposed to be
* invisible to software). The first one doesn't
* seem to have the bug, but the second one
* does, and we can't tell them apart!
*/
if ((subseq_len > 1)
&& (CR_CHAN(cmd->chanlist[0]) != 0)) {
errors |= buggy_chan0_err;
}
}
if (errors != 0) {
err++;
if ((errors & seq_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
"channel numbers must increase or "
"sequence must repeat exactly\n",
dev->minor);
}
if ((errors & rangepair_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
"single-ended channel pairs must "
"have the same range\n", dev->minor);
}
if ((errors & polarity_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
"channel sequence ranges must be all "
"bipolar or all unipolar\n",
dev->minor);
}
if ((errors & aref_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
"channel sequence analogue references "
"must be all the same (single-ended "
"or differential)\n", dev->minor);
}
if ((errors & diffchan_err) != 0) {
DPRINTK("comedi%d: amplc_pci230: ai_cmdtest: "
"differential channel number out of "
"range 0 to %u\n", dev->minor,
(s->n_chan / 2) - 1);
}
if ((errors & buggy_chan0_err) != 0) {
/* Use printk instead of DPRINTK so this is always logged. */
printk("comedi: comedi%d: amplc_pci230: "
"ai_cmdtest: Buggy PCI230+/260+ "
"h/w version %u requires first channel "
"of multi-channel sequence to be 0 "
"(corrected in h/w version 4)\n",
dev->minor, devpriv->hwver);
}
}
}
if (err)
return 5;
return 0;
}
static void pci230_ai_update_fifo_trigger_level(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int scanlen = cmd->scan_end_arg;
unsigned int wake;
unsigned short triglev;
unsigned short adccon;
if ((cmd->flags & TRIG_WAKE_EOS) != 0) {
/* Wake at end of scan. */
wake = scanlen - devpriv->ai_scan_pos;
} else {
if (devpriv->ai_continuous
|| (devpriv->ai_scan_count >= PCI230_ADC_FIFOLEVEL_HALFFULL)
|| (scanlen >= PCI230_ADC_FIFOLEVEL_HALFFULL)) {
wake = PCI230_ADC_FIFOLEVEL_HALFFULL;
} else {
wake = (devpriv->ai_scan_count * scanlen)
- devpriv->ai_scan_pos;
}
}
if (wake >= PCI230_ADC_FIFOLEVEL_HALFFULL) {
triglev = PCI230_ADC_INT_FIFO_HALF;
} else {
if ((wake > 1) && (devpriv->hwver > 0)) {
/* PCI230+/260+ programmable FIFO interrupt level. */
if (devpriv->adcfifothresh != wake) {
devpriv->adcfifothresh = wake;
outw(wake, dev->iobase + PCI230P_ADCFFTH);
}
triglev = PCI230P_ADC_INT_FIFO_THRESH;
} else {
triglev = PCI230_ADC_INT_FIFO_NEMPTY;
}
}
adccon = (devpriv->adccon & ~PCI230_ADC_INT_FIFO_MASK) | triglev;
if (adccon != devpriv->adccon) {
devpriv->adccon = adccon;
outw(adccon, dev->iobase + PCI230_ADCCON);
}
}
static int pci230_ai_inttrig_convert(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
unsigned long irqflags;
if (trig_num != 0)
return -EINVAL;
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
if (test_bit(AI_CMD_STARTED, &devpriv->state)) {
unsigned int delayus;
/* Trigger conversion by toggling Z2-CT2 output. Finish
* with output high. */
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2,
I8254_MODE0);
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2,
I8254_MODE1);
/* Delay. Should driver be responsible for this? An
* alternative would be to wait until conversion is complete,
* but we can't tell when it's complete because the ADC busy
* bit has a different meaning when FIFO enabled (and when
* FIFO not enabled, it only works for software triggers). */
if (((devpriv->adccon & PCI230_ADC_IM_MASK)
== PCI230_ADC_IM_DIF)
&& (devpriv->hwver == 0)) {
/* PCI230/260 in differential mode */
delayus = 8;
} else {
/* single-ended or PCI230+/260+ */
delayus = 4;
}
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
udelay(delayus);
} else {
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
}
return 1;
}
static int pci230_ai_inttrig_scan_begin(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
unsigned long irqflags;
unsigned char zgat;
if (trig_num != 0)
return -EINVAL;
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
if (test_bit(AI_CMD_STARTED, &devpriv->state)) {
/* Trigger scan by waggling CT0 gate source. */
zgat = GAT_CONFIG(0, GAT_GND);
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
zgat = GAT_CONFIG(0, GAT_VCC);
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
}
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
return 1;
}
static void pci230_ai_start(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned long irqflags;
unsigned short conv;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
set_bit(AI_CMD_STARTED, &devpriv->state);
if (!devpriv->ai_continuous && (devpriv->ai_scan_count == 0)) {
/* An empty acquisition! */
async->events |= COMEDI_CB_EOA;
pci230_ai_stop(dev, s);
comedi_event(dev, s);
} else {
/* Enable ADC FIFO trigger level interrupt. */
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
devpriv->int_en |= PCI230_INT_ADC;
devpriv->ier |= PCI230_INT_ADC;
outb(devpriv->ier, devpriv->iobase1 + PCI230_INT_SCE);
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
/* Update conversion trigger source which is currently set
* to CT2 output, which is currently stuck high. */
switch (cmd->convert_src) {
default:
conv = PCI230_ADC_TRIG_NONE;
break;
case TRIG_TIMER:
/* Using CT2 output. */
conv = PCI230_ADC_TRIG_Z2CT2;
break;
case TRIG_EXT:
if ((cmd->convert_arg & CR_EDGE) != 0) {
if ((cmd->convert_arg & CR_INVERT) == 0) {
/* Trigger on +ve edge. */
conv = PCI230_ADC_TRIG_EXTP;
} else {
/* Trigger on -ve edge. */
conv = PCI230_ADC_TRIG_EXTN;
}
} else {
/* Backwards compatibility. */
if (cmd->convert_arg != 0) {
/* Trigger on +ve edge. */
conv = PCI230_ADC_TRIG_EXTP;
} else {
/* Trigger on -ve edge. */
conv = PCI230_ADC_TRIG_EXTN;
}
}
break;
case TRIG_INT:
/* Use CT2 output for software trigger due to problems
* in differential mode on PCI230/260. */
conv = PCI230_ADC_TRIG_Z2CT2;
break;
}
devpriv->adccon = (devpriv->adccon & ~PCI230_ADC_TRIG_MASK)
| conv;
outw(devpriv->adccon, dev->iobase + PCI230_ADCCON);
if (cmd->convert_src == TRIG_INT)
async->inttrig = pci230_ai_inttrig_convert;
/* Update FIFO interrupt trigger level, which is currently
* set to "full". */
pci230_ai_update_fifo_trigger_level(dev, s);
if (cmd->convert_src == TRIG_TIMER) {
/* Update timer gates. */
unsigned char zgat;
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Conversion timer CT2 needs to be gated by
* inverted output of monostable CT2. */
zgat = GAT_CONFIG(2, GAT_NOUTNM2);
} else {
/* Conversion timer CT2 needs to be gated on
* continuously. */
zgat = GAT_CONFIG(2, GAT_VCC);
}
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Set monostable CT0 trigger source. */
switch (cmd->scan_begin_src) {
default:
zgat = GAT_CONFIG(0, GAT_VCC);
break;
case TRIG_EXT:
/*
* For CT0 on PCI230, the external
* trigger (gate) signal comes from
* PPC0, which is channel 16 of the DIO
* subdevice. The application needs to
* configure this as an input in order
* to use it as an external scan
* trigger.
*/
zgat = GAT_CONFIG(0, GAT_EXT);
break;
case TRIG_TIMER:
/*
* Monostable CT0 triggered by rising
* edge on inverted output of CT1
* (falling edge on CT1).
*/
zgat = GAT_CONFIG(0, GAT_NOUTNM2);
break;
case TRIG_INT:
/*
* Monostable CT0 is triggered by
* inttrig function waggling the CT0
* gate source.
*/
zgat = GAT_CONFIG(0, GAT_VCC);
break;
}
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
/* Scan period timer CT1 needs to be
* gated on to start counting. */
zgat = GAT_CONFIG(1, GAT_VCC);
outb(zgat, devpriv->iobase1
+ PCI230_ZGAT_SCE);
break;
case TRIG_INT:
async->inttrig =
pci230_ai_inttrig_scan_begin;
break;
}
}
} else if (cmd->convert_src != TRIG_INT) {
/* No longer need Z2-CT2. */
put_one_resource(dev, RES_Z2CT2, OWNER_AICMD);
}
}
}
static int pci230_ai_inttrig_start(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trig_num)
{
if (trig_num != 0)
return -EINVAL;
s->async->inttrig = NULLFUNC;
pci230_ai_start(dev, s);
return 1;
}
static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
unsigned int i, chan, range, diff;
unsigned int res_mask;
unsigned short adccon, adcen;
unsigned char zgat;
/* Get the command. */
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
/*
* Determine which shared resources are needed.
*/
res_mask = 0;
/* Need Z2-CT2 to supply a conversion trigger source at a high
* logic level, even if not doing timed conversions. */
res_mask |= (1U << RES_Z2CT2);
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Using Z2-CT0 monostable to gate Z2-CT2 conversion timer */
res_mask |= (1U << RES_Z2CT0);
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Using Z2-CT1 for scan frequency */
res_mask |= (1U << RES_Z2CT1);
}
}
/* Claim resources. */
if (!get_resources(dev, res_mask, OWNER_AICMD))
return -EBUSY;
/* Get number of scans required. */
if (cmd->stop_src == TRIG_COUNT) {
devpriv->ai_scan_count = cmd->stop_arg;
devpriv->ai_continuous = 0;
} else {
/* TRIG_NONE, user calls cancel. */
devpriv->ai_scan_count = 0;
devpriv->ai_continuous = 1;
}
devpriv->ai_scan_pos = 0; /* Position within scan. */
/* Steps:
 * - Set channel scan list.
 * - Set channel gains.
 * - Enable and reset FIFO, specify uni/bip, se/diff, and set
 * start conversion source to point to something at a high logic
 * level (we use the output of counter/timer 2 for this purpose).
* - PAUSE to allow things to settle down.
* - Reset the FIFO again because it needs resetting twice and there
* may have been a false conversion trigger on some versions of
* PCI230/260 due to the start conversion source being set to a
* high logic level.
* - Enable ADC FIFO level interrupt.
* - Set actual conversion trigger source and FIFO interrupt trigger
* level.
* - If convert_src is TRIG_TIMER, set up the timers.
*/
adccon = PCI230_ADC_FIFO_EN;
adcen = 0;
if (CR_AREF(cmd->chanlist[0]) == AREF_DIFF) {
/* Differential - all channels must be differential. */
diff = 1;
adccon |= PCI230_ADC_IM_DIF;
} else {
/* Single ended - all channels must be single-ended. */
diff = 0;
adccon |= PCI230_ADC_IM_SE;
}
range = CR_RANGE(cmd->chanlist[0]);
devpriv->ai_bipolar = pci230_ai_bipolar[range];
if (devpriv->ai_bipolar)
adccon |= PCI230_ADC_IR_BIP;
else
adccon |= PCI230_ADC_IR_UNI;
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int gainshift;
chan = CR_CHAN(cmd->chanlist[i]);
range = CR_RANGE(cmd->chanlist[i]);
if (diff) {
gainshift = 2 * chan;
if (devpriv->hwver == 0) {
/* Original PCI230/260 expects both inputs of
* the differential channel to be enabled. */
adcen |= 3 << gainshift;
} else {
/* PCI230+/260+ expects only one input of the
* differential channel to be enabled. */
adcen |= 1 << gainshift;
}
} else {
gainshift = (chan & ~1);
adcen |= 1 << chan;
}
devpriv->adcg = (devpriv->adcg & ~(3 << gainshift))
| (pci230_ai_gain[range] << gainshift);
}
/* Set channel scan list. */
outw(adcen, dev->iobase + PCI230_ADCEN);
/* Set channel gains. */
outw(devpriv->adcg, dev->iobase + PCI230_ADCG);
/* Set counter/timer 2 output high for use as the initial start
* conversion source. */
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, 2, I8254_MODE1);
/* Temporarily use CT2 output as conversion trigger source and
* temporarily set FIFO interrupt trigger level to 'full'. */
adccon |= PCI230_ADC_INT_FIFO_FULL | PCI230_ADC_TRIG_Z2CT2;
/* Enable and reset FIFO, specify FIFO trigger level full, specify
* uni/bip, se/diff, and temporarily set the start conversion source
* to CT2 output. Note that CT2 output is currently high, and this
* will produce a false conversion trigger on some versions of the
* PCI230/260, but that will be dealt with later. */
devpriv->adccon = adccon;
outw(adccon | PCI230_ADC_FIFO_RESET, dev->iobase + PCI230_ADCCON);
/* Delay */
/* Failure to include this will result in the first few channels'-worth
* of data being corrupt, normally manifesting itself by large negative
* voltages. It seems the board needs time to settle between the first
* FIFO reset (above) and the second FIFO reset (below). Setting the
* channel gains and scan list _before_ the first FIFO reset also
* helps, though only slightly. */
udelay(25);
/* Reset FIFO again. */
outw(adccon | PCI230_ADC_FIFO_RESET, dev->iobase + PCI230_ADCCON);
if (cmd->convert_src == TRIG_TIMER) {
/* Set up CT2 as conversion timer, but gate it off for now.
* Note, counter/timer output 2 can be monitored on the
* connector: PCI230 pin 21, PCI260 pin 18. */
zgat = GAT_CONFIG(2, GAT_GND);
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
/* Set counter/timer 2 to the specified conversion period. */
pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/*
* Set up monostable on CT0 output for scan timing. A
* rising edge on the trigger (gate) input of CT0 will
* trigger the monostable, causing its output to go low
* for the configured period. The period depends on
* the conversion period and the number of conversions
* in the scan.
*
* Set the trigger high before setting up the
* monostable to stop it triggering. The trigger
* source will be changed later.
*/
zgat = GAT_CONFIG(0, GAT_VCC);
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1,
((uint64_t) cmd->convert_arg
* cmd->scan_end_arg),
TRIG_ROUND_UP);
if (cmd->scan_begin_src == TRIG_TIMER) {
/*
* Monostable on CT0 will be triggered by
* output of CT1 at configured scan frequency.
*
* Set up CT1 but gate it off for now.
*/
zgat = GAT_CONFIG(1, GAT_GND);
outb(zgat, devpriv->iobase1 + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
}
}
}
if (cmd->start_src == TRIG_INT) {
s->async->inttrig = pci230_ai_inttrig_start;
} else {
/* TRIG_NOW */
pci230_ai_start(dev, s);
}
return 0;
}
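#if 0
/*
 * Editor's illustrative sketch, not part of the original driver and not
 * compiled in: for reference, a chanlist entry as unpacked with
 * CR_CHAN()/CR_RANGE()/CR_AREF() throughout this driver is built by the
 * user with CR_PACK() from comedi.h, e.g. channel 3, range index 1,
 * single-ended ground reference:
 */
static unsigned int example_chanspec(void)
{
return CR_PACK(3, 1, AREF_GROUND);
}
#endif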
static unsigned int divide_ns(uint64_t ns, unsigned int timebase,
unsigned int round_mode)
{
uint64_t div;
unsigned int rem;
div = ns;
rem = do_div(div, timebase);
round_mode &= TRIG_ROUND_MASK;
switch (round_mode) {
default:
case TRIG_ROUND_NEAREST:
div += (rem + (timebase / 2)) / timebase;
break;
case TRIG_ROUND_DOWN:
break;
case TRIG_ROUND_UP:
div += (rem + timebase - 1) / timebase;
break;
}
return div > UINT_MAX ? UINT_MAX : (unsigned int)div;
}
/* Given desired period in ns, returns the required internal clock source
* and gets the initial count. */
static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count,
unsigned int round_mode)
{
unsigned int clk_src, cnt;
for (clk_src = CLK_10MHZ;; clk_src++) {
cnt = divide_ns(ns, pci230_timebase[clk_src], round_mode);
if ((cnt <= 65536) || (clk_src == CLK_1KHZ))
break;
}
*count = cnt;
return clk_src;
}
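#if 0
/*
 * Editor's illustrative sketch, not part of the original driver and not
 * compiled in: a worked example of the clock selection above, assuming
 * the usual pci230_timebase[] entries (100 ns per tick at CLK_10MHZ,
 * 1000 ns at the next slower source, and so on).  A requested period of
 * 10 ms needs a count of 100000 on the 10 MHz clock, which exceeds the
 * 16-bit counter's 65536 limit, so the next slower 1 MHz clock is
 * chosen with a count of 10000.
 */
static void example_choose_clk(void)
{
unsigned int count;
unsigned int clk_src;
clk_src = pci230_choose_clk_count(10000000ULL, &count,
TRIG_ROUND_NEAREST);
/* expected: 1 MHz clock source, count == 10000 */
}
#endif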
static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int round)
{
unsigned int count;
unsigned int clk_src;
clk_src = pci230_choose_clk_count(*ns, &count, round);
*ns = count * pci230_timebase[clk_src];
return;
}
static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
unsigned int mode, uint64_t ns,
unsigned int round)
{
unsigned int clk_src;
unsigned int count;
/* Set mode. */
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, ct, mode);
/* Determine clock source and count. */
clk_src = pci230_choose_clk_count(ns, &count, round);
/* Program clock source. */
outb(CLK_CONFIG(ct, clk_src), devpriv->iobase1 + PCI230_ZCLK_SCE);
/* Set initial count. */
if (count >= 65536)
count = 0;
i8254_write(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, ct, count);
}
static void pci230_cancel_ct(struct comedi_device *dev, unsigned int ct)
{
i8254_set_mode(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, ct,
I8254_MODE1);
/* Counter ct, 8254 mode 1, initial count not written. */
}
/* Interrupt handler */
static irqreturn_t pci230_interrupt(int irq, void *d)
{
unsigned char status_int, valid_status_int;
struct comedi_device *dev = (struct comedi_device *)d;
struct comedi_subdevice *s;
unsigned long irqflags;
/* Read interrupt status/enable register. */
status_int = inb(devpriv->iobase1 + PCI230_INT_STAT);
if (status_int == PCI230_INT_DISABLE)
return IRQ_NONE;
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
valid_status_int = devpriv->int_en & status_int;
/* Disable the interrupt sources that have just triggered.
 * (Only those that still need to be enabled are re-enabled later
 * in the handler.) */
devpriv->ier = devpriv->int_en & ~status_int;
outb(devpriv->ier, devpriv->iobase1 + PCI230_INT_SCE);
devpriv->intr_running = 1;
devpriv->intr_cpuid = THISCPU;
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
/*
* Check the source of interrupt and handle it.
* The PCI230 can cope with concurrent ADC, DAC, PPI C0 and C3
* interrupts. However, Comedi (at present, version 0.7.60) does not
* allow concurrent execution of commands, instructions or a mixture
* of the two.
*/
if ((valid_status_int & PCI230_INT_ZCLK_CT1) != 0) {
s = dev->write_subdev;
pci230_handle_ao_nofifo(dev, s);
comedi_event(dev, s);
}
if ((valid_status_int & PCI230P2_INT_DAC) != 0) {
s = dev->write_subdev;
pci230_handle_ao_fifo(dev, s);
comedi_event(dev, s);
}
if ((valid_status_int & PCI230_INT_ADC) != 0) {
s = dev->read_subdev;
pci230_handle_ai(dev, s);
comedi_event(dev, s);
}
/* Reenable interrupts. */
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
if (devpriv->ier != devpriv->int_en) {
devpriv->ier = devpriv->int_en;
outb(devpriv->ier, devpriv->iobase1 + PCI230_INT_SCE);
}
devpriv->intr_running = 0;
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
return IRQ_HANDLED;
}
static void pci230_handle_ao_nofifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
short data;
int i, ret;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
if (!devpriv->ao_continuous && (devpriv->ao_scan_count == 0))
return;
for (i = 0; i < cmd->chanlist_len; i++) {
/* Read sample from Comedi's circular buffer. */
ret = comedi_buf_get(s->async, &data);
if (ret == 0) {
s->async->events |= COMEDI_CB_OVERFLOW;
pci230_ao_stop(dev, s);
comedi_error(dev, "AO buffer underrun");
return;
}
/* Write value to DAC. */
pci230_ao_write_nofifo(dev, data, CR_CHAN(cmd->chanlist[i]));
}
async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
if (!devpriv->ao_continuous) {
devpriv->ao_scan_count--;
if (devpriv->ao_scan_count == 0) {
/* End of acquisition. */
async->events |= COMEDI_CB_EOA;
pci230_ao_stop(dev, s);
}
}
}
/* Loads DAC FIFO (if using it) from buffer. */
/* Returns 0 if AO finished due to completion or error, 1 if still going. */
static int pci230_handle_ao_fifo(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
unsigned int num_scans;
unsigned int room;
unsigned short dacstat;
unsigned int i, n;
unsigned int bytes_per_scan;
unsigned int events = 0;
int running;
/* Get DAC FIFO status. */
dacstat = inw(dev->iobase + PCI230_DACCON);
/* Determine number of scans available in buffer. */
bytes_per_scan = cmd->chanlist_len * sizeof(short);
num_scans = comedi_buf_read_n_available(async) / bytes_per_scan;
if (!devpriv->ao_continuous) {
/* Fixed number of scans. */
if (num_scans > devpriv->ao_scan_count)
num_scans = devpriv->ao_scan_count;
if (devpriv->ao_scan_count == 0) {
/* End of acquisition. */
events |= COMEDI_CB_EOA;
}
}
if (events == 0) {
/* Check for FIFO underrun. */
if ((dacstat & PCI230P2_DAC_FIFO_UNDERRUN_LATCHED) != 0) {
comedi_error(dev, "AO FIFO underrun");
events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
}
/* Check for buffer underrun if FIFO less than half full
* (otherwise there will be loads of "DAC FIFO not half full"
* interrupts). */
if ((num_scans == 0)
&& ((dacstat & PCI230P2_DAC_FIFO_HALF) == 0)) {
comedi_error(dev, "AO buffer underrun");
events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
}
}
if (events == 0) {
/* Determine how much room is in the FIFO (in samples). */
if ((dacstat & PCI230P2_DAC_FIFO_FULL) != 0)
room = PCI230P2_DAC_FIFOROOM_FULL;
else if ((dacstat & PCI230P2_DAC_FIFO_HALF) != 0)
room = PCI230P2_DAC_FIFOROOM_HALFTOFULL;
else if ((dacstat & PCI230P2_DAC_FIFO_EMPTY) != 0)
room = PCI230P2_DAC_FIFOROOM_EMPTY;
else
room = PCI230P2_DAC_FIFOROOM_ONETOHALF;
/* Convert room to number of scans that can be added. */
room /= cmd->chanlist_len;
/* Determine number of scans to process. */
if (num_scans > room)
num_scans = room;
/* Process scans. */
for (n = 0; n < num_scans; n++) {
for (i = 0; i < cmd->chanlist_len; i++) {
short datum;
comedi_buf_get(async, &datum);
pci230_ao_write_fifo(dev, datum,
CR_CHAN(cmd->chanlist[i]));
}
}
events |= COMEDI_CB_EOS | COMEDI_CB_BLOCK;
if (!devpriv->ao_continuous) {
devpriv->ao_scan_count -= num_scans;
if (devpriv->ao_scan_count == 0) {
/* All data for the command has been written
* to FIFO. Set FIFO interrupt trigger level
* to 'empty'. */
devpriv->daccon = (devpriv->daccon
& ~PCI230P2_DAC_INT_FIFO_MASK)
| PCI230P2_DAC_INT_FIFO_EMPTY;
outw(devpriv->daccon,
dev->iobase + PCI230_DACCON);
}
}
/* Check if FIFO underrun occurred while writing to FIFO. */
dacstat = inw(dev->iobase + PCI230_DACCON);
if ((dacstat & PCI230P2_DAC_FIFO_UNDERRUN_LATCHED) != 0) {
comedi_error(dev, "AO FIFO underrun");
events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
}
}
if ((events & (COMEDI_CB_EOA | COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW))
!= 0) {
/* Stopping AO due to completion or error. */
pci230_ao_stop(dev, s);
running = 0;
} else {
running = 1;
}
async->events |= events;
return running;
}
static void pci230_handle_ai(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int events = 0;
unsigned int status_fifo;
unsigned int i;
unsigned int todo;
unsigned int fifoamount;
struct comedi_async *async = s->async;
unsigned int scanlen = async->cmd.scan_end_arg;
/* Determine number of samples to read. */
if (devpriv->ai_continuous) {
todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
} else if (devpriv->ai_scan_count == 0) {
todo = 0;
} else if ((devpriv->ai_scan_count > PCI230_ADC_FIFOLEVEL_HALFFULL)
|| (scanlen > PCI230_ADC_FIFOLEVEL_HALFFULL)) {
todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
} else {
todo = (devpriv->ai_scan_count * scanlen)
- devpriv->ai_scan_pos;
if (todo > PCI230_ADC_FIFOLEVEL_HALFFULL)
todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
}
if (todo == 0)
return;
fifoamount = 0;
for (i = 0; i < todo; i++) {
if (fifoamount == 0) {
/* Read FIFO state. */
status_fifo = inw(dev->iobase + PCI230_ADCCON);
if ((status_fifo & PCI230_ADC_FIFO_FULL_LATCHED) != 0) {
/* Report error otherwise FIFO overruns will go
* unnoticed by the caller. */
comedi_error(dev, "AI FIFO overrun");
events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
break;
} else if ((status_fifo & PCI230_ADC_FIFO_EMPTY) != 0) {
/* FIFO empty. */
break;
} else if ((status_fifo & PCI230_ADC_FIFO_HALF) != 0) {
/* FIFO half full. */
fifoamount = PCI230_ADC_FIFOLEVEL_HALFFULL;
} else {
/* FIFO not empty. */
if (devpriv->hwver > 0) {
/* Read PCI230+/260+ ADC FIFO level. */
fifoamount = inw(dev->iobase
+ PCI230P_ADCFFLEV);
if (fifoamount == 0) {
/* Shouldn't happen. */
break;
}
} else {
fifoamount = 1;
}
}
}
/* Read sample and store in Comedi's circular buffer. */
if (comedi_buf_put(async, pci230_ai_read(dev)) == 0) {
events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW;
comedi_error(dev, "AI buffer overflow");
break;
}
fifoamount--;
devpriv->ai_scan_pos++;
if (devpriv->ai_scan_pos == scanlen) {
/* End of scan. */
devpriv->ai_scan_pos = 0;
devpriv->ai_scan_count--;
async->events |= COMEDI_CB_EOS;
}
}
if (!devpriv->ai_continuous && (devpriv->ai_scan_count == 0)) {
/* End of acquisition. */
events |= COMEDI_CB_EOA;
} else {
/* More samples required, tell Comedi to block. */
events |= COMEDI_CB_BLOCK;
}
async->events |= events;
if ((async->events & (COMEDI_CB_EOA | COMEDI_CB_ERROR |
COMEDI_CB_OVERFLOW)) != 0) {
/* disable hardware conversions */
pci230_ai_stop(dev, s);
} else {
/* update FIFO interrupt trigger level */
pci230_ai_update_fifo_trigger_level(dev, s);
}
}
static void pci230_ao_stop(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned long irqflags;
unsigned char intsrc;
int started;
struct comedi_cmd *cmd;
spin_lock_irqsave(&devpriv->ao_stop_spinlock, irqflags);
started = test_and_clear_bit(AO_CMD_STARTED, &devpriv->state);
spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
if (!started)
return;
cmd = &s->async->cmd;
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Stop scan rate generator. */
pci230_cancel_ct(dev, 1);
}
/* Determine interrupt source. */
if (devpriv->hwver < 2) {
/* Not using DAC FIFO. Using CT1 interrupt. */
intsrc = PCI230_INT_ZCLK_CT1;
} else {
/* Using DAC FIFO interrupt. */
intsrc = PCI230P2_INT_DAC;
}
/* Disable interrupt and wait for interrupt routine to finish running
* unless we are called from the interrupt routine. */
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
devpriv->int_en &= ~intsrc;
while (devpriv->intr_running && devpriv->intr_cpuid != THISCPU) {
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
}
if (devpriv->ier != devpriv->int_en) {
devpriv->ier = devpriv->int_en;
outb(devpriv->ier, devpriv->iobase1 + PCI230_INT_SCE);
}
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
if (devpriv->hwver >= 2) {
/* Using DAC FIFO. Reset FIFO, clear underrun error,
* disable FIFO. */
devpriv->daccon &= PCI230_DAC_OR_MASK;
outw(devpriv->daccon | PCI230P2_DAC_FIFO_RESET
| PCI230P2_DAC_FIFO_UNDERRUN_CLEAR,
dev->iobase + PCI230_DACCON);
}
/* Release resources. */
put_all_resources(dev, OWNER_AOCMD);
}
static int pci230_ao_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
pci230_ao_stop(dev, s);
return 0;
}
static void pci230_ai_stop(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned long irqflags;
struct comedi_cmd *cmd;
int started;
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
started = test_and_clear_bit(AI_CMD_STARTED, &devpriv->state);
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
if (!started)
return;
cmd = &s->async->cmd;
if (cmd->convert_src == TRIG_TIMER) {
/* Stop conversion rate generator. */
pci230_cancel_ct(dev, 2);
}
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Stop scan period monostable. */
pci230_cancel_ct(dev, 0);
}
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
/* Disable ADC interrupt and wait for interrupt routine to finish
* running unless we are called from the interrupt routine. */
devpriv->int_en &= ~PCI230_INT_ADC;
while (devpriv->intr_running && devpriv->intr_cpuid != THISCPU) {
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
}
if (devpriv->ier != devpriv->int_en) {
devpriv->ier = devpriv->int_en;
outb(devpriv->ier, devpriv->iobase1 + PCI230_INT_SCE);
}
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
/* Reset FIFO, disable FIFO and set start conversion source to none.
* Keep se/diff and bip/uni settings */
devpriv->adccon = (devpriv->adccon & (PCI230_ADC_IR_MASK
| PCI230_ADC_IM_MASK)) |
PCI230_ADC_TRIG_NONE;
outw(devpriv->adccon | PCI230_ADC_FIFO_RESET,
dev->iobase + PCI230_ADCCON);
/* Release resources. */
put_all_resources(dev, OWNER_AICMD);
}
static int pci230_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
pci230_ai_stop(dev, s);
return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MoKee/android_kernel_google_msm | drivers/staging/olpc_dcon/olpc_dcon.c | 4896 | 20738 | /*
* Mainly by David Woodhouse, somewhat modified by Jordan Crouse
*
* Copyright © 2006-2007 Red Hat, Inc.
* Copyright © 2006-2007 Advanced Micro Devices, Inc.
* Copyright © 2009 VIA Technology, Inc.
* Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net>
*
* This program is free software. You can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/console.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <asm/tsc.h>
#include <asm/olpc.h>
#include "olpc_dcon.h"
/* Module definitions */
static ushort resumeline = 898;
module_param(resumeline, ushort, 0444);
/* Default off since it doesn't work on DCON ASIC in B-test OLPC board */
static int useaa = 1;
module_param(useaa, int, 0444);
static struct dcon_platform_data *pdata;
/* I2C structures */
/* Platform devices */
static struct platform_device *dcon_device;
static DECLARE_WAIT_QUEUE_HEAD(dcon_wait_queue);
static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
{
return i2c_smbus_write_word_data(dcon->client, reg, val);
}
static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
{
return i2c_smbus_read_word_data(dcon->client, reg);
}
/* ===== API functions - these are called by a variety of users ==== */
static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
{
uint16_t ver;
int rc = 0;
ver = dcon_read(dcon, DCON_REG_ID);
if ((ver >> 8) != 0xDC) {
printk(KERN_ERR "olpc-dcon: DCON ID not 0xDCxx: 0x%04x "
"instead.\n", ver);
rc = -ENXIO;
goto err;
}
if (is_init) {
printk(KERN_INFO "olpc-dcon: Discovered DCON version %x\n",
ver & 0xFF);
rc = pdata->init(dcon);
if (rc != 0) {
printk(KERN_ERR "olpc-dcon: Unable to init.\n");
goto err;
}
}
if (ver < 0xdc02) {
dev_err(&dcon->client->dev,
"DCON v1 is unsupported, giving up..\n");
rc = -ENODEV;
goto err;
}
/* SDRAM setup/hold time */
dcon_write(dcon, 0x3a, 0xc040);
dcon_write(dcon, 0x41, 0x0000);
dcon_write(dcon, 0x41, 0x0101);
dcon_write(dcon, 0x42, 0x0101);
/* Colour swizzle, AA, no passthrough, backlight */
if (is_init) {
dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
MODE_CSWIZZLE;
if (useaa)
dcon->disp_mode |= MODE_COL_AA;
}
dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
/* Set the scanline to interrupt on during resume */
dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
err:
return rc;
}
/*
* The smbus doesn't always come back due to what is believed to be
* hardware (power rail) bugs. For older models where this is known to
* occur, our solution is to attempt to wait for the bus to stabilize;
* if it doesn't happen, cut power to the dcon, repower it, and wait
* for the bus to stabilize. Rinse, repeat until we have a working
* smbus. For newer models, we simply BUG(); we want to know if this
* still happens despite the power fixes that have been made!
*/
static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
{
unsigned long timeout;
int x;
power_up:
if (is_powered_down) {
x = 1;
x = olpc_ec_cmd(0x26, (unsigned char *) &x, 1, NULL, 0);
if (x) {
printk(KERN_WARNING "olpc-dcon: unable to force dcon "
"to power up: %d!\n", x);
return x;
}
msleep(10); /* we'll be conservative */
}
pdata->bus_stabilize_wiggle();
for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
msleep(1);
x = dcon_read(dcon, DCON_REG_ID);
}
if (x < 0) {
printk(KERN_ERR "olpc-dcon: unable to stabilize dcon's "
"smbus, reasserting power and praying.\n");
BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
x = 0;
olpc_ec_cmd(0x26, (unsigned char *) &x, 1, NULL, 0);
msleep(100);
is_powered_down = 1;
goto power_up; /* argh, stupid hardware.. */
}
if (is_powered_down)
return dcon_hw_init(dcon, 0);
return 0;
}
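/*
 * EC command 0x26, used above and in dcon_sleep(), takes a single input
 * byte: 1 powers the DCON up, 0 powers it down.  A minimal call sketch
 * matching the usage in this file:
 *
 *	unsigned char arg = 1;		(1 = power up, 0 = power down)
 *	err = olpc_ec_cmd(0x26, &arg, 1, NULL, 0);
 *
 * A nonzero return means the EC refused and the DCON power state is
 * unchanged.
 */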
static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
{
dcon->bl_val = level;
dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
/* Purposely turn off the backlight when we go to level 0 */
if (dcon->bl_val == 0) {
dcon->disp_mode &= ~MODE_BL_ENABLE;
dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
} else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
dcon->disp_mode |= MODE_BL_ENABLE;
dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
}
}
/* Set the output type to either color or mono */
static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
{
if (dcon->mono == enable_mono)
return 0;
dcon->mono = enable_mono;
if (enable_mono) {
dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
dcon->disp_mode |= MODE_MONO_LUMA;
} else {
dcon->disp_mode &= ~(MODE_MONO_LUMA);
dcon->disp_mode |= MODE_CSWIZZLE;
if (useaa)
dcon->disp_mode |= MODE_COL_AA;
}
dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
return 0;
}
/* For now, this will be really stupid - we need to address how
* DCONLOAD works in a sleep and account for it accordingly
*/
static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
{
int x;
/* Turn off the backlight and put the DCON to sleep */
if (dcon->asleep == sleep)
return;
if (!olpc_board_at_least(olpc_board(0xc2)))
return;
if (sleep) {
x = 0;
x = olpc_ec_cmd(0x26, (unsigned char *) &x, 1, NULL, 0);
if (x)
printk(KERN_WARNING "olpc-dcon: unable to force dcon "
"to power down: %d!\n", x);
else
dcon->asleep = sleep;
} else {
/* Only re-enable the backlight if the backlight value is set */
if (dcon->bl_val != 0)
dcon->disp_mode |= MODE_BL_ENABLE;
x = dcon_bus_stabilize(dcon, 1);
if (x)
printk(KERN_WARNING "olpc-dcon: unable to reinit dcon"
" hardware: %d!\n", x);
else
dcon->asleep = sleep;
/* Restore backlight */
dcon_set_backlight(dcon, dcon->bl_val);
}
/* We should turn off some stuff in the framebuffer - but what? */
}
/* the DCON seems to get confused if we change DCONLOAD too
* frequently -- i.e., approximately faster than frame time.
* normally we don't change it this fast, so in general we won't
* delay here.
*/
static void dcon_load_holdoff(struct dcon_priv *dcon)
{
struct timespec delta_t, now;
while (1) {
getnstimeofday(&now);
delta_t = timespec_sub(now, dcon->load_time);
if (delta_t.tv_sec != 0 ||
delta_t.tv_nsec > NSEC_PER_MSEC * 20) {
break;
}
mdelay(4);
}
}
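/*
 * Worked example of the holdoff test above: if DCONLOAD was last toggled
 * at load_time = 5.000000000s and we re-enter at 5.012000000s, delta_t is
 * {0s, 12000000ns}; 12000000 < 20 * NSEC_PER_MSEC, so we mdelay(4) and
 * re-check until roughly one frame time (20ms) has elapsed.
 */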
static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
{
int err;
if (!lock_fb_info(dcon->fbinfo)) {
dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
return false;
}
console_lock();
dcon->ignore_fb_events = true;
err = fb_blank(dcon->fbinfo,
blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
dcon->ignore_fb_events = false;
console_unlock();
unlock_fb_info(dcon->fbinfo);
if (err) {
dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
blank ? "" : "un");
return false;
}
return true;
}
/* Set the source of the display (CPU or DCON) */
static void dcon_source_switch(struct work_struct *work)
{
struct dcon_priv *dcon = container_of(work, struct dcon_priv,
switch_source);
DECLARE_WAITQUEUE(wait, current);
int source = dcon->pending_src;
if (dcon->curr_src == source)
return;
dcon_load_holdoff(dcon);
dcon->switched = false;
switch (source) {
case DCON_SOURCE_CPU:
printk("dcon_source_switch to CPU\n");
/* Enable the scanline interrupt bit */
if (dcon_write(dcon, DCON_REG_MODE,
dcon->disp_mode | MODE_SCAN_INT))
printk(KERN_ERR
"olpc-dcon: couldn't enable scanline interrupt!\n");
else {
/* Wait up to one second for the scanline interrupt */
wait_event_timeout(dcon_wait_queue,
dcon->switched == true, HZ);
}
if (!dcon->switched)
printk(KERN_ERR "olpc-dcon: Timeout entering CPU mode; expect a screen glitch.\n");
/* Turn off the scanline interrupt */
if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
printk(KERN_ERR "olpc-dcon: couldn't disable scanline interrupt!\n");
/*
* Ideally we'd like to disable interrupts here so that the
* fb unblanking and DCON turn on happen at a known time value;
* however, we can't do that right now with fb_blank
* messing with semaphores.
*
* For now, we just hope..
*/
if (!dcon_blank_fb(dcon, false)) {
printk(KERN_ERR "olpc-dcon: Failed to enter CPU mode\n");
dcon->pending_src = DCON_SOURCE_DCON;
return;
}
/* And turn off the DCON */
pdata->set_dconload(1);
getnstimeofday(&dcon->load_time);
printk(KERN_INFO "olpc-dcon: The CPU has control\n");
break;
case DCON_SOURCE_DCON:
{
int t;
struct timespec delta_t;
printk(KERN_INFO "dcon_source_switch to DCON\n");
add_wait_queue(&dcon_wait_queue, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
/* Clear DCONLOAD - this implies that the DCON is in control */
pdata->set_dconload(0);
getnstimeofday(&dcon->load_time);
t = schedule_timeout(HZ/2);
remove_wait_queue(&dcon_wait_queue, &wait);
set_current_state(TASK_RUNNING);
if (!dcon->switched) {
printk(KERN_ERR "olpc-dcon: Timeout entering DCON mode; expect a screen glitch.\n");
} else {
/* sometimes the DCON doesn't follow its own rules,
* and doesn't wait for two vsync pulses before
* ack'ing the frame load with an IRQ. the result
* is that the display shows the *previously*
* loaded frame. we can detect this by looking at
* the time between asserting DCONLOAD and the IRQ --
* if it's less than 20msec, then the DCON couldn't
* have seen two VSYNC pulses. in that case we
* deassert and reassert, and hope for the best.
* see http://dev.laptop.org/ticket/9664
*/
delta_t = timespec_sub(dcon->irq_time, dcon->load_time);
if (dcon->switched && delta_t.tv_sec == 0 &&
delta_t.tv_nsec < NSEC_PER_MSEC * 20) {
printk(KERN_ERR "olpc-dcon: missed loading, retrying\n");
pdata->set_dconload(1);
mdelay(41);
pdata->set_dconload(0);
getnstimeofday(&dcon->load_time);
mdelay(41);
}
}
dcon_blank_fb(dcon, true);
printk(KERN_INFO "olpc-dcon: The DCON has control\n");
break;
}
default:
BUG();
}
dcon->curr_src = source;
}
static void dcon_set_source(struct dcon_priv *dcon, int arg)
{
if (dcon->pending_src == arg)
return;
dcon->pending_src = arg;
if ((dcon->curr_src != arg) && !work_pending(&dcon->switch_source))
schedule_work(&dcon->switch_source);
}
static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
{
dcon_set_source(dcon, arg);
flush_scheduled_work();
}
static ssize_t dcon_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
return sprintf(buf, "%4.4X\n", dcon->disp_mode);
}
static ssize_t dcon_sleep_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dcon->asleep);
}
static ssize_t dcon_freeze_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
}
static ssize_t dcon_mono_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dcon->mono);
}
static ssize_t dcon_resumeline_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", resumeline);
}
static ssize_t dcon_mono_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long enable_mono;
int rc;
rc = kstrtoul(buf, 10, &enable_mono);
if (rc)
return rc;
dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
return count;
}
static ssize_t dcon_freeze_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct dcon_priv *dcon = dev_get_drvdata(dev);
unsigned long output;
int ret;
ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
printk(KERN_INFO "dcon_freeze_store: %lu\n", output);
switch (output) {
case 0:
dcon_set_source(dcon, DCON_SOURCE_CPU);
break;
case 1:
dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
break;
case 2: /* normally unused */
dcon_set_source(dcon, DCON_SOURCE_DCON);
break;
default:
return -EINVAL;
}
return count;
}
static ssize_t dcon_resumeline_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned short rl;
int rc;
rc = kstrtou16(buf, 10, &rl);
if (rc)
return rc;
resumeline = rl;
dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
return count;
}
static ssize_t dcon_sleep_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long output;
int ret;
ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
dcon_sleep(dev_get_drvdata(dev), output ? true : false);
return count;
}
static struct device_attribute dcon_device_files[] = {
__ATTR(mode, 0444, dcon_mode_show, NULL),
__ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
__ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
__ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
};
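/*
 * These attributes appear under sysfs once the "dcon" platform device is
 * added in dcon_probe(); the exact paths depend on the platform bus
 * layout, but typically (paths assumed):
 *
 *	echo 1 > /sys/devices/platform/dcon/freeze	(display -> DCON)
 *	echo 0 > /sys/devices/platform/dcon/freeze	(display -> CPU)
 *	cat /sys/devices/platform/dcon/mode		(raw DCON_REG_MODE)
 */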
static int dcon_bl_update(struct backlight_device *dev)
{
struct dcon_priv *dcon = bl_get_data(dev);
u8 level = dev->props.brightness & 0x0F;
if (dev->props.power != FB_BLANK_UNBLANK)
level = 0;
if (level != dcon->bl_val)
dcon_set_backlight(dcon, level);
return 0;
}
static int dcon_bl_get(struct backlight_device *dev)
{
struct dcon_priv *dcon = bl_get_data(dev);
return dcon->bl_val;
}
static const struct backlight_ops dcon_bl_ops = {
.update_status = dcon_bl_update,
.get_brightness = dcon_bl_get,
};
static struct backlight_properties dcon_bl_props = {
.max_brightness = 15,
.type = BACKLIGHT_RAW,
.power = FB_BLANK_UNBLANK,
};
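/*
 * Brightness is masked to the low nibble in dcon_bl_update() to match
 * DCON_REG_BRIGHT, so userspace sees a 0-15 range via
 * /sys/class/backlight/dcon-bl/brightness; writing 0 also clears
 * MODE_BL_ENABLE through dcon_set_backlight().
 */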
static int dcon_reboot_notify(struct notifier_block *nb,
unsigned long foo, void *bar)
{
struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
if (!dcon || !dcon->client)
return 0;
/* Turn off the DCON. Entirely. */
dcon_write(dcon, DCON_REG_MODE, 0x39);
dcon_write(dcon, DCON_REG_MODE, 0x32);
return 0;
}
static int unfreeze_on_panic(struct notifier_block *nb,
unsigned long e, void *p)
{
pdata->set_dconload(1);
return NOTIFY_DONE;
}
static struct notifier_block dcon_panic_nb = {
.notifier_call = unfreeze_on_panic,
};
/*
* When the framebuffer sleeps due to external sources (e.g. user idle), power
* down the DCON as well. Power it back up when the fb comes back to life.
*/
static int dcon_fb_notifier(struct notifier_block *self,
unsigned long event, void *data)
{
struct fb_event *evdata = data;
struct dcon_priv *dcon = container_of(self, struct dcon_priv,
fbevent_nb);
int *blank = (int *) evdata->data;
if (((event != FB_EVENT_BLANK) && (event != FB_EVENT_CONBLANK)) ||
dcon->ignore_fb_events)
return 0;
dcon_sleep(dcon, *blank ? true : false);
return 0;
}
static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
{
strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
return 0;
}
static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct dcon_priv *dcon;
int rc, i, j;
if (!pdata)
return -ENXIO;
dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
if (!dcon)
return -ENOMEM;
dcon->client = client;
INIT_WORK(&dcon->switch_source, dcon_source_switch);
dcon->reboot_nb.notifier_call = dcon_reboot_notify;
dcon->reboot_nb.priority = -1;
dcon->fbevent_nb.notifier_call = dcon_fb_notifier;
i2c_set_clientdata(client, dcon);
if (num_registered_fb < 1) {
dev_err(&client->dev, "DCON driver requires a registered fb\n");
rc = -EIO;
goto einit;
}
dcon->fbinfo = registered_fb[0];
rc = dcon_hw_init(dcon, 1);
if (rc)
goto einit;
/* Add the DCON device */
dcon_device = platform_device_alloc("dcon", -1);
if (dcon_device == NULL) {
printk(KERN_ERR "dcon: Unable to create the DCON device\n");
rc = -ENOMEM;
goto eirq;
}
rc = platform_device_add(dcon_device);
platform_set_drvdata(dcon_device, dcon);
if (rc) {
printk(KERN_ERR "dcon: Unable to add the DCON device\n");
goto edev;
}
for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
rc = device_create_file(&dcon_device->dev,
&dcon_device_files[i]);
if (rc) {
dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
goto ecreate;
}
}
dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
/* Add the backlight device for the DCON */
dcon_bl_props.brightness = dcon->bl_val;
dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
dcon, &dcon_bl_ops, &dcon_bl_props);
if (IS_ERR(dcon->bl_dev)) {
dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
PTR_ERR(dcon->bl_dev));
dcon->bl_dev = NULL;
}
register_reboot_notifier(&dcon->reboot_nb);
atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
fb_register_client(&dcon->fbevent_nb);
return 0;
ecreate:
for (j = 0; j < i; j++)
device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
edev:
platform_device_unregister(dcon_device);
dcon_device = NULL;
eirq:
free_irq(DCON_IRQ, dcon);
einit:
kfree(dcon);
return rc;
}
static int dcon_remove(struct i2c_client *client)
{
struct dcon_priv *dcon = i2c_get_clientdata(client);
fb_unregister_client(&dcon->fbevent_nb);
unregister_reboot_notifier(&dcon->reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
free_irq(DCON_IRQ, dcon);
if (dcon->bl_dev)
backlight_device_unregister(dcon->bl_dev);
if (dcon_device != NULL)
platform_device_unregister(dcon_device);
cancel_work_sync(&dcon->switch_source);
kfree(dcon);
return 0;
}
#ifdef CONFIG_PM
static int dcon_suspend(struct i2c_client *client, pm_message_t state)
{
struct dcon_priv *dcon = i2c_get_clientdata(client);
if (!dcon->asleep) {
/* Set up the DCON to have the source */
dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
}
return 0;
}
static int dcon_resume(struct i2c_client *client)
{
struct dcon_priv *dcon = i2c_get_clientdata(client);
if (!dcon->asleep) {
dcon_bus_stabilize(dcon, 0);
dcon_set_source(dcon, DCON_SOURCE_CPU);
}
return 0;
}
#endif
irqreturn_t dcon_interrupt(int irq, void *id)
{
struct dcon_priv *dcon = id;
u8 status;
if (pdata->read_status(&status))
return IRQ_NONE;
switch (status & 3) {
case 3:
printk(KERN_DEBUG "olpc-dcon: DCONLOAD_MISSED interrupt\n");
break;
case 2: /* switch to DCON mode */
case 1: /* switch to CPU mode */
dcon->switched = true;
getnstimeofday(&dcon->irq_time);
wake_up(&dcon_wait_queue);
break;
case 0:
/* workaround resume case: the DCON (on 1.5) doesn't
* ever assert status 0x01 when switching to CPU mode
* during resume. this is because DCONLOAD is de-asserted
* _immediately_ upon exiting S3, so the actual release
* of the DCON happened long before this point.
* see http://dev.laptop.org/ticket/9869
*/
if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
dcon->switched = true;
getnstimeofday(&dcon->irq_time);
wake_up(&dcon_wait_queue);
printk(KERN_DEBUG "olpc-dcon: switching w/ status 0/0\n");
} else {
printk(KERN_DEBUG "olpc-dcon: scanline interrupt w/CPU\n");
}
}
return IRQ_HANDLED;
}
static const struct i2c_device_id dcon_idtable[] = {
{ "olpc_dcon", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, dcon_idtable);
struct i2c_driver dcon_driver = {
.driver = {
.name = "olpc_dcon",
},
.class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
.id_table = dcon_idtable,
.probe = dcon_probe,
.remove = __devexit_p(dcon_remove),
.detect = dcon_detect,
.address_list = normal_i2c,
#ifdef CONFIG_PM
.suspend = dcon_suspend,
.resume = dcon_resume,
#endif
};
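/*
 * With .detect, .address_list and a device class set, the i2c core
 * probes address 0x0d on matching adapters and binds "olpc_dcon"
 * automatically, so no explicit board info is needed; dcon_probe() then
 * runs against the detected client.
 */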
static int __init olpc_dcon_init(void)
{
#ifdef CONFIG_FB_OLPC_DCON_1_5
/* XO-1.5 */
if (olpc_board_at_least(olpc_board(0xd0)))
pdata = &dcon_pdata_xo_1_5;
#endif
#ifdef CONFIG_FB_OLPC_DCON_1
if (!pdata)
pdata = &dcon_pdata_xo_1;
#endif
return i2c_add_driver(&dcon_driver);
}
static void __exit olpc_dcon_exit(void)
{
i2c_del_driver(&dcon_driver);
}
module_init(olpc_dcon_init);
module_exit(olpc_dcon_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
piaoxue99/lp_l24_kernel | drivers/message/i2o/i2o_proc.c | 4896 | 53362 | /*
* procfs handler for Linux I2O subsystem
*
* (c) Copyright 1999 Deepak Saxena
*
* Originally written by Deepak Saxena(deepak@plexity.net)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This is an initial test release. The code is based on the design of the
* ide procfs system (drivers/block/ide-proc.c). Some code taken from
* i2o-core module by Alan Cox.
*
* DISCLAIMER: This code is still under development/test and may cause
* your system to behave unpredictably. Use at your own discretion.
*
*
* Fixes/additions:
* Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI),
* Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI)
* University of Helsinki, Department of Computer Science
* LAN entries
* Markus Lidel <Markus.Lidel@shadowconnect.com>
* Changes for new I2O API
*/
#define OSM_NAME "proc-osm"
#define OSM_VERSION "1.316"
#define OSM_DESCRIPTION "I2O ProcFS OSM"
#define I2O_MAX_MODULES 4
// FIXME!
#define FMT_U64_HEX "0x%08x%08x"
#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/i2o.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/* Structure used to define /proc entries */
typedef struct _i2o_proc_entry_t {
char *name; /* entry name */
umode_t mode; /* mode */
const struct file_operations *fops; /* open function */
} i2o_proc_entry;
/* global I2O /proc/i2o entry */
static struct proc_dir_entry *i2o_proc_dir_root;
/* proc OSM driver struct */
static struct i2o_driver i2o_proc_driver = {
.name = OSM_NAME,
};
static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
{
int i;
/* 19990419 -sralston
* The I2O v1.5 (and v2.0 so far) "official specification"
* got serial numbers WRONG!
* Apparently, and despite what Section 3.4.4 says and
* Figure 3-35 shows (pg 3-39 in the pdf doc),
* the convention / consensus seems to be:
* + First byte is SNFormat
* + Second byte is SNLen (but only if SNFormat==7 (?))
* + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
*/
switch (serialno[0]) {
case I2O_SNFORMAT_BINARY: /* Binary */
seq_printf(seq, "0x");
for (i = 0; i < serialno[1]; i++) {
seq_printf(seq, "%02X", serialno[2 + i]);
}
break;
case I2O_SNFORMAT_ASCII: /* ASCII */
if (serialno[1] < ' ') { /* printable or SNLen? */
/* sanity */
max_len =
(max_len < serialno[1]) ? max_len : serialno[1];
serialno[1 + max_len] = '\0';
/* just print it */
seq_printf(seq, "%s", &serialno[2]);
} else {
/* print chars for specified length */
for (i = 0; i < serialno[1]; i++) {
seq_printf(seq, "%c", serialno[2 + i]);
}
}
break;
case I2O_SNFORMAT_UNICODE: /* UNICODE */
seq_printf(seq, "UNICODE Format. Can't Display\n");
break;
case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
break;
case I2O_SNFORMAT_WAN: /* WAN MAC Address */
/* FIXME: Figure out what a WAN access address looks like?? */
seq_printf(seq, "WAN Access Address");
break;
/* plus new in v2.0 */
case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
/* FIXME: Figure out what a LAN-64 address really looks like?? */
seq_printf(seq,
"LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
serialno[8], serialno[9], &serialno[2]);
break;
case I2O_SNFORMAT_DDM: /* I2O DDM */
seq_printf(seq,
"DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
*(u16 *) & serialno[2],
*(u16 *) & serialno[4], *(u16 *) & serialno[6]);
break;
case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */
case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */
/* FIXME: Figure if this is even close?? */
seq_printf(seq,
"IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
*(u32 *) & serialno[2],
*(u32 *) & serialno[6],
*(u32 *) & serialno[10], *(u32 *) & serialno[14]);
break;
case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */
case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */
default:
seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]);
break;
}
return 0;
}
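/*
 * Worked example for the ASCII branch above: a buffer of
 * { I2O_SNFORMAT_ASCII, 0x05, 'A', 'B', 'C', 'D', 'E' } has serialno[1]
 * below ' ', so it is taken as SNLen; the terminator is then written at
 * serialno[1 + max_len], i.e. over the last serial byte, and "ABCD" is
 * printed.  If serialno[1] were itself printable, that many raw
 * characters would be printed instead.
 */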
/**
* i2o_get_class_name - do i2o class name lookup
* @class: class number
*
* Return a descriptive string for an i2o class.
*/
static const char *i2o_get_class_name(int class)
{
int idx = 16;
static char *i2o_class_name[] = {
"Executive",
"Device Driver Module",
"Block Device",
"Tape Device",
"LAN Interface",
"WAN Interface",
"Fibre Channel Port",
"Fibre Channel Device",
"SCSI Device",
"ATE Port",
"ATE Device",
"Floppy Controller",
"Floppy Device",
"Secondary Bus Port",
"Peer Transport Agent",
"Peer Transport",
"Unknown"
};
switch (class & 0xfff) {
case I2O_CLASS_EXECUTIVE:
idx = 0;
break;
case I2O_CLASS_DDM:
idx = 1;
break;
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
idx = 2;
break;
case I2O_CLASS_SEQUENTIAL_STORAGE:
idx = 3;
break;
case I2O_CLASS_LAN:
idx = 4;
break;
case I2O_CLASS_WAN:
idx = 5;
break;
case I2O_CLASS_FIBRE_CHANNEL_PORT:
idx = 6;
break;
case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
idx = 7;
break;
case I2O_CLASS_SCSI_PERIPHERAL:
idx = 8;
break;
case I2O_CLASS_ATE_PORT:
idx = 9;
break;
case I2O_CLASS_ATE_PERIPHERAL:
idx = 10;
break;
case I2O_CLASS_FLOPPY_CONTROLLER:
idx = 11;
break;
case I2O_CLASS_FLOPPY_DEVICE:
idx = 12;
break;
case I2O_CLASS_BUS_ADAPTER:
idx = 13;
break;
case I2O_CLASS_PEER_TRANSPORT_AGENT:
idx = 14;
break;
case I2O_CLASS_PEER_TRANSPORT:
idx = 15;
break;
}
return i2o_class_name[idx];
}
#define SCSI_TABLE_SIZE 13
static char *scsi_devices[] = {
"Direct-Access Read/Write",
"Sequential-Access Storage",
"Printer",
"Processor",
"WORM Device",
"CD-ROM Device",
"Scanner Device",
"Optical Memory Device",
"Medium Changer Device",
"Communications Device",
"Graphics Art Pre-Press Device",
"Graphics Art Pre-Press Device",
"Array Controller Device"
};
static char *chtostr(u8 * chars, int n)
{
	/* The buffer is static: returning a pointer to an on-stack array,
	 * as the original code did, is undefined behaviour once the
	 * function returns.  A static buffer is safe here because no
	 * caller uses two results in one statement, but it is not
	 * re-entrant. */
	static char tmp[256];

	tmp[0] = 0;
	return strncat(tmp, (char *)chars, n);
}
static int i2o_report_query_status(struct seq_file *seq, int block_status,
char *group)
{
switch (block_status) {
case -ETIMEDOUT:
return seq_printf(seq, "Timeout reading group %s.\n", group);
case -ENOMEM:
return seq_printf(seq, "No free memory to read the table.\n");
case -I2O_PARAMS_STATUS_INVALID_GROUP_ID:
return seq_printf(seq, "Group %s not supported.\n", group);
default:
return seq_printf(seq,
"Error reading group %s. BlockStatus 0x%02X\n",
group, -block_status);
}
}
static char *bus_strings[] = {
"Local Bus",
"ISA",
"EISA",
"MCA",
"PCI",
"PCMCIA",
"NUBUS",
"CARDBUS"
};
static int i2o_seq_show_hrt(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt;
u32 bus;
int i;
if (hrt->hrt_version) {
seq_printf(seq,
"HRT table for controller is too new a version.\n");
return 0;
}
seq_printf(seq, "HRT has %d entries of %d bytes each.\n",
hrt->num_entries, hrt->entry_len << 2);
for (i = 0; i < hrt->num_entries; i++) {
seq_printf(seq, "Entry %d:\n", i);
seq_printf(seq, " Adapter ID: %0#10x\n",
hrt->hrt_entry[i].adapter_id);
seq_printf(seq, " Controlling tid: %0#6x\n",
hrt->hrt_entry[i].parent_tid);
if (hrt->hrt_entry[i].bus_type != 0x80) {
bus = hrt->hrt_entry[i].bus_type;
seq_printf(seq, " %s Information\n",
bus_strings[bus]);
switch (bus) {
case I2O_BUS_LOCAL:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.local_bus.
LbBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x\n",
hrt->hrt_entry[i].bus.local_bus.
LbBaseMemoryAddress);
break;
case I2O_BUS_ISA:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.isa_bus.
IsaBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x,",
hrt->hrt_entry[i].bus.isa_bus.
IsaBaseMemoryAddress);
seq_printf(seq, " CSN: %0#4x,",
hrt->hrt_entry[i].bus.isa_bus.CSN);
break;
case I2O_BUS_EISA:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.eisa_bus.
EisaBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x,",
hrt->hrt_entry[i].bus.eisa_bus.
EisaBaseMemoryAddress);
seq_printf(seq, " Slot: %0#4x,",
hrt->hrt_entry[i].bus.eisa_bus.
EisaSlotNumber);
break;
case I2O_BUS_MCA:
seq_printf(seq, " IOBase: %0#6x,",
hrt->hrt_entry[i].bus.mca_bus.
McaBaseIOPort);
seq_printf(seq, " MemoryBase: %0#10x,",
hrt->hrt_entry[i].bus.mca_bus.
McaBaseMemoryAddress);
seq_printf(seq, " Slot: %0#4x,",
hrt->hrt_entry[i].bus.mca_bus.
McaSlotNumber);
break;
case I2O_BUS_PCI:
seq_printf(seq, " Bus: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
PciBusNumber);
seq_printf(seq, " Dev: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
PciDeviceNumber);
seq_printf(seq, " Func: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
PciFunctionNumber);
seq_printf(seq, " Vendor: %0#6x",
hrt->hrt_entry[i].bus.pci_bus.
PciVendorID);
seq_printf(seq, " Device: %0#6x\n",
hrt->hrt_entry[i].bus.pci_bus.
PciDeviceID);
break;
default:
seq_printf(seq, " Unsupported Bus Type\n");
}
} else
seq_printf(seq, " Unknown Bus Type\n");
}
return 0;
}
static int i2o_seq_show_lct(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
i2o_lct *lct = (i2o_lct *) c->lct;
int entries;
int i;
#define BUS_TABLE_SIZE 3
static char *bus_ports[] = {
"Generic Bus",
"SCSI Bus",
"Fibre Channel Bus"
};
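	/*
	 * lct->table_size counts 32-bit words: a 3-word header followed by
	 * 9-word entries, hence (table_size - 3) / 9 (assumed from the
	 * I2O 1.5 LCT layout).
	 */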
entries = (lct->table_size - 3) / 9;
seq_printf(seq, "LCT contains %d %s\n", entries,
entries == 1 ? "entry" : "entries");
if (lct->boot_tid)
seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid);
seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind);
for (i = 0; i < entries; i++) {
seq_printf(seq, "Entry %d\n", i);
seq_printf(seq, " Class, SubClass : %s",
i2o_get_class_name(lct->lct_entry[i].class_id));
/*
* Classes which we'll print subclass info for
*/
switch (lct->lct_entry[i].class_id & 0xFFF) {
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
switch (lct->lct_entry[i].sub_class) {
case 0x00:
seq_printf(seq, ", Direct-Access Read/Write");
break;
case 0x04:
seq_printf(seq, ", WORM Drive");
break;
case 0x05:
seq_printf(seq, ", CD-ROM Drive");
break;
case 0x07:
seq_printf(seq, ", Optical Memory Device");
break;
default:
seq_printf(seq, ", Unknown (0x%02x)",
lct->lct_entry[i].sub_class);
break;
}
break;
case I2O_CLASS_LAN:
switch (lct->lct_entry[i].sub_class & 0xFF) {
case 0x30:
seq_printf(seq, ", Ethernet");
break;
case 0x40:
seq_printf(seq, ", 100base VG");
break;
case 0x50:
seq_printf(seq, ", IEEE 802.5/Token-Ring");
break;
case 0x60:
seq_printf(seq, ", ANSI X3T9.5 FDDI");
break;
case 0x70:
seq_printf(seq, ", Fibre Channel");
break;
default:
seq_printf(seq, ", Unknown Sub-Class (0x%02x)",
lct->lct_entry[i].sub_class & 0xFF);
break;
}
break;
case I2O_CLASS_SCSI_PERIPHERAL:
if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
seq_printf(seq, ", %s",
scsi_devices[lct->lct_entry[i].
sub_class]);
else
seq_printf(seq, ", Unknown Device Type");
break;
case I2O_CLASS_BUS_ADAPTER:
if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
seq_printf(seq, ", %s",
bus_ports[lct->lct_entry[i].
sub_class]);
else
seq_printf(seq, ", Unknown Bus Type");
break;
}
seq_printf(seq, "\n");
seq_printf(seq, " Local TID : 0x%03x\n",
lct->lct_entry[i].tid);
seq_printf(seq, " User TID : 0x%03x\n",
lct->lct_entry[i].user_tid);
seq_printf(seq, " Parent TID : 0x%03x\n",
lct->lct_entry[i].parent_tid);
seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n",
lct->lct_entry[i].identity_tag[0],
lct->lct_entry[i].identity_tag[1],
lct->lct_entry[i].identity_tag[2],
lct->lct_entry[i].identity_tag[3],
lct->lct_entry[i].identity_tag[4],
lct->lct_entry[i].identity_tag[5],
lct->lct_entry[i].identity_tag[6],
lct->lct_entry[i].identity_tag[7]);
seq_printf(seq, " Change Indicator : %0#10x\n",
lct->lct_entry[i].change_ind);
seq_printf(seq, " Event Capab Mask : %0#10x\n",
lct->lct_entry[i].device_flags);
}
return 0;
}
static int i2o_seq_show_status(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
char prodstr[25];
int version;
i2o_status_block *sb = c->status_block.virt;
i2o_status_get(c); // reread the status block
seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id);
version = sb->i2o_version;
/* FIXME for Spec 2.0
if (version == 0x02) {
seq_printf(seq, "Lowest I2O version supported: ");
switch(workspace[2]) {
case 0x00:
seq_printf(seq, "1.0\n");
break;
case 0x01:
seq_printf(seq, "1.5\n");
break;
case 0x02:
seq_printf(seq, "2.0\n");
break;
}
seq_printf(seq, "Highest I2O version supported: ");
switch(workspace[3]) {
case 0x00:
seq_printf(seq, "1.0\n");
break;
case 0x01:
seq_printf(seq, "1.5\n");
break;
case 0x02:
seq_printf(seq, "2.0\n");
break;
}
}
*/
seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id);
seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id);
seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number);
seq_printf(seq, "I2O version : ");
switch (version) {
case 0x00:
seq_printf(seq, "1.0\n");
break;
case 0x01:
seq_printf(seq, "1.5\n");
break;
case 0x02:
seq_printf(seq, "2.0\n");
break;
default:
seq_printf(seq, "Unknown version\n");
}
seq_printf(seq, "IOP State : ");
switch (sb->iop_state) {
case 0x01:
seq_printf(seq, "INIT\n");
break;
case 0x02:
seq_printf(seq, "RESET\n");
break;
case 0x04:
seq_printf(seq, "HOLD\n");
break;
case 0x05:
seq_printf(seq, "READY\n");
break;
case 0x08:
seq_printf(seq, "OPERATIONAL\n");
break;
case 0x10:
seq_printf(seq, "FAILED\n");
break;
case 0x11:
seq_printf(seq, "FAULTED\n");
break;
default:
seq_printf(seq, "Unknown\n");
break;
}
seq_printf(seq, "Messenger Type : ");
switch (sb->msg_type) {
case 0x00:
seq_printf(seq, "Memory mapped\n");
break;
case 0x01:
seq_printf(seq, "Memory mapped only\n");
break;
case 0x02:
seq_printf(seq, "Remote only\n");
break;
case 0x03:
seq_printf(seq, "Memory mapped and remote\n");
break;
default:
seq_printf(seq, "Unknown\n");
}
seq_printf(seq, "Inbound Frame Size : %d bytes\n",
sb->inbound_frame_size << 2);
seq_printf(seq, "Max Inbound Frames : %d\n",
sb->max_inbound_frames);
seq_printf(seq, "Current Inbound Frames : %d\n",
sb->cur_inbound_frames);
seq_printf(seq, "Max Outbound Frames : %d\n",
sb->max_outbound_frames);
/* Spec doesn't say if NULL terminated or not... */
memcpy(prodstr, sb->product_id, 24);
prodstr[24] = '\0';
seq_printf(seq, "Product ID : %s\n", prodstr);
seq_printf(seq, "Expected LCT Size : %d bytes\n",
sb->expected_lct_size);
seq_printf(seq, "IOP Capabilities\n");
seq_printf(seq, " Context Field Size Support : ");
switch (sb->iop_capabilities & 0x0000003) {
case 0:
seq_printf(seq, "Supports only 32-bit context fields\n");
break;
case 1:
seq_printf(seq, "Supports only 64-bit context fields\n");
break;
case 2:
seq_printf(seq, "Supports 32-bit and 64-bit context fields, "
"but not concurrently\n");
break;
case 3:
seq_printf(seq, "Supports 32-bit and 64-bit context fields "
"concurrently\n");
break;
default:
seq_printf(seq, "0x%08x\n", sb->iop_capabilities);
}
seq_printf(seq, " Current Context Field Size : ");
switch (sb->iop_capabilities & 0x0000000C) {
case 0:
seq_printf(seq, "not configured\n");
break;
case 4:
seq_printf(seq, "Supports only 32-bit context fields\n");
break;
case 8:
seq_printf(seq, "Supports only 64-bit context fields\n");
break;
case 12:
seq_printf(seq, "Supports both 32-bit or 64-bit context fields "
"concurrently\n");
break;
default:
seq_printf(seq, "\n");
}
seq_printf(seq, " Inbound Peer Support : %s\n",
(sb->
iop_capabilities & 0x00000010) ? "Supported" :
"Not supported");
seq_printf(seq, " Outbound Peer Support : %s\n",
(sb->
iop_capabilities & 0x00000020) ? "Supported" :
"Not supported");
seq_printf(seq, " Peer to Peer Support : %s\n",
(sb->
iop_capabilities & 0x00000040) ? "Supported" :
"Not supported");
seq_printf(seq, "Desired private memory size : %d kB\n",
sb->desired_mem_size >> 10);
seq_printf(seq, "Allocated private memory size : %d kB\n",
sb->current_mem_size >> 10);
seq_printf(seq, "Private memory base address : %0#10x\n",
sb->current_mem_base);
seq_printf(seq, "Desired private I/O size : %d kB\n",
sb->desired_io_size >> 10);
seq_printf(seq, "Allocated private I/O size : %d kB\n",
sb->current_io_size >> 10);
seq_printf(seq, "Private I/O base address : %0#10x\n",
sb->current_io_base);
return 0;
}
static int i2o_seq_show_hw(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
static u32 work32[5];
static u8 *work8 = (u8 *) work32;
static u16 *work16 = (u16 *) work32;
int token;
u32 hwcap;
static char *cpu_table[] = {
"Intel 80960 series",
"AMD2900 series",
"Motorola 68000 series",
"ARM series",
"MIPS series",
"Sparc series",
"PowerPC series",
"Intel x86 series"
};
token =
i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token, "0x0000 IOP Hardware");
return 0;
}
seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]);
seq_printf(seq, "Product ID : %0#6x\n", work16[1]);
seq_printf(seq, "CPU : ");
	if (work8[16] >= 8)	/* cpu_table has 8 entries (indices 0-7) */
seq_printf(seq, "Unknown\n");
else
seq_printf(seq, "%s\n", cpu_table[work8[16]]);
/* Anyone using ProcessorVersion? */
seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10);
seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10);
hwcap = work32[3];
seq_printf(seq, "Capabilities : 0x%08x\n", hwcap);
seq_printf(seq, " [%s] Self booting\n",
(hwcap & 0x00000001) ? "+" : "-");
seq_printf(seq, " [%s] Upgradable IRTOS\n",
(hwcap & 0x00000002) ? "+" : "-");
seq_printf(seq, " [%s] Supports downloading DDMs\n",
(hwcap & 0x00000004) ? "+" : "-");
seq_printf(seq, " [%s] Supports installing DDMs\n",
(hwcap & 0x00000008) ? "+" : "-");
seq_printf(seq, " [%s] Battery-backed RAM\n",
(hwcap & 0x00000010) ? "+" : "-");
return 0;
}
/* Executive group 0003h - Executing DDM List (table) */
static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
int token;
int i;
typedef struct _i2o_exec_execute_ddm_table {
u16 ddm_tid;
u8 module_type;
u8 reserved;
u16 i2o_vendor_id;
u16 module_id;
u8 module_name_version[28];
u32 data_size;
u32 code_size;
} i2o_exec_execute_ddm_table;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES];
} *result;
i2o_exec_execute_ddm_table ddm_table;
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return -ENOMEM;
token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1,
NULL, 0, result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0x0003 Executing DDM List");
goto out;
}
seq_printf(seq,
"Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n");
	for (i = 0; i < result->row_count; i++) {
		/* Copy inside the loop body: the old "ddm_table[++i]"
		 * update expression read one entry past the end of
		 * ddm_table[] on the final iteration. */
		ddm_table = result->ddm_table[i];
seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF);
switch (ddm_table.module_type) {
case 0x01:
seq_printf(seq, "Downloaded DDM ");
break;
case 0x22:
seq_printf(seq, "Embedded DDM ");
break;
default:
seq_printf(seq, " ");
}
seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
seq_printf(seq, "%-#8x", ddm_table.module_id);
seq_printf(seq, "%-29s",
chtostr(ddm_table.module_name_version, 28));
seq_printf(seq, "%9d ", ddm_table.data_size);
seq_printf(seq, "%8d", ddm_table.code_size);
seq_printf(seq, "\n");
}
out:
kfree(result);
return 0;
}
/* Executive group 0004h - Driver Store (scalar) */
static int i2o_seq_show_driver_store(struct seq_file *seq, void *v)
{
struct i2o_controller *c = (struct i2o_controller *)seq->private;
u32 work32[8];
int token;
token =
i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token, "0x0004 Driver Store");
return 0;
}
seq_printf(seq, "Module limit : %d\n"
"Module count : %d\n"
"Current space : %d kB\n"
"Free space : %d kB\n",
work32[0], work32[1], work32[2] >> 10, work32[3] >> 10);
return 0;
}
/* Executive group 0005h - Driver Store Table (table) */
static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
{
typedef struct _i2o_driver_store {
u16 stored_ddm_index;
u8 module_type;
u8 reserved;
u16 i2o_vendor_id;
u16 module_id;
u8 module_name_version[28];
u8 date[8];
u32 module_size;
u32 mpb_size;
u32 module_flags;
} i2o_driver_store_table;
struct i2o_controller *c = (struct i2o_controller *)seq->private;
int token;
int i;
typedef struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_driver_store_table dst[I2O_MAX_MODULES];
} i2o_driver_result_table;
i2o_driver_result_table *result;
i2o_driver_store_table *dst;
result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
if (result == NULL)
return -ENOMEM;
token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1,
NULL, 0, result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0x0005 DRIVER STORE TABLE");
kfree(result);
return 0;
}
seq_printf(seq,
"# Module_type Vendor Mod_id Module_name Vrs"
"Date Mod_size Par_size Flags\n");
for (i = 0, dst = &result->dst[0]; i < result->row_count;
dst = &result->dst[++i]) {
seq_printf(seq, "%-3d", dst->stored_ddm_index);
switch (dst->module_type) {
case 0x01:
seq_printf(seq, "Downloaded DDM ");
break;
case 0x22:
seq_printf(seq, "Embedded DDM ");
break;
default:
seq_printf(seq, " ");
}
seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
seq_printf(seq, "%-#8x", dst->module_id);
seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
seq_printf(seq, "%-9s", chtostr(dst->date, 8));
seq_printf(seq, "%8d ", dst->module_size);
seq_printf(seq, "%8d ", dst->mpb_size);
seq_printf(seq, "0x%04x", dst->module_flags);
seq_printf(seq, "\n");
}
kfree(result);
return 0;
}
/* Generic group F000h - Params Descriptor (table) */
static int i2o_seq_show_groups(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
u8 properties;
typedef struct _i2o_group_info {
u16 group_number;
u16 field_count;
u16 row_count;
u8 properties;
u8 reserved;
} i2o_group_info;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_group_info group[256];
} *result;
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return -ENOMEM;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0,
result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF000 Params Descriptor");
goto out;
}
seq_printf(seq,
"# Group FieldCount RowCount Type Add Del Clear\n");
for (i = 0; i < result->row_count; i++) {
seq_printf(seq, "%-3d", i);
seq_printf(seq, "0x%04X ", result->group[i].group_number);
seq_printf(seq, "%10d ", result->group[i].field_count);
seq_printf(seq, "%8d ", result->group[i].row_count);
properties = result->group[i].properties;
if (properties & 0x1)
seq_printf(seq, "Table ");
else
seq_printf(seq, "Scalar ");
if (properties & 0x2)
seq_printf(seq, " + ");
else
seq_printf(seq, " - ");
if (properties & 0x4)
seq_printf(seq, " + ");
else
seq_printf(seq, " - ");
if (properties & 0x8)
seq_printf(seq, " + ");
else
seq_printf(seq, " - ");
seq_printf(seq, "\n");
}
if (result->more_flag)
seq_printf(seq, "There is more...\n");
out:
kfree(result);
return 0;
}
/* Generic group F001h - Physical Device Table (table) */
static int i2o_seq_show_phys_device(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
u32 adapter_id[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF001 Physical Device Table");
return 0;
}
if (result.row_count)
seq_printf(seq, "# AdapterId\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%-2d", i);
seq_printf(seq, "%#7x\n", result.adapter_id[i]);
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F002h - Claimed Table (table) */
static int i2o_seq_show_claimed(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
u16 claimed_tid[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF002 Claimed Table");
return 0;
}
if (result.row_count)
seq_printf(seq, "# ClaimedTid\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%-2d", i);
seq_printf(seq, "%#7x\n", result.claimed_tid[i]);
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F003h - User Table (table) */
static int i2o_seq_show_users(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
typedef struct _i2o_user_table {
u16 instance;
u16 user_tid;
u8 claim_type;
u8 reserved1;
u16 reserved2;
} i2o_user_table;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_user_table user[64];
} *result;
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return -ENOMEM;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0,
result, sizeof(*result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF003 User Table");
goto out;
}
seq_printf(seq, "# Instance UserTid ClaimType\n");
for (i = 0; i < result->row_count; i++) {
seq_printf(seq, "%-3d", i);
seq_printf(seq, "%#8x ", result->user[i].instance);
seq_printf(seq, "%#7x ", result->user[i].user_tid);
seq_printf(seq, "%#9x\n", result->user[i].claim_type);
}
if (result->more_flag)
seq_printf(seq, "There is more...\n");
out:
kfree(result);
return 0;
}
/* Generic group F005h - Private message extensions (table) (optional) */
static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
typedef struct _i2o_private {
u16 ext_instance;
u16 organization_id;
u16 x_function_code;
} i2o_private;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
i2o_private extension[64];
} result;
	/* query group F005h (the original code mistakenly queried 0xF000) */
	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF005 Private Message Extensions (optional)");
return 0;
}
seq_printf(seq, "Instance# OrgId FunctionCode\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%0#9x ", result.extension[i].ext_instance);
seq_printf(seq, "%0#6x ", result.extension[i].organization_id);
seq_printf(seq, "%0#6x", result.extension[i].x_function_code);
seq_printf(seq, "\n");
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F006h - Authorized User Table (table) */
static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
int i;
struct {
u16 result_count;
u16 pad;
u16 block_size;
u8 block_status;
u8 error_info_size;
u16 row_count;
u16 more_flag;
u32 alternate_tid[64];
} result;
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0,
&result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF006 Autohorized User Table");
return 0;
}
if (result.row_count)
seq_printf(seq, "# AlternateTid\n");
for (i = 0; i < result.row_count; i++) {
seq_printf(seq, "%-2d", i);
seq_printf(seq, "%#7x ", result.alternate_tid[i]);
}
if (result.more_flag)
seq_printf(seq, "There is more...\n");
return 0;
}
/* Generic group F100h - Device Identity (scalar) */
static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
// == (allow) 512d bytes (max)
static u16 *work16 = (u16 *) work32;
int token;
token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF100 Device Identity");
return 0;
}
seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
seq_printf(seq, "Vendor info : %s\n",
chtostr((u8 *) (work32 + 2), 16));
seq_printf(seq, "Product info : %s\n",
chtostr((u8 *) (work32 + 6), 16));
seq_printf(seq, "Description : %s\n",
chtostr((u8 *) (work32 + 10), 16));
seq_printf(seq, "Product rev. : %s\n",
chtostr((u8 *) (work32 + 14), 8));
seq_printf(seq, "Serial number : ");
print_serial_number(seq, (u8 *) (work32 + 16),
/* allow for SNLen plus
* possible trailing '\0'
*/
sizeof(work32) - (16 * sizeof(u32)) - 2);
seq_printf(seq, "\n");
return 0;
}
static int i2o_seq_show_dev_name(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
seq_printf(seq, "%s\n", dev_name(&d->device));
return 0;
}
/* Generic group F101h - DDM Identity (scalar) */
static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
struct {
u16 ddm_tid;
u8 module_name[24];
u8 module_rev[8];
u8 sn_format;
u8 serial_number[12];
u8 pad[256]; // allow up to 256 byte (max) serial number
} result;
token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF101 DDM Identity");
return 0;
}
seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
seq_printf(seq, "Module name : %s\n",
chtostr(result.module_name, 24));
seq_printf(seq, "Module revision : %s\n",
chtostr(result.module_rev, 8));
seq_printf(seq, "Serial number : ");
print_serial_number(seq, result.serial_number, sizeof(result) - 36);
/* allow for SNLen plus possible trailing '\0' */
seq_printf(seq, "\n");
return 0;
}
/* Generic group F102h - User Information (scalar) */
static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
struct {
u8 device_name[64];
u8 service_name[64];
u8 physical_location[64];
u8 instance_number[4];
} result;
token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token, "0xF102 User Information");
return 0;
}
seq_printf(seq, "Device name : %s\n",
chtostr(result.device_name, 64));
seq_printf(seq, "Service name : %s\n",
chtostr(result.service_name, 64));
seq_printf(seq, "Physical name : %s\n",
chtostr(result.physical_location, 64));
seq_printf(seq, "Instance number : %s\n",
chtostr(result.instance_number, 4));
return 0;
}
/* Generic group F103h - SGL Operating Limits (scalar) */
static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
static u32 work32[12];
static u16 *work16 = (u16 *) work32;
static u8 *work8 = (u8 *) work32;
int token;
token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF103 SGL Operating Limits");
return 0;
}
seq_printf(seq, "SGL chain size : %d\n", work32[0]);
seq_printf(seq, "Max SGL chain size : %d\n", work32[1]);
seq_printf(seq, "SGL chain size target : %d\n", work32[2]);
seq_printf(seq, "SGL frag count : %d\n", work16[6]);
seq_printf(seq, "Max SGL frag count : %d\n", work16[7]);
seq_printf(seq, "SGL frag count target : %d\n", work16[8]);
/* FIXME
if (d->i2oversion == 0x02)
{
*/
seq_printf(seq, "SGL data alignment : %d\n", work16[8]);
seq_printf(seq, "SGL addr limit : %d\n", work8[20]);
seq_printf(seq, "SGL addr sizes supported : ");
if (work8[21] & 0x01)
seq_printf(seq, "32 bit ");
if (work8[21] & 0x02)
seq_printf(seq, "64 bit ");
if (work8[21] & 0x04)
seq_printf(seq, "96 bit ");
if (work8[21] & 0x08)
seq_printf(seq, "128 bit ");
seq_printf(seq, "\n");
/*
}
*/
return 0;
}
/* Generic group F200h - Sensors (scalar) */
static int i2o_seq_show_sensors(struct seq_file *seq, void *v)
{
struct i2o_device *d = (struct i2o_device *)seq->private;
int token;
struct {
u16 sensor_instance;
u8 component;
u16 component_instance;
u8 sensor_class;
u8 sensor_type;
u8 scaling_exponent;
u32 actual_reading;
u32 minimum_reading;
		u32 low2lowcat_threshold;
		u32 lowcat2low_threshold;
		u32 lowwarn2low_threshold;
		u32 low2lowwarn_threshold;
		u32 norm2lowwarn_threshold;
		u32 lowwarn2norm_threshold;
		u32 nominal_reading;
		u32 hiwarn2norm_threshold;
		u32 norm2hiwarn_threshold;
		u32 high2hiwarn_threshold;
		u32 hiwarn2high_threshold;
		u32 hicat2high_threshold;
		u32 hi2hicat_threshold;
u32 maximum_reading;
u8 sensor_state;
u16 event_enable;
} result;
token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result));
if (token < 0) {
i2o_report_query_status(seq, token,
"0xF200 Sensors (optional)");
return 0;
}
seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance);
seq_printf(seq, "Component : %d = ", result.component);
switch (result.component) {
case 0:
seq_printf(seq, "Other");
break;
case 1:
seq_printf(seq, "Planar logic Board");
break;
case 2:
seq_printf(seq, "CPU");
break;
case 3:
seq_printf(seq, "Chassis");
break;
case 4:
seq_printf(seq, "Power Supply");
break;
case 5:
seq_printf(seq, "Storage");
break;
case 6:
seq_printf(seq, "External");
break;
}
seq_printf(seq, "\n");
seq_printf(seq, "Component instance : %d\n",
result.component_instance);
seq_printf(seq, "Sensor class : %s\n",
result.sensor_class ? "Analog" : "Digital");
seq_printf(seq, "Sensor type : %d = ", result.sensor_type);
switch (result.sensor_type) {
case 0:
seq_printf(seq, "Other\n");
break;
case 1:
seq_printf(seq, "Thermal\n");
break;
case 2:
seq_printf(seq, "DC voltage (DC volts)\n");
break;
case 3:
seq_printf(seq, "AC voltage (AC volts)\n");
break;
case 4:
seq_printf(seq, "DC current (DC amps)\n");
break;
case 5:
seq_printf(seq, "AC current (AC volts)\n");
break;
case 6:
seq_printf(seq, "Door open\n");
break;
case 7:
seq_printf(seq, "Fan operational\n");
break;
}
seq_printf(seq, "Scaling exponent : %d\n",
result.scaling_exponent);
seq_printf(seq, "Actual reading : %d\n", result.actual_reading);
seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading);
seq_printf(seq, "Low2LowCat treshold : %d\n",
result.low2lowcat_treshold);
seq_printf(seq, "LowCat2Low treshold : %d\n",
result.lowcat2low_treshold);
seq_printf(seq, "LowWarn2Low treshold : %d\n",
result.lowwarn2low_treshold);
seq_printf(seq, "Low2LowWarn treshold : %d\n",
result.low2lowwarn_treshold);
seq_printf(seq, "Norm2LowWarn treshold : %d\n",
result.norm2lowwarn_treshold);
seq_printf(seq, "LowWarn2Norm treshold : %d\n",
result.lowwarn2norm_treshold);
seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading);
seq_printf(seq, "HiWarn2Norm treshold : %d\n",
result.hiwarn2norm_treshold);
seq_printf(seq, "Norm2HiWarn treshold : %d\n",
result.norm2hiwarn_treshold);
seq_printf(seq, "High2HiWarn treshold : %d\n",
result.high2hiwarn_treshold);
seq_printf(seq, "HiWarn2High treshold : %d\n",
result.hiwarn2high_treshold);
seq_printf(seq, "HiCat2High treshold : %d\n",
result.hicat2high_treshold);
seq_printf(seq, "High2HiCat treshold : %d\n",
result.hi2hicat_treshold);
seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading);
seq_printf(seq, "Sensor state : %d = ", result.sensor_state);
switch (result.sensor_state) {
case 0:
seq_printf(seq, "Normal\n");
break;
case 1:
seq_printf(seq, "Abnormal\n");
break;
case 2:
seq_printf(seq, "Unknown\n");
break;
case 3:
seq_printf(seq, "Low Catastrophic (LoCat)\n");
break;
case 4:
seq_printf(seq, "Low (Low)\n");
break;
case 5:
seq_printf(seq, "Low Warning (LoWarn)\n");
break;
case 6:
seq_printf(seq, "High Warning (HiWarn)\n");
break;
case 7:
seq_printf(seq, "High (High)\n");
break;
case 8:
seq_printf(seq, "High Catastrophic (HiCat)\n");
break;
}
seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable);
seq_printf(seq, " [%s] Operational state change. \n",
(result.event_enable & 0x01) ? "+" : "-");
seq_printf(seq, " [%s] Low catastrophic. \n",
(result.event_enable & 0x02) ? "+" : "-");
seq_printf(seq, " [%s] Low reading. \n",
(result.event_enable & 0x04) ? "+" : "-");
seq_printf(seq, " [%s] Low warning. \n",
(result.event_enable & 0x08) ? "+" : "-");
seq_printf(seq,
" [%s] Change back to normal from out of range state. \n",
(result.event_enable & 0x10) ? "+" : "-");
seq_printf(seq, " [%s] High warning. \n",
(result.event_enable & 0x20) ? "+" : "-");
seq_printf(seq, " [%s] High reading. \n",
(result.event_enable & 0x40) ? "+" : "-");
seq_printf(seq, " [%s] High catastrophic. \n",
(result.event_enable & 0x80) ? "+" : "-");
return 0;
}
static int i2o_seq_open_hrt(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_hrt, PDE(inode)->data);
};
static int i2o_seq_open_lct(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_lct, PDE(inode)->data);
};
static int i2o_seq_open_status(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_status, PDE(inode)->data);
};
static int i2o_seq_open_hw(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_hw, PDE(inode)->data);
};
static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data);
};
static int i2o_seq_open_driver_store(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data);
};
static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data);
};
static int i2o_seq_open_groups(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_groups, PDE(inode)->data);
};
static int i2o_seq_open_phys_device(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data);
};
static int i2o_seq_open_claimed(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_claimed, PDE(inode)->data);
};
static int i2o_seq_open_users(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_users, PDE(inode)->data);
};
static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data);
};
static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_authorized_users,
PDE(inode)->data);
};
static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data);
};
static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data);
};
static int i2o_seq_open_uinfo(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data);
};
static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data);
};
static int i2o_seq_open_sensors(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_sensors, PDE(inode)->data);
};
static int i2o_seq_open_dev_name(struct inode *inode, struct file *file)
{
return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data);
};
static const struct file_operations i2o_seq_fops_lct = {
.open = i2o_seq_open_lct,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_hrt = {
.open = i2o_seq_open_hrt,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_status = {
.open = i2o_seq_open_status,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_hw = {
.open = i2o_seq_open_hw,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_ddm_table = {
.open = i2o_seq_open_ddm_table,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_driver_store = {
.open = i2o_seq_open_driver_store,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_drivers_stored = {
.open = i2o_seq_open_drivers_stored,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_groups = {
.open = i2o_seq_open_groups,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_phys_device = {
.open = i2o_seq_open_phys_device,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_claimed = {
.open = i2o_seq_open_claimed,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_users = {
.open = i2o_seq_open_users,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_priv_msgs = {
.open = i2o_seq_open_priv_msgs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_authorized_users = {
.open = i2o_seq_open_authorized_users,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_dev_name = {
.open = i2o_seq_open_dev_name,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_dev_identity = {
.open = i2o_seq_open_dev_identity,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_ddm_identity = {
.open = i2o_seq_open_ddm_identity,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_uinfo = {
.open = i2o_seq_open_uinfo,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_sgl_limits = {
.open = i2o_seq_open_sgl_limits,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations i2o_seq_fops_sensors = {
.open = i2o_seq_open_sensors,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* IOP specific entries...write field just in case someone
* ever wants one.
*/
static i2o_proc_entry i2o_proc_generic_iop_entries[] = {
{"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt},
{"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct},
{"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status},
{"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw},
{"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table},
{"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store},
{"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored},
{NULL, 0, NULL}
};
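#if 0
/*
 * Editor's sketch, not part of the driver: the comment above keeps the
 * write field "just in case someone ever wants one".  A writable IOP
 * entry would simply add a write bit to the mode field; the entry name
 * and the i2o_seq_fops_hypothetical_rw fops below are made up.
 */
static i2o_proc_entry i2o_proc_rw_iop_entries[] = {
	{"hypothetical_rw", S_IFREG | S_IRUGO | S_IWUSR,
	 &i2o_seq_fops_hypothetical_rw},
	{NULL, 0, NULL}
};
#endif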
/*
* Device specific entries
*/
static i2o_proc_entry generic_dev_entries[] = {
{"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups},
{"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device},
{"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed},
{"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users},
{"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs},
{"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users},
{"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity},
{"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity},
{"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo},
{"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits},
{"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors},
{NULL, 0, NULL}
};
/*
* Storage unit specific entries (SCSI Periph, BS) with device names
*/
static i2o_proc_entry rbs_dev_entries[] = {
{"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name},
{NULL, 0, NULL}
};
/**
* i2o_proc_create_entries - Creates proc dir entries
* @dir: proc dir entry under which the entries should be placed
* @i2o_pe: pointer to the entries which should be added
* @data: pointer to I2O controller or device
*
* Create proc dir entries for an I2O controller or I2O device.
*
* Returns 0 on success or negative error code on failure.
*/
static int i2o_proc_create_entries(struct proc_dir_entry *dir,
i2o_proc_entry * i2o_pe, void *data)
{
struct proc_dir_entry *tmp;
while (i2o_pe->name) {
tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
i2o_pe->fops, data);
if (!tmp)
return -1;
i2o_pe++;
}
return 0;
}
/**
* i2o_proc_subdir_remove - Remove child entries from a proc entry
* @dir: proc dir entry from which the children should be removed
*
* Iterate over each i2o proc entry under dir and remove it. If the child
* also has entries, remove them too.
*/
static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
{
struct proc_dir_entry *pe, *tmp;
pe = dir->subdir;
while (pe) {
tmp = pe->next;
i2o_proc_subdir_remove(pe);
remove_proc_entry(pe->name, dir);
pe = tmp;
}
};
/**
* i2o_proc_device_add - Add an I2O device to the proc dir
* @dir: proc dir entry to which the device should be added
* @dev: I2O device which should be added
*
* Add an I2O device to the proc dir entry dir and create the entries for
* the device depending on the class of the I2O device.
*/
static void i2o_proc_device_add(struct proc_dir_entry *dir,
struct i2o_device *dev)
{
char buff[10];
struct proc_dir_entry *devdir;
i2o_proc_entry *i2o_pe = NULL;
sprintf(buff, "%03x", dev->lct_data.tid);
osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);
devdir = proc_mkdir(buff, dir);
if (!devdir) {
osm_warn("Could not allocate procdir!\n");
return;
}
devdir->data = dev;
i2o_proc_create_entries(devdir, generic_dev_entries, dev);
/* Inform core that we want updates about this device's status */
switch (dev->lct_data.class_id) {
case I2O_CLASS_SCSI_PERIPHERAL:
case I2O_CLASS_RANDOM_BLOCK_STORAGE:
i2o_pe = rbs_dev_entries;
break;
default:
break;
}
if (i2o_pe)
i2o_proc_create_entries(devdir, i2o_pe, dev);
}
/**
* i2o_proc_iop_add - Add an I2O controller to the i2o proc tree
* @dir: parent proc dir entry
* @c: I2O controller which should be added
*
* Add the entries to the parent proc dir entry. Also each device is added
* to the controllers proc dir entry.
*
* Returns 0 on success or negative error code on failure.
*/
static int i2o_proc_iop_add(struct proc_dir_entry *dir,
struct i2o_controller *c)
{
struct proc_dir_entry *iopdir;
struct i2o_device *dev;
osm_debug("adding IOP /proc/i2o/%s\n", c->name);
iopdir = proc_mkdir(c->name, dir);
if (!iopdir)
return -1;
iopdir->data = c;
i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);
list_for_each_entry(dev, &c->devices, list)
i2o_proc_device_add(iopdir, dev);
return 0;
}
/**
* i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
* @dir: parent proc dir entry
* @c: I2O controller which should be removed
*
* Iterate over each i2o proc entry and search for controller c. If it is found,
* remove it from the tree.
*/
static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
struct i2o_controller *c)
{
struct proc_dir_entry *pe, *tmp;
pe = dir->subdir;
while (pe) {
tmp = pe->next;
if (pe->data == c) {
i2o_proc_subdir_remove(pe);
remove_proc_entry(pe->name, dir);
}
osm_debug("removing IOP /proc/i2o/%s\n", c->name);
pe = tmp;
}
}
/**
* i2o_proc_fs_create - Create the i2o proc fs.
*
* Iterate over each I2O controller and create the entries for it.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_proc_fs_create(void)
{
struct i2o_controller *c;
i2o_proc_dir_root = proc_mkdir("i2o", NULL);
if (!i2o_proc_dir_root)
return -1;
list_for_each_entry(c, &i2o_controllers, list)
i2o_proc_iop_add(i2o_proc_dir_root, c);
return 0;
};
/**
* i2o_proc_fs_destroy - Clean up all i2o proc entries
*
* Iterate over each I2O controller and remove the entries for it.
*
* Returns 0 on success or negative error code on failure.
*/
static int __exit i2o_proc_fs_destroy(void)
{
struct i2o_controller *c;
list_for_each_entry(c, &i2o_controllers, list)
i2o_proc_iop_remove(i2o_proc_dir_root, c);
remove_proc_entry("i2o", NULL);
return 0;
};
/**
* i2o_proc_init - Init function for procfs
*
* Registers Proc OSM and creates procfs entries.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_proc_init(void)
{
int rc;
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
rc = i2o_driver_register(&i2o_proc_driver);
if (rc)
return rc;
rc = i2o_proc_fs_create();
if (rc) {
i2o_driver_unregister(&i2o_proc_driver);
return rc;
}
return 0;
};
/**
* i2o_proc_exit - Exit function for procfs
*
* Unregisters Proc OSM and removes procfs entries.
*/
static void __exit i2o_proc_exit(void)
{
i2o_driver_unregister(&i2o_proc_driver);
i2o_proc_fs_destroy();
};
MODULE_AUTHOR("Deepak Saxena");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);
module_init(i2o_proc_init);
module_exit(i2o_proc_exit);
| gpl-2.0 |
jiangdapeng/btrfs-next | drivers/gpu/drm/gma500/mmu.c | 7712 | 18536 | /**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
**************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
/*
* Code for the SGX MMU:
*/
/*
* clflush on one processor only:
* clflush should apparently flush the cache line on all processors in an
* SMP system.
*/
/*
* kmap atomic:
* The usage of the slots must be completely encapsulated within a spinlock, and
* no other functions that may be using the locks for other purposes may be
* called from within the locked region.
* Since the slots are per processor, this will guarantee that we are the only
* user.
*/
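#if 0
/*
 * Editor's sketch, not part of the driver: the minimal shape of the
 * pattern required above.  Nothing else that touches the atomic kmap
 * slots may run between spin_lock() and spin_unlock().
 */
static void kmap_slot_pattern_example(spinlock_t *lock, struct page *page)
{
	uint32_t *v;

	spin_lock(lock);
	v = kmap_atomic(page);
	/* ... access the page through v, and only through v ... */
	kunmap_atomic(v);
	spin_unlock(lock);
}
#endif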
/*
* TODO: Inserting ptes from an interrupt handler:
* This may be desirable for some SGX functionality where the GPU can fault in
* needed pages. For that, we need to make an atomic insert_pages function, that
* may fail.
* If it fails, the caller need to insert the page using a workqueue function,
* but on average it should be fast.
*/
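#if 0
/*
 * Editor's sketch, not part of the driver: one possible shape for the
 * atomic insert described in the TODO above.  The function name and the
 * -EAGAIN contract are assumptions; a real version would also have to
 * avoid taking the driver rw-semaphore, which is not allowed from
 * interrupt context.
 */
static int psb_mmu_insert_pfn_atomic(struct psb_mmu_pd *pd, uint32_t pfn,
				     unsigned long addr, int type)
{
	struct psb_mmu_pt *pt;

	/* Only touch a page table that already exists; never allocate. */
	pt = psb_mmu_pt_map_lock(pd, addr);
	if (!pt)
		return -EAGAIN;	/* caller falls back to a workqueue */

	psb_mmu_set_pte(pt, addr, psb_mmu_mask_pte(pfn, type));
	pt->count++;
	psb_mmu_pt_unmap_unlock(pt);
	return 0;
}
#endif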
struct psb_mmu_driver {
/* protects driver- and pd structures. Always take in read mode
* before taking the page table spinlock.
*/
struct rw_semaphore sem;
/* protects page tables, directory tables and pt tables.
* and pt structures.
*/
spinlock_t lock;
atomic_t needs_tlbflush;
uint8_t __iomem *register_map;
struct psb_mmu_pd *default_pd;
/*uint32_t bif_ctrl;*/
int has_clflush;
int clflush_add;
unsigned long clflush_mask;
struct drm_psb_private *dev_priv;
};
struct psb_mmu_pd;
struct psb_mmu_pt {
struct psb_mmu_pd *pd;
uint32_t index;
uint32_t count;
struct page *p;
uint32_t *v;
};
struct psb_mmu_pd {
struct psb_mmu_driver *driver;
int hw_context;
struct psb_mmu_pt **tables;
struct page *p;
struct page *dummy_pt;
struct page *dummy_page;
uint32_t pd_mask;
uint32_t invalid_pde;
uint32_t invalid_pte;
};
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}
static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
return offset >> PSB_PDE_SHIFT;
}
static inline void psb_clflush(void *addr)
{
__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}
static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
void *addr)
{
if (!driver->has_clflush)
return;
mb();
psb_clflush(addr);
mb();
}
static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
{
uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
uint32_t clflush_count = PAGE_SIZE / clflush_add;
int i;
uint8_t *clf;
clf = kmap_atomic(page);
mb();
for (i = 0; i < clflush_count; ++i) {
psb_clflush(clf);
clf += clflush_add;
}
mb();
kunmap_atomic(clf);
}
static void psb_pages_clflush(struct psb_mmu_driver *driver,
struct page *page[], unsigned long num_pages)
{
int i;
if (!driver->has_clflush)
return ;
for (i = 0; i < num_pages; i++)
psb_page_clflush(driver, *page++);
}
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
int force)
{
atomic_set(&driver->needs_tlbflush, 0);
}
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
down_write(&driver->sem);
psb_mmu_flush_pd_locked(driver, force);
up_write(&driver->sem);
}
void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
{
if (rc_prot)
down_write(&driver->sem);
if (rc_prot)
up_write(&driver->sem);
}
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
/*ttm_tt_cache_flush(&pd->p, 1);*/
psb_pages_clflush(pd->driver, &pd->p, 1);
down_write(&pd->driver->sem);
wmb();
psb_mmu_flush_pd_locked(pd->driver, 1);
pd->hw_context = hw_context;
up_write(&pd->driver->sem);
}
static inline unsigned long psb_pd_addr_end(unsigned long addr,
unsigned long end)
{
addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
return (addr < end) ? addr : end;
}
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
uint32_t mask = PSB_PTE_VALID;
if (type & PSB_MMU_CACHED_MEMORY)
mask |= PSB_PTE_CACHED;
if (type & PSB_MMU_RO_MEMORY)
mask |= PSB_PTE_RO;
if (type & PSB_MMU_WO_MEMORY)
mask |= PSB_PTE_WO;
return (pfn << PAGE_SHIFT) | mask;
}
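#if 0
/*
 * Editor's sketch, not part of the driver: composing a PTE for a
 * cached, read-only page with the helper above.
 */
static uint32_t example_ro_cached_pte(struct page *page)
{
	return psb_mmu_mask_pte(page_to_pfn(page),
				PSB_MMU_CACHED_MEMORY | PSB_MMU_RO_MEMORY);
}
#endif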
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
int trap_pagefaults, int invalid_type)
{
struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
uint32_t *v;
int i;
if (!pd)
return NULL;
pd->p = alloc_page(GFP_DMA32);
if (!pd->p)
goto out_err1;
pd->dummy_pt = alloc_page(GFP_DMA32);
if (!pd->dummy_pt)
goto out_err2;
pd->dummy_page = alloc_page(GFP_DMA32);
if (!pd->dummy_page)
goto out_err3;
if (!trap_pagefaults) {
pd->invalid_pde =
psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
invalid_type);
pd->invalid_pte =
psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
invalid_type);
} else {
pd->invalid_pde = 0;
pd->invalid_pte = 0;
}
v = kmap(pd->dummy_pt);
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
v[i] = pd->invalid_pte;
kunmap(pd->dummy_pt);
v = kmap(pd->p);
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
v[i] = pd->invalid_pde;
kunmap(pd->p);
clear_page(kmap(pd->dummy_page));
kunmap(pd->dummy_page);
pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
if (!pd->tables)
goto out_err4;
pd->hw_context = -1;
pd->pd_mask = PSB_PTE_VALID;
pd->driver = driver;
return pd;
out_err4:
__free_page(pd->dummy_page);
out_err3:
__free_page(pd->dummy_pt);
out_err2:
__free_page(pd->p);
out_err1:
kfree(pd);
return NULL;
}
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
__free_page(pt->p);
kfree(pt);
}
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
struct psb_mmu_driver *driver = pd->driver;
struct psb_mmu_pt *pt;
int i;
down_write(&driver->sem);
if (pd->hw_context != -1)
psb_mmu_flush_pd_locked(driver, 1);
/* Should take the spinlock here, but we don't need to do that
since we have the semaphore in write mode. */
for (i = 0; i < 1024; ++i) {
pt = pd->tables[i];
if (pt)
psb_mmu_free_pt(pt);
}
vfree(pd->tables);
__free_page(pd->dummy_page);
__free_page(pd->dummy_pt);
__free_page(pd->p);
kfree(pd);
up_write(&driver->sem);
}
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
void *v;
uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
uint32_t clflush_count = PAGE_SIZE / clflush_add;
spinlock_t *lock = &pd->driver->lock;
uint8_t *clf;
uint32_t *ptes;
int i;
if (!pt)
return NULL;
pt->p = alloc_page(GFP_DMA32);
if (!pt->p) {
kfree(pt);
return NULL;
}
spin_lock(lock);
v = kmap_atomic(pt->p);
clf = (uint8_t *) v;
ptes = (uint32_t *) v;
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
*ptes++ = pd->invalid_pte;
if (pd->driver->has_clflush && pd->hw_context != -1) {
mb();
for (i = 0; i < clflush_count; ++i) {
psb_clflush(clf);
clf += clflush_add;
}
mb();
}
kunmap_atomic(v);
spin_unlock(lock);
pt->count = 0;
pt->pd = pd;
pt->index = 0;
return pt;
}
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
struct psb_mmu_pt *pt;
uint32_t *v;
spinlock_t *lock = &pd->driver->lock;
spin_lock(lock);
pt = pd->tables[index];
while (!pt) {
spin_unlock(lock);
pt = psb_mmu_alloc_pt(pd);
if (!pt)
return NULL;
spin_lock(lock);
if (pd->tables[index]) {
spin_unlock(lock);
psb_mmu_free_pt(pt);
spin_lock(lock);
pt = pd->tables[index];
continue;
}
v = kmap_atomic(pd->p);
pd->tables[index] = pt;
v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
pt->index = index;
kunmap_atomic((void *) v);
if (pd->hw_context != -1) {
psb_mmu_clflush(pd->driver, (void *) &v[index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
}
pt->v = kmap_atomic(pt->p);
return pt;
}
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
struct psb_mmu_pt *pt;
spinlock_t *lock = &pd->driver->lock;
spin_lock(lock);
pt = pd->tables[index];
if (!pt) {
spin_unlock(lock);
return NULL;
}
pt->v = kmap_atomic(pt->p);
return pt;
}
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
struct psb_mmu_pd *pd = pt->pd;
uint32_t *v;
kunmap_atomic(pt->v);
if (pt->count == 0) {
v = kmap_atomic(pd->p);
v[pt->index] = pd->invalid_pde;
pd->tables[pt->index] = NULL;
if (pd->hw_context != -1) {
psb_mmu_clflush(pd->driver,
(void *) &v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
kunmap_atomic(v);
spin_unlock(&pd->driver->lock);
psb_mmu_free_pt(pt);
return;
}
spin_unlock(&pd->driver->lock);
}
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
unsigned long addr, uint32_t pte)
{
pt->v[psb_mmu_pt_index(addr)] = pte;
}
static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
unsigned long addr)
{
pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
uint32_t mmu_offset, uint32_t gtt_start,
uint32_t gtt_pages)
{
uint32_t *v;
uint32_t start = psb_mmu_pd_index(mmu_offset);
struct psb_mmu_driver *driver = pd->driver;
int num_pages = gtt_pages;
down_read(&driver->sem);
spin_lock(&driver->lock);
v = kmap_atomic(pd->p);
v += start;
while (gtt_pages--) {
*v++ = gtt_start | pd->pd_mask;
gtt_start += PAGE_SIZE;
}
/*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages);
kunmap_atomic(v);
spin_unlock(&driver->lock);
if (pd->hw_context != -1)
atomic_set(&pd->driver->needs_tlbflush, 1);
up_read(&pd->driver->sem);
psb_mmu_flush_pd(pd->driver, 0);
}
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
struct psb_mmu_pd *pd;
/* down_read(&driver->sem); */
pd = driver->default_pd;
/* up_read(&driver->sem); */
return pd;
}
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
psb_mmu_free_pagedir(driver->default_pd);
kfree(driver);
}
struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
int trap_pagefaults,
int invalid_type,
struct drm_psb_private *dev_priv)
{
struct psb_mmu_driver *driver;
driver = kmalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return NULL;
driver->dev_priv = dev_priv;
driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
invalid_type);
if (!driver->default_pd)
goto out_err1;
spin_lock_init(&driver->lock);
init_rwsem(&driver->sem);
down_write(&driver->sem);
driver->register_map = registers;
atomic_set(&driver->needs_tlbflush, 1);
driver->has_clflush = 0;
if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
uint32_t tfms, misc, cap0, cap4, clflush_size;
/*
* clflush size is determined at kernel setup for x86_64
* but not for i386. We have to do it here.
*/
cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
clflush_size = ((misc >> 8) & 0xff) * 8;
driver->has_clflush = 1;
driver->clflush_add =
PAGE_SIZE * clflush_size / sizeof(uint32_t);
driver->clflush_mask = driver->clflush_add - 1;
driver->clflush_mask = ~driver->clflush_mask;
}
up_write(&driver->sem);
return driver;
out_err1:
kfree(driver);
return NULL;
}
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
{
struct psb_mmu_pt *pt;
uint32_t rows = 1;
uint32_t i;
unsigned long addr;
unsigned long end;
unsigned long next;
unsigned long add;
unsigned long row_add;
unsigned long clflush_add = pd->driver->clflush_add;
unsigned long clflush_mask = pd->driver->clflush_mask;
if (!pd->driver->has_clflush) {
/*ttm_tt_cache_flush(&pd->p, num_pages);*/
psb_pages_clflush(pd->driver, &pd->p, num_pages);
return;
}
if (hw_tile_stride)
rows = num_pages / desired_tile_stride;
else
desired_tile_stride = num_pages;
add = desired_tile_stride << PAGE_SHIFT;
row_add = hw_tile_stride << PAGE_SHIFT;
mb();
for (i = 0; i < rows; ++i) {
addr = address;
end = addr + add;
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_map_lock(pd, addr);
if (!pt)
continue;
do {
psb_clflush(&pt->v
[psb_mmu_pt_index(addr)]);
} while (addr +=
clflush_add,
(addr & clflush_mask) < next);
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
address += row_add;
}
mb();
}
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages)
{
struct psb_mmu_pt *pt;
unsigned long addr;
unsigned long end;
unsigned long next;
unsigned long f_address = address;
down_read(&pd->driver->sem);
addr = address;
end = addr + (num_pages << PAGE_SHIFT);
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_alloc_map_lock(pd, addr);
if (!pt)
goto out;
do {
psb_mmu_invalidate_pte(pt, addr);
--pt->count;
} while (addr += PAGE_SIZE, addr < next);
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
out:
if (pd->hw_context != -1)
psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver, 0);
return;
}
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
uint32_t num_pages, uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
{
struct psb_mmu_pt *pt;
uint32_t rows = 1;
uint32_t i;
unsigned long addr;
unsigned long end;
unsigned long next;
unsigned long add;
unsigned long row_add;
unsigned long f_address = address;
if (hw_tile_stride)
rows = num_pages / desired_tile_stride;
else
desired_tile_stride = num_pages;
add = desired_tile_stride << PAGE_SHIFT;
row_add = hw_tile_stride << PAGE_SHIFT;
/* down_read(&pd->driver->sem); */
/* Make sure we only need to flush this processor's cache */
for (i = 0; i < rows; ++i) {
addr = address;
end = addr + add;
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_map_lock(pd, addr);
if (!pt)
continue;
do {
psb_mmu_invalidate_pte(pt, addr);
--pt->count;
} while (addr += PAGE_SIZE, addr < next);
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
address += row_add;
}
if (pd->hw_context != -1)
psb_mmu_flush_ptes(pd, f_address, num_pages,
desired_tile_stride, hw_tile_stride);
/* up_read(&pd->driver->sem); */
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver, 0);
}
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
unsigned long address, uint32_t num_pages,
int type)
{
struct psb_mmu_pt *pt;
uint32_t pte;
unsigned long addr;
unsigned long end;
unsigned long next;
unsigned long f_address = address;
int ret = 0;
down_read(&pd->driver->sem);
addr = address;
end = addr + (num_pages << PAGE_SHIFT);
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_alloc_map_lock(pd, addr);
if (!pt) {
ret = -ENOMEM;
goto out;
}
do {
pte = psb_mmu_mask_pte(start_pfn++, type);
psb_mmu_set_pte(pt, addr, pte);
pt->count++;
} while (addr += PAGE_SIZE, addr < next);
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
out:
if (pd->hw_context != -1)
psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver, 1);
return ret;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
uint32_t desired_tile_stride,
uint32_t hw_tile_stride, int type)
{
struct psb_mmu_pt *pt;
uint32_t rows = 1;
uint32_t i;
uint32_t pte;
unsigned long addr;
unsigned long end;
unsigned long next;
unsigned long add;
unsigned long row_add;
unsigned long f_address = address;
int ret = 0;
if (hw_tile_stride) {
if (num_pages % desired_tile_stride != 0)
return -EINVAL;
rows = num_pages / desired_tile_stride;
} else {
desired_tile_stride = num_pages;
}
add = desired_tile_stride << PAGE_SHIFT;
row_add = hw_tile_stride << PAGE_SHIFT;
down_read(&pd->driver->sem);
for (i = 0; i < rows; ++i) {
addr = address;
end = addr + add;
do {
next = psb_pd_addr_end(addr, end);
pt = psb_mmu_pt_alloc_map_lock(pd, addr);
if (!pt) {
ret = -ENOMEM;
goto out;
}
do {
pte =
psb_mmu_mask_pte(page_to_pfn(*pages++),
type);
psb_mmu_set_pte(pt, addr, pte);
pt->count++;
} while (addr += PAGE_SIZE, addr < next);
psb_mmu_pt_unmap_unlock(pt);
} while (addr = next, next != end);
address += row_add;
}
out:
if (pd->hw_context != -1)
psb_mmu_flush_ptes(pd, f_address, num_pages,
desired_tile_stride, hw_tile_stride);
up_read(&pd->driver->sem);
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver, 1);
return ret;
}
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
unsigned long *pfn)
{
int ret;
struct psb_mmu_pt *pt;
uint32_t tmp;
spinlock_t *lock = &pd->driver->lock;
down_read(&pd->driver->sem);
pt = psb_mmu_pt_map_lock(pd, virtual);
if (!pt) {
uint32_t *v;
spin_lock(lock);
v = kmap_atomic(pd->p);
tmp = v[psb_mmu_pd_index(virtual)];
kunmap_atomic(v);
spin_unlock(lock);
if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
!(pd->invalid_pte & PSB_PTE_VALID)) {
ret = -EINVAL;
goto out;
}
ret = 0;
*pfn = pd->invalid_pte >> PAGE_SHIFT;
goto out;
}
tmp = pt->v[psb_mmu_pt_index(virtual)];
if (!(tmp & PSB_PTE_VALID)) {
ret = -EINVAL;
} else {
ret = 0;
*pfn = tmp >> PAGE_SHIFT;
}
psb_mmu_pt_unmap_unlock(pt);
out:
up_read(&pd->driver->sem);
return ret;
}
| gpl-2.0 |
gingerboy92/android_kernel_motorola_msm8916 | lib/ioremap.c | 8480 | 2195 | /*
* Re-map IO memory to kernel address space so that we can access it.
* This is needed for high PCI addresses that aren't mapped in the
* 640k-1MB IO memory area on PC's
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pte_t *pte;
u64 pfn;
pfn = phys_addr >> PAGE_SHIFT;
pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
do {
BUG_ON(!pte_none(*pte));
set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
return 0;
}
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
phys_addr -= addr;
pmd = pmd_alloc(&init_mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
phys_addr -= addr;
pud = pud_alloc(&init_mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
int ioremap_page_range(unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pgd_t *pgd;
unsigned long start;
unsigned long next;
int err;
BUG_ON(addr >= end);
start = addr;
phys_addr -= addr;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
flush_cache_vmap(start, end);
return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
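#if 0
/*
 * Editor's sketch, not part of this file: how an ioremap() implementation
 * typically drives ioremap_page_range().  The helper name and the
 * PAGE_KERNEL_NOCACHE protection are illustrative assumptions (the latter
 * is x86-flavoured); phys_addr and size are assumed page-aligned.
 */
static void __iomem *example_ioremap(phys_addr_t phys_addr, size_t size)
{
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr,
			       PAGE_KERNEL_NOCACHE)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)addr;
}
#endif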
| gpl-2.0 |
ROM-Jeremy/android_kernel_x5 | arch/mips/txx9/rbtx4927/setup.c | 8736 | 10465 | /*
* Toshiba rbtx4927 specific setup
*
* Author: MontaVista Software, Inc.
* source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
* Copyright (C) 1996, 97, 2001, 04 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2000 RidgeRun, Inc.
* Author: RidgeRun, Inc.
* glonnon@ridgerun.com, skranz@ridgerun.com, stevej@ridgerun.com
*
* Copyright 2001 MontaVista Software Inc.
* Author: jsun@mvista.com or jsun@junsun.net
*
* Copyright 2002 MontaVista Software Inc.
* Author: Michael Pruznick, michael_pruznick@mvista.com
*
* Copyright (C) 2000-2001 Toshiba Corporation
*
* Copyright (C) 2004 MontaVista Software Inc.
* Author: Manish Lachwani, mlachwani@mvista.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <asm/io.h>
#include <asm/reboot.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4927.h>
#include <asm/txx9/tx4938.h> /* for TX4937 */
#ifdef CONFIG_PCI
static void __init tx4927_pci_setup(void)
{
int extarb = !(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB);
struct pci_controller *c = &txx9_primary_pcic;
register_pci_controller(c);
if (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66)
txx9_pci_option =
(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
TXX9_PCI_OPT_CLK_66; /* already configured */
/* Reset PCI Bus */
writeb(1, rbtx4927_pcireset_addr);
/* Reset PCIC */
txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
TXX9_PCI_OPT_CLK_66)
tx4927_pciclk66_setup();
mdelay(10);
/* clear PCIC reset */
txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
writeb(0, rbtx4927_pcireset_addr);
iob();
tx4927_report_pciclk();
tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
TXX9_PCI_OPT_CLK_AUTO &&
txx9_pci66_check(c, 0, 0)) {
/* Reset PCI Bus */
writeb(1, rbtx4927_pcireset_addr);
/* Reset PCIC */
txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
tx4927_pciclk66_setup();
mdelay(10);
/* clear PCIC reset */
txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
writeb(0, rbtx4927_pcireset_addr);
iob();
/* Reinitialize PCIC */
tx4927_report_pciclk();
tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
}
tx4927_setup_pcierr_irq();
}
static void __init tx4937_pci_setup(void)
{
int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
struct pci_controller *c = &txx9_primary_pcic;
register_pci_controller(c);
if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
txx9_pci_option =
(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
TXX9_PCI_OPT_CLK_66; /* already configured */
/* Reset PCI Bus */
writeb(1, rbtx4927_pcireset_addr);
/* Reset PCIC */
txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
TXX9_PCI_OPT_CLK_66)
tx4938_pciclk66_setup();
mdelay(10);
/* clear PCIC reset */
txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
writeb(0, rbtx4927_pcireset_addr);
iob();
tx4938_report_pciclk();
tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
TXX9_PCI_OPT_CLK_AUTO &&
txx9_pci66_check(c, 0, 0)) {
/* Reset PCI Bus */
writeb(1, rbtx4927_pcireset_addr);
/* Reset PCIC */
txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
tx4938_pciclk66_setup();
mdelay(10);
/* clear PCIC reset */
txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
writeb(0, rbtx4927_pcireset_addr);
iob();
/* Reinitialize PCIC */
tx4938_report_pciclk();
tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
}
tx4938_setup_pcierr_irq();
}
static void __init rbtx4927_arch_init(void)
{
tx4927_pci_setup();
}
static void __init rbtx4937_arch_init(void)
{
tx4937_pci_setup();
}
#else
#define rbtx4927_arch_init NULL
#define rbtx4937_arch_init NULL
#endif /* CONFIG_PCI */
static void toshiba_rbtx4927_restart(char *command)
{
/* enable the s/w reset register */
writeb(1, rbtx4927_softresetlock_addr);
/* wait for enable to be seen */
while (!(readb(rbtx4927_softresetlock_addr) & 1))
;
/* do a s/w reset */
writeb(1, rbtx4927_softreset_addr);
/* fallback */
(*_machine_halt)();
}
static void __init rbtx4927_clock_init(void);
static void __init rbtx4937_clock_init(void);
static void __init rbtx4927_mem_setup(void)
{
if (TX4927_REV_PCODE() == 0x4927) {
rbtx4927_clock_init();
tx4927_setup();
} else {
rbtx4937_clock_init();
tx4938_setup();
}
_machine_restart = toshiba_rbtx4927_restart;
#ifdef CONFIG_PCI
txx9_alloc_pci_controller(&txx9_primary_pcic,
RBTX4927_PCIMEM, RBTX4927_PCIMEM_SIZE,
RBTX4927_PCIIO, RBTX4927_PCIIO_SIZE);
txx9_board_pcibios_setup = tx4927_pcibios_setup;
#else
set_io_port_base(KSEG1 + RBTX4927_ISA_IO_OFFSET);
#endif
/* TX4927-SIO DTR on (PIO[15]) */
gpio_request(15, "sio-dtr");
gpio_direction_output(15, 1);
tx4927_sio_init(0, 0);
}
static void __init rbtx4927_clock_init(void)
{
/*
* ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
*
* For TX4927:
* PCIDIVMODE[12:11]'s initial value is given by S9[4:3] (ON:0, OFF:1).
* CPU 166MHz: PCI 66MHz : PCIDIVMODE: 00 (1/2.5)
* CPU 200MHz: PCI 66MHz : PCIDIVMODE: 01 (1/3)
* CPU 166MHz: PCI 33MHz : PCIDIVMODE: 10 (1/5)
* CPU 200MHz: PCI 33MHz : PCIDIVMODE: 11 (1/6)
* i.e. S9[3]: ON (83MHz), OFF (100MHz)
*/
switch ((unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg) &
TX4927_CCFG_PCIDIVMODE_MASK) {
case TX4927_CCFG_PCIDIVMODE_2_5:
case TX4927_CCFG_PCIDIVMODE_5:
txx9_cpu_clock = 166666666; /* 166MHz */
break;
default:
txx9_cpu_clock = 200000000; /* 200MHz */
}
}
static void __init rbtx4937_clock_init(void)
{
/*
* ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
*
* For TX4937:
* PCIDIVMODE[12:11]'s initial value is given by S1[5:4] (ON:0, OFF:1)
* PCIDIVMODE[10] is 0.
* CPU 266MHz: PCI 33MHz : PCIDIVMODE: 000 (1/8)
* CPU 266MHz: PCI 66MHz : PCIDIVMODE: 001 (1/4)
* CPU 300MHz: PCI 33MHz : PCIDIVMODE: 010 (1/9)
* CPU 300MHz: PCI 66MHz : PCIDIVMODE: 011 (1/4.5)
* CPU 333MHz: PCI 33MHz : PCIDIVMODE: 100 (1/10)
* CPU 333MHz: PCI 66MHz : PCIDIVMODE: 101 (1/5)
*/
switch ((unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg) &
TX4938_CCFG_PCIDIVMODE_MASK) {
case TX4938_CCFG_PCIDIVMODE_8:
case TX4938_CCFG_PCIDIVMODE_4:
txx9_cpu_clock = 266666666; /* 266MHz */
break;
case TX4938_CCFG_PCIDIVMODE_9:
case TX4938_CCFG_PCIDIVMODE_4_5:
txx9_cpu_clock = 300000000; /* 300MHz */
break;
default:
txx9_cpu_clock = 333333333; /* 333MHz */
}
}
static void __init rbtx4927_time_init(void)
{
tx4927_time_init(0);
}
static void __init toshiba_rbtx4927_rtc_init(void)
{
struct resource res = {
.start = RBTX4927_BRAMRTC_BASE - IO_BASE,
.end = RBTX4927_BRAMRTC_BASE - IO_BASE + 0x800 - 1,
.flags = IORESOURCE_MEM,
};
platform_device_register_simple("rtc-ds1742", -1, &res, 1);
}
static void __init rbtx4927_ne_init(void)
{
struct resource res[] = {
{
.start = RBTX4927_RTL_8019_BASE,
.end = RBTX4927_RTL_8019_BASE + 0x20 - 1,
.flags = IORESOURCE_IO,
}, {
.start = RBTX4927_RTL_8019_IRQ,
.flags = IORESOURCE_IRQ,
}
};
platform_device_register_simple("ne", -1, res, ARRAY_SIZE(res));
}
static void __init rbtx4927_mtd_init(void)
{
int i;
for (i = 0; i < 2; i++)
tx4927_mtd_init(i);
}
static void __init rbtx4927_gpioled_init(void)
{
static struct gpio_led leds[] = {
{ .name = "gpioled:green:0", .gpio = 0, .active_low = 1, },
{ .name = "gpioled:green:1", .gpio = 1, .active_low = 1, },
};
static struct gpio_led_platform_data pdata = {
.num_leds = ARRAY_SIZE(leds),
.leds = leds,
};
struct platform_device *pdev = platform_device_alloc("leds-gpio", 0);
if (!pdev)
return;
pdev->dev.platform_data = &pdata;
if (platform_device_add(pdev))
platform_device_put(pdev);
}
static void __init rbtx4927_device_init(void)
{
toshiba_rbtx4927_rtc_init();
rbtx4927_ne_init();
tx4927_wdt_init();
rbtx4927_mtd_init();
if (TX4927_REV_PCODE() == 0x4927) {
tx4927_dmac_init(2);
tx4927_aclc_init(0, 1);
} else {
tx4938_dmac_init(0, 2);
tx4938_aclc_init();
}
platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
txx9_iocled_init(RBTX4927_LED_ADDR - IO_BASE, -1, 3, 1, "green", NULL);
rbtx4927_gpioled_init();
}
struct txx9_board_vec rbtx4927_vec __initdata = {
.system = "Toshiba RBTX4927",
.prom_init = rbtx4927_prom_init,
.mem_setup = rbtx4927_mem_setup,
.irq_setup = rbtx4927_irq_setup,
.time_init = rbtx4927_time_init,
.device_init = rbtx4927_device_init,
.arch_init = rbtx4927_arch_init,
#ifdef CONFIG_PCI
.pci_map_irq = rbtx4927_pci_map_irq,
#endif
};
struct txx9_board_vec rbtx4937_vec __initdata = {
.system = "Toshiba RBTX4937",
.prom_init = rbtx4927_prom_init,
.mem_setup = rbtx4927_mem_setup,
.irq_setup = rbtx4927_irq_setup,
.time_init = rbtx4927_time_init,
.device_init = rbtx4927_device_init,
.arch_init = rbtx4937_arch_init,
#ifdef CONFIG_PCI
.pci_map_irq = rbtx4927_pci_map_irq,
#endif
};
| gpl-2.0 |
forfivo/v500_kernel_aosp | tools/perf/util/pstack.c | 9248 | 1435 | /*
* Simple pointer stack
*
* (c) 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
*/
#include "util.h"
#include "pstack.h"
#include <linux/kernel.h>
#include <stdlib.h>
struct pstack {
unsigned short top;
unsigned short max_nr_entries;
void *entries[0];
};
struct pstack *pstack__new(unsigned short max_nr_entries)
{
struct pstack *self = zalloc((sizeof(*self) +
max_nr_entries * sizeof(void *)));
if (self != NULL)
self->max_nr_entries = max_nr_entries;
return self;
}
void pstack__delete(struct pstack *self)
{
free(self);
}
bool pstack__empty(const struct pstack *self)
{
return self->top == 0;
}
void pstack__remove(struct pstack *self, void *key)
{
unsigned short i = self->top, last_index = self->top - 1;
while (i-- != 0) {
if (self->entries[i] == key) {
if (i < last_index)
memmove(self->entries + i,
self->entries + i + 1,
(last_index - i) * sizeof(void *));
--self->top;
return;
}
}
pr_err("%s: %p not on the pstack!\n", __func__, key);
}
void pstack__push(struct pstack *self, void *key)
{
if (self->top == self->max_nr_entries) {
pr_err("%s: top=%d, overflow!\n", __func__, self->top);
return;
}
self->entries[self->top++] = key;
}
void *pstack__pop(struct pstack *self)
{
void *ret;
if (self->top == 0) {
pr_err("%s: underflow!\n", __func__);
return NULL;
}
ret = self->entries[--self->top];
self->entries[self->top] = NULL;
return ret;
}
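#if 0
/*
 * Editor's sketch, not part of this file: typical LIFO usage of the
 * pstack API above.  The two local variables merely provide distinct
 * pointer keys.
 */
static void pstack_usage_example(void)
{
	struct pstack *stack = pstack__new(4);
	int a, b;

	if (stack == NULL)
		return;

	pstack__push(stack, &a);
	pstack__push(stack, &b);
	pstack__remove(stack, &a);	/* drop an entry from the middle */

	while (!pstack__empty(stack))
		(void)pstack__pop(stack);

	pstack__delete(stack);
}
#endif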
| gpl-2.0 |
tcp209/kernel_samsung_epic4gtouch | arch/arm/mach-s5pv310_bk/c1-regulator-consumer.c | 33 | 3659 | /* c1-regulator-consumer.c
*
* Copyright (C) 2011 Samsung Electronics
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
static int c1_enable_regulator_for_usb_mipi(bool enable)
{
struct regulator *mipi11_regulator;
struct regulator *mipi18_regulator;
struct regulator *hsic12_regulator;
struct regulator *usb33_regulator;
int ret = 0;
mipi11_regulator = regulator_get(NULL, "vmipi_1.1v");
if (IS_ERR(mipi11_regulator)) {
pr_err("%s: failed to get %s\n", __func__, "vmipi_1.1v");
ret = -ENODEV;
goto out4;
}
mipi18_regulator = regulator_get(NULL, "vmipi_1.8v");
if (IS_ERR(mipi18_regulator)) {
pr_err("%s: failed to get %s\n", __func__, "vmipi_1.8v");
ret = -ENODEV;
goto out3;
}
hsic12_regulator = regulator_get(NULL, "vhsic");
if (IS_ERR(hsic12_regulator)) {
pr_err("%s: failed to get %s\n", __func__, "vhsic 1.2v");
ret = -ENODEV;
goto out2;
}
usb33_regulator = regulator_get(NULL, "vusb_3.3v");
if (IS_ERR(usb33_regulator)) {
pr_err("%s: failed to get %s\n", __func__, "vusb_3.3v");
ret = -ENODEV;
goto out1;
}
if (enable) {
/* Power On Sequence
* MIPI 1.1V -> HSIC 1.2V -> MIPI 1.8V -> USB 3.3V
*/
pr_info("%s: enable LDOs\n", __func__);
if (!regulator_is_enabled(mipi11_regulator))
regulator_enable(mipi11_regulator);
if (!regulator_is_enabled(hsic12_regulator))
regulator_enable(hsic12_regulator);
if (!regulator_is_enabled(mipi18_regulator))
regulator_enable(mipi18_regulator);
if (!regulator_is_enabled(usb33_regulator))
regulator_enable(usb33_regulator);
} else {
/* Power Off Sequence
* USB 3.3V -> MIPI 18V -> HSIC 1.2V -> MIPI 1.1V
*/
pr_info("%s: disable LDOs\n", __func__);
regulator_force_disable(usb33_regulator);
regulator_force_disable(mipi18_regulator);
regulator_force_disable(hsic12_regulator);
regulator_force_disable(mipi11_regulator);
}
regulator_put(usb33_regulator);
out1:
regulator_put(hsic12_regulator);
out2:
regulator_put(mipi18_regulator);
out3:
regulator_put(mipi11_regulator);
out4:
return ret;
}
static int regulator_consumer_probe(struct platform_device *pdev)
{
pr_info("%s: loading c1-regulator-consumer\n", __func__);
return 0;
}
#ifdef CONFIG_PM
static int regulator_consumer_suspend(struct device *dev)
{
c1_enable_regulator_for_usb_mipi(false);
return 0;
}
static int regulator_consumer_resume(struct device *dev)
{
c1_enable_regulator_for_usb_mipi(true);
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops regulator_consumer_pm_ops = {
#ifdef CONFIG_PM
.suspend = regulator_consumer_suspend,
.resume = regulator_consumer_resume,
#endif /* CONFIG_PM */
};
static struct platform_driver regulator_consumer_driver = {
.probe = regulator_consumer_probe,
.driver = {
.owner = THIS_MODULE,
.name = "c1-regulator-consumer",
.pm = &regulator_consumer_pm_ops,
},
};
static int __init regulator_consumer_init(void)
{
return platform_driver_register(&regulator_consumer_driver);
}
module_init(regulator_consumer_init);
MODULE_AUTHOR("ms925.kim@samsung.com")
MODULE_DESCRIPTION("C1/U1 regulator consumer driver");
MODULE_LICENSE("GPL");
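/*
 * Illustrative sketch (not part of the original file): board setup code
 * would register a matching platform device so the driver above binds by
 * name. The device variable and init hook below are hypothetical.
 */
#ifdef C1_REGULATOR_CONSUMER_EXAMPLE
static struct platform_device c1_regulator_consumer_device = {
	.name	= "c1-regulator-consumer",	/* must match driver.name */
	.id	= -1,
};

static int __init c1_board_add_regulator_consumer(void)
{
	return platform_device_register(&c1_regulator_consumer_device);
}
#endif /* C1_REGULATOR_CONSUMER_EXAMPLE */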
| gpl-2.0 |
hulkppw/openjdk | jdk/src/share/back/outStream.c | 33 | 13960 | /*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "util.h"
#include "stream.h"
#include "outStream.h"
#include "inStream.h"
#include "transport.h"
#include "commonRef.h"
#include "bag.h"
#include "FrameID.h"
#define INITIAL_ID_ALLOC 50
#define SMALLEST(a, b) (((a) < (b)) ? (a) : (b))
static void
commonInit(PacketOutputStream *stream)
{
stream->current = &stream->initialSegment[0];
stream->left = sizeof(stream->initialSegment);
stream->segment = &stream->firstSegment;
stream->segment->length = 0;
stream->segment->data = &stream->initialSegment[0];
stream->segment->next = NULL;
stream->error = JDWP_ERROR(NONE);
stream->sent = JNI_FALSE;
stream->ids = bagCreateBag(sizeof(jlong), INITIAL_ID_ALLOC);
if (stream->ids == NULL) {
stream->error = JDWP_ERROR(OUT_OF_MEMORY);
}
}
void
outStream_initCommand(PacketOutputStream *stream, jint id,
jbyte flags, jbyte commandSet, jbyte command)
{
commonInit(stream);
/*
* Command-specific initialization
*/
stream->packet.type.cmd.id = id;
stream->packet.type.cmd.cmdSet = commandSet;
stream->packet.type.cmd.cmd = command;
stream->packet.type.cmd.flags = flags;
}
void
outStream_initReply(PacketOutputStream *stream, jint id)
{
commonInit(stream);
/*
* Reply-specific initialization
*/
stream->packet.type.reply.id = id;
stream->packet.type.reply.errorCode = 0x0;
stream->packet.type.cmd.flags = (jbyte)JDWPTRANSPORT_FLAGS_REPLY;
}
jint
outStream_id(PacketOutputStream *stream)
{
return stream->packet.type.cmd.id;
}
jbyte
outStream_command(PacketOutputStream *stream)
{
/* Only makes sense for commands */
JDI_ASSERT(!(stream->packet.type.cmd.flags & JDWPTRANSPORT_FLAGS_REPLY));
return stream->packet.type.cmd.cmd;
}
static jdwpError
writeBytes(PacketOutputStream *stream, void *source, int size)
{
jbyte *bytes = (jbyte *)source;
if (stream->error) {
return stream->error;
}
while (size > 0) {
jint count;
if (stream->left == 0) {
jint segSize = SMALLEST(2 * stream->segment->length, MAX_SEGMENT_SIZE);
jbyte *newSeg = jvmtiAllocate(segSize);
struct PacketData *newHeader = jvmtiAllocate(sizeof(*newHeader));
if ((newSeg == NULL) || (newHeader == NULL)) {
jvmtiDeallocate(newSeg);
jvmtiDeallocate(newHeader);
stream->error = JDWP_ERROR(OUT_OF_MEMORY);
return stream->error;
}
newHeader->length = 0;
newHeader->data = newSeg;
newHeader->next = NULL;
stream->segment->next = newHeader;
stream->segment = newHeader;
stream->current = newHeader->data;
stream->left = segSize;
}
count = SMALLEST(size, stream->left);
(void)memcpy(stream->current, bytes, count);
stream->current += count;
stream->left -= count;
stream->segment->length += count;
size -= count;
bytes += count;
}
return JDWP_ERROR(NONE);
}
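/*
 * Illustrative sketch (not part of the original file): the segment growth
 * rule used by writeBytes() above. Each overflow segment doubles the
 * previous segment's length, capped at MAX_SEGMENT_SIZE, so sizes grow
 * geometrically instead of allocating one segment per write.
 */
#ifdef OUTSTREAM_GROWTH_EXAMPLE
static jint exampleNextSegmentSize(jint prevLength)
{
	return SMALLEST(2 * prevLength, MAX_SEGMENT_SIZE);
}
#endif /* OUTSTREAM_GROWTH_EXAMPLE */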
jdwpError
outStream_writeBoolean(PacketOutputStream *stream, jboolean val)
{
jbyte byte = (val != 0) ? 1 : 0;
return writeBytes(stream, &byte, sizeof(byte));
}
jdwpError
outStream_writeByte(PacketOutputStream *stream, jbyte val)
{
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeChar(PacketOutputStream *stream, jchar val)
{
val = HOST_TO_JAVA_CHAR(val);
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeShort(PacketOutputStream *stream, jshort val)
{
val = HOST_TO_JAVA_SHORT(val);
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeInt(PacketOutputStream *stream, jint val)
{
val = HOST_TO_JAVA_INT(val);
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeLong(PacketOutputStream *stream, jlong val)
{
val = HOST_TO_JAVA_LONG(val);
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeFloat(PacketOutputStream *stream, jfloat val)
{
val = HOST_TO_JAVA_FLOAT(val);
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeDouble(PacketOutputStream *stream, jdouble val)
{
val = HOST_TO_JAVA_DOUBLE(val);
return writeBytes(stream, &val, sizeof(val));
}
jdwpError
outStream_writeObjectTag(JNIEnv *env, PacketOutputStream *stream, jobject val)
{
return outStream_writeByte(stream, specificTypeKey(env, val));
}
jdwpError
outStream_writeObjectRef(JNIEnv *env, PacketOutputStream *stream, jobject val)
{
jlong id;
jlong *idPtr;
if (stream->error) {
return stream->error;
}
if (val == NULL) {
id = NULL_OBJECT_ID;
} else {
/* Convert the object to an object id */
id = commonRef_refToID(env, val);
if (id == NULL_OBJECT_ID) {
stream->error = JDWP_ERROR(OUT_OF_MEMORY);
return stream->error;
}
/* Track the common ref in case we need to release it on a future error */
idPtr = bagAdd(stream->ids);
if (idPtr == NULL) {
commonRef_release(env, id);
stream->error = JDWP_ERROR(OUT_OF_MEMORY);
return stream->error;
} else {
*idPtr = id;
}
/* Add the encoded object id to the stream */
id = HOST_TO_JAVA_LONG(id);
}
return writeBytes(stream, &id, sizeof(id));
}
jdwpError
outStream_writeFrameID(PacketOutputStream *stream, FrameID val)
{
/*
* Not good - we're writing a pointer as a jint. Need
* to write as a jlong if sizeof(FrameID) == 8.
*/
if (sizeof(FrameID) == 8) {
/*LINTED*/
return outStream_writeLong(stream, (jlong)val);
} else {
/*LINTED*/
return outStream_writeInt(stream, (jint)val);
}
}
jdwpError
outStream_writeMethodID(PacketOutputStream *stream, jmethodID val)
{
/*
* Not good - we're writing a pointer as a jint. Need
* to write as a jlong if sizeof(jmethodID) == 8.
*/
if (sizeof(jmethodID) == 8) {
/*LINTED*/
return outStream_writeLong(stream, (jlong)(intptr_t)val);
} else {
/*LINTED*/
return outStream_writeInt(stream, (jint)(intptr_t)val);
}
}
jdwpError
outStream_writeFieldID(PacketOutputStream *stream, jfieldID val)
{
/*
* Not good - we're writing a pointer as a jint. Need
* to write as a jlong if sizeof(jfieldID) == 8.
*/
if (sizeof(jfieldID) == 8) {
/*LINTED*/
return outStream_writeLong(stream, (jlong)(intptr_t)val);
} else {
/*LINTED*/
return outStream_writeInt(stream, (jint)(intptr_t)val);
}
}
jdwpError
outStream_writeLocation(PacketOutputStream *stream, jlocation val)
{
return outStream_writeLong(stream, (jlong)val);
}
jdwpError
outStream_writeByteArray(PacketOutputStream*stream, jint length,
jbyte *bytes)
{
(void)outStream_writeInt(stream, length);
return writeBytes(stream, bytes, length);
}
jdwpError
outStream_writeString(PacketOutputStream *stream, char *string)
{
jdwpError error;
jint length;
/* The utf8=y/n option controls whether we emit standard UTF-8 or modified UTF-8 */
if ( gdata->modifiedUtf8 ) {
length = (int)strlen(string);
(void)outStream_writeInt(stream, length);
error = writeBytes(stream, (jbyte *)string, length);
} else {
jint new_length;
length = (int)strlen(string);
new_length = (gdata->npt->utf8mToUtf8sLength)
(gdata->npt->utf, (jbyte*)string, length);
if ( new_length == length ) {
(void)outStream_writeInt(stream, length);
error = writeBytes(stream, (jbyte *)string, length);
} else {
char *new_string;
new_string = jvmtiAllocate(new_length+1);
(gdata->npt->utf8mToUtf8s)
(gdata->npt->utf, (jbyte*)string, length,
(jbyte*)new_string, new_length);
(void)outStream_writeInt(stream, new_length);
error = writeBytes(stream, (jbyte *)new_string, new_length);
jvmtiDeallocate(new_string);
}
}
return error;
}
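/*
 * Illustrative note (not part of the original file): an embedded NUL is the
 * classic divergence between the two encodings - modified UTF-8 represents
 * U+0000 as the two bytes 0xC0 0x80 while standard UTF-8 uses a single 0x00
 * byte - so utf8mToUtf8sLength() returns a different length and the
 * conversion branch above is taken.
 */
#ifdef OUTSTREAM_UTF8_EXAMPLE
static const jbyte modified_utf8_nul[] = { (jbyte)0xC0, (jbyte)0x80 };
static const jbyte standard_utf8_nul[] = { 0x00 };
#endif /* OUTSTREAM_UTF8_EXAMPLE */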
jdwpError
outStream_writeValue(JNIEnv *env, PacketOutputStream *out,
jbyte typeKey, jvalue value)
{
if (typeKey == JDWP_TAG(OBJECT)) {
(void)outStream_writeByte(out, specificTypeKey(env, value.l));
} else {
(void)outStream_writeByte(out, typeKey);
}
if (isObjectTag(typeKey)) {
(void)outStream_writeObjectRef(env, out, value.l);
} else {
switch (typeKey) {
case JDWP_TAG(BYTE):
return outStream_writeByte(out, value.b);
case JDWP_TAG(CHAR):
return outStream_writeChar(out, value.c);
case JDWP_TAG(FLOAT):
return outStream_writeFloat(out, value.f);
case JDWP_TAG(DOUBLE):
return outStream_writeDouble(out, value.d);
case JDWP_TAG(INT):
return outStream_writeInt(out, value.i);
case JDWP_TAG(LONG):
return outStream_writeLong(out, value.j);
case JDWP_TAG(SHORT):
return outStream_writeShort(out, value.s);
case JDWP_TAG(BOOLEAN):
return outStream_writeBoolean(out, value.z);
case JDWP_TAG(VOID): /* happens with function return values */
/* write nothing */
return JDWP_ERROR(NONE);
default:
EXIT_ERROR(AGENT_ERROR_INVALID_OBJECT,"Invalid type key");
break;
}
}
return JDWP_ERROR(NONE);
}
jdwpError
outStream_skipBytes(PacketOutputStream *stream, jint count)
{
int i;
for (i = 0; i < count; i++) {
(void)outStream_writeByte(stream, 0);
}
return stream->error;
}
jdwpError
outStream_error(PacketOutputStream *stream)
{
return stream->error;
}
void
outStream_setError(PacketOutputStream *stream, jdwpError error)
{
if (stream->error == JDWP_ERROR(NONE)) {
stream->error = error;
LOG_MISC(("outStream_setError error=%s(%d)", jdwpErrorText(error), error));
}
}
static jint
outStream_send(PacketOutputStream *stream) {
jint rc;
jint len = 0;
PacketData *segment;
jbyte *data, *posP;
/*
* If there's only 1 segment then we just send the
* packet.
*/
if (stream->firstSegment.next == NULL) {
stream->packet.type.cmd.len = 11 + stream->firstSegment.length; /* 11-byte JDWP header */
stream->packet.type.cmd.data = stream->firstSegment.data;
rc = transport_sendPacket(&stream->packet);
return rc;
}
/*
* Multiple segments
*/
len = 0;
segment = (PacketData *)&(stream->firstSegment);
do {
len += segment->length;
segment = segment->next;
} while (segment != NULL);
data = jvmtiAllocate(len);
if (data == NULL) {
return JDWP_ERROR(OUT_OF_MEMORY);
}
posP = data;
segment = (PacketData *)&(stream->firstSegment);
while (segment != NULL) {
(void)memcpy(posP, segment->data, segment->length);
posP += segment->length;
segment = segment->next;
}
stream->packet.type.cmd.len = 11 + len;
stream->packet.type.cmd.data = data;
rc = transport_sendPacket(&stream->packet);
stream->packet.type.cmd.data = NULL;
jvmtiDeallocate(data);
return rc;
}
void
outStream_sendReply(PacketOutputStream *stream)
{
jint rc;
if (stream->error) {
/*
* Don't send any collected stream data on an error reply
*/
stream->packet.type.reply.len = 0;
stream->packet.type.reply.errorCode = (jshort)stream->error;
}
rc = outStream_send(stream);
if (rc == 0) {
stream->sent = JNI_TRUE;
}
}
void
outStream_sendCommand(PacketOutputStream *stream)
{
jint rc;
if (!stream->error) {
rc = outStream_send(stream);
if (rc == 0) {
stream->sent = JNI_TRUE;
}
}
}
static jboolean
releaseID(void *elementPtr, void *arg)
{
jlong *idPtr = elementPtr;
commonRef_release(getEnv(), *idPtr);
return JNI_TRUE;
}
void
outStream_destroy(PacketOutputStream *stream)
{
struct PacketData *next;
if (stream->error || !stream->sent) {
(void)bagEnumerateOver(stream->ids, releaseID, NULL);
}
next = stream->firstSegment.next;
while (next != NULL) {
struct PacketData *p = next;
next = p->next;
jvmtiDeallocate(p->data);
jvmtiDeallocate(p);
}
bagDestroyBag(stream->ids);
}
| gpl-2.0 |
walac/linux | fs/xfs/xfs_bmap_util.c | 33 | 47681 | // SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* Copyright (c) 2012 Red Hat, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
/* Kernel only BMAP related definitions and functions */
/*
* Convert the given file system block to a disk block. We have to treat it
* differently based on whether the file is a real time file or not, because the
* bmap code does.
*/
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
if (XFS_IS_REALTIME_INODE(ip))
return XFS_FSB_TO_BB(ip->i_mount, fsb);
return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
/*
* Routine to zero an extent on disk allocated to the specific inode.
*
* The VFS functions take a linearised filesystem block offset, so we have to
* convert the sparse xfs fsb to the right format first.
* VFS types are real funky, too.
*/
int
xfs_zero_extent(
struct xfs_inode *ip,
xfs_fsblock_t start_fsb,
xfs_off_t count_fsb)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
sector_t block = XFS_BB_TO_FSBT(mp, sector);
return blkdev_issue_zeroout(target->bt_bdev,
block << (mp->m_super->s_blocksize_bits - 9),
count_fsb << (mp->m_super->s_blocksize_bits - 9),
GFP_NOFS, 0);
}
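/*
 * Illustrative sketch (not part of the original file): the shifts above
 * convert filesystem blocks to 512-byte sectors. With a hypothetical
 * 4096-byte block size, s_blocksize_bits is 12, so each block spans
 * 1 << (12 - 9) = 8 sectors.
 */
#ifdef XFS_ZERO_EXTENT_EXAMPLE
static inline sector_t example_fsb_to_sectors(sector_t nblocks,
					      unsigned int blocksize_bits)
{
	return nblocks << (blocksize_bits - 9);
}
#endif /* XFS_ZERO_EXTENT_EXAMPLE */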
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
int error; /* error return value */
xfs_mount_t *mp; /* mount point structure */
xfs_extlen_t prod = 0; /* product factor for allocators */
xfs_extlen_t mod = 0; /* product factor for allocators */
xfs_extlen_t ralen = 0; /* realtime allocation length */
xfs_extlen_t align; /* minimum allocation alignment */
xfs_rtblock_t rtb;
mp = ap->ip->i_mount;
align = xfs_get_extsz_hint(ap->ip);
prod = align / mp->m_sb.sb_rextsize;
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 1, ap->eof, 0,
ap->conv, &ap->offset, &ap->length);
if (error)
return error;
ASSERT(ap->length);
ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
/*
* If the offset & length are not perfectly aligned
* then kill prod, it will just get us in trouble.
*/
div_u64_rem(ap->offset, align, &mod);
if (mod || ap->length % align)
prod = 1;
/*
* Set ralen to be the actual requested length in rtextents.
*/
ralen = ap->length / mp->m_sb.sb_rextsize;
/*
* If the old value was close enough to MAXEXTLEN that
* we rounded up to it, cut it back so it's valid again.
* Note that if it's a really large request (bigger than
* MAXEXTLEN), we don't hear about that number, and can't
* adjust the starting point to match it.
*/
if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
/*
* Lock out modifications to both the RT bitmap and summary inodes
*/
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
/*
* If it's an allocation to an empty file at offset 0,
* pick an extent that will space things out in the rt area.
*/
if (ap->eof && ap->offset == 0) {
xfs_rtblock_t rtx; /* realtime extent no */
error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
if (error)
return error;
ap->blkno = rtx * mp->m_sb.sb_rextsize;
} else {
ap->blkno = 0;
}
xfs_bmap_adjacent(ap);
/*
* Realtime allocation, done through xfs_rtallocate_extent.
*/
do_div(ap->blkno, mp->m_sb.sb_rextsize);
rtb = ap->blkno;
ap->length = ralen;
error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
&ralen, ap->wasdel, prod, &rtb);
if (error)
return error;
ap->blkno = rtb;
if (ap->blkno != NULLFSBLOCK) {
ap->blkno *= mp->m_sb.sb_rextsize;
ralen *= mp->m_sb.sb_rextsize;
ap->length = ralen;
ap->ip->i_d.di_nblocks += ralen;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
if (ap->wasdel)
ap->ip->i_delayed_blks -= ralen;
/*
* Adjust the disk quota also. This was reserved
* earlier.
*/
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
} else {
ap->length = 0;
}
return 0;
}
#endif /* CONFIG_XFS_RT */
/*
* Extent tree block counting routines.
*/
/*
* Count leaf blocks given a range of extent records. Delayed allocation
* extents are not counted towards the totals.
*/
xfs_extnum_t
xfs_bmap_count_leaves(
struct xfs_ifork *ifp,
xfs_filblks_t *count)
{
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec got;
xfs_extnum_t numrecs = 0;
for_each_xfs_iext(ifp, &icur, &got) {
if (!isnullstartblock(got.br_startblock)) {
*count += got.br_blockcount;
numrecs++;
}
}
return numrecs;
}
/*
* Count fsblocks of the given fork. Delayed allocation extents are
* not counted towards the totals.
*/
int
xfs_bmap_count_blocks(
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
xfs_extnum_t *nextents,
xfs_filblks_t *count)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_cur *cur;
xfs_extlen_t btblocks = 0;
int error;
*nextents = 0;
*count = 0;
if (!ifp)
return 0;
switch (ifp->if_format) {
case XFS_DINODE_FMT_BTREE:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
}
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
error = xfs_btree_count_blocks(cur, &btblocks);
xfs_btree_del_cursor(cur, error);
if (error)
return error;
/*
* xfs_btree_count_blocks includes the root block contained in
* the inode fork in @btblocks, so subtract one because we're
* only interested in allocated disk blocks.
*/
*count += btblocks - 1;
/* fall through */
case XFS_DINODE_FMT_EXTENTS:
*nextents = xfs_bmap_count_leaves(ifp, count);
break;
}
return 0;
}
static int
xfs_getbmap_report_one(
struct xfs_inode *ip,
struct getbmapx *bmv,
struct kgetbmap *out,
int64_t bmv_end,
struct xfs_bmbt_irec *got)
{
struct kgetbmap *p = out + bmv->bmv_entries;
bool shared = false;
int error;
error = xfs_reflink_trim_around_shared(ip, got, &shared);
if (error)
return error;
if (isnullstartblock(got->br_startblock) ||
got->br_startblock == DELAYSTARTBLOCK) {
/*
* Delalloc extents that start beyond EOF can occur due to
* speculative EOF allocation when the delalloc extent is larger
* than the largest freespace extent at conversion time. These
* extents cannot be converted by data writeback, so can exist
* here even if we are not supposed to be finding delalloc
* extents.
*/
if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
p->bmv_oflags |= BMV_OF_DELALLOC;
p->bmv_block = -2;
} else {
p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
}
if (got->br_state == XFS_EXT_UNWRITTEN &&
(bmv->bmv_iflags & BMV_IF_PREALLOC))
p->bmv_oflags |= BMV_OF_PREALLOC;
if (shared)
p->bmv_oflags |= BMV_OF_SHARED;
p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
bmv->bmv_offset = p->bmv_offset + p->bmv_length;
bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
bmv->bmv_entries++;
return 0;
}
static void
xfs_getbmap_report_hole(
struct xfs_inode *ip,
struct getbmapx *bmv,
struct kgetbmap *out,
int64_t bmv_end,
xfs_fileoff_t bno,
xfs_fileoff_t end)
{
struct kgetbmap *p = out + bmv->bmv_entries;
if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
return;
p->bmv_block = -1;
p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
bmv->bmv_offset = p->bmv_offset + p->bmv_length;
bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
bmv->bmv_entries++;
}
static inline bool
xfs_getbmap_full(
struct getbmapx *bmv)
{
return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
static bool
xfs_getbmap_next_rec(
struct xfs_bmbt_irec *rec,
xfs_fileoff_t total_end)
{
xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
if (end == total_end)
return false;
rec->br_startoff += rec->br_blockcount;
if (!isnullstartblock(rec->br_startblock) &&
rec->br_startblock != DELAYSTARTBLOCK)
rec->br_startblock += rec->br_blockcount;
rec->br_blockcount = total_end - end;
return true;
}
/*
* Get inode's extents as described in bmv, and format for output.
* Calls formatter to fill the user's buffer until all extents
* are mapped, until the passed-in bmv->bmv_count slots have
* been filled, or until the formatter short-circuits the loop,
* if it is tracking filled-in extents on its own.
*/
int /* error code */
xfs_getbmap(
struct xfs_inode *ip,
struct getbmapx *bmv, /* user bmap structure */
struct kgetbmap *out)
{
struct xfs_mount *mp = ip->i_mount;
int iflags = bmv->bmv_iflags;
int whichfork, lock, error = 0;
int64_t bmv_end, max_len;
xfs_fileoff_t bno, first_bno;
struct xfs_ifork *ifp;
struct xfs_bmbt_irec got, rec;
xfs_filblks_t len;
struct xfs_iext_cursor icur;
if (bmv->bmv_iflags & ~BMV_IF_VALID)
return -EINVAL;
#ifndef DEBUG
/* Only allow CoW fork queries if we're debugging. */
if (iflags & BMV_IF_COWFORK)
return -EINVAL;
#endif
if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
return -EINVAL;
if (bmv->bmv_length < -1)
return -EINVAL;
bmv->bmv_entries = 0;
if (bmv->bmv_length == 0)
return 0;
if (iflags & BMV_IF_ATTRFORK)
whichfork = XFS_ATTR_FORK;
else if (iflags & BMV_IF_COWFORK)
whichfork = XFS_COW_FORK;
else
whichfork = XFS_DATA_FORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
switch (whichfork) {
case XFS_ATTR_FORK:
if (!XFS_IFORK_Q(ip))
goto out_unlock_iolock;
max_len = 1LL << 32;
lock = xfs_ilock_attr_map_shared(ip);
break;
case XFS_COW_FORK:
/* No CoW fork? Just return */
if (!ifp)
goto out_unlock_iolock;
if (xfs_get_cowextsz_hint(ip))
max_len = mp->m_super->s_maxbytes;
else
max_len = XFS_ISIZE(ip);
lock = XFS_ILOCK_SHARED;
xfs_ilock(ip, lock);
break;
case XFS_DATA_FORK:
if (!(iflags & BMV_IF_DELALLOC) &&
(ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
if (error)
goto out_unlock_iolock;
/*
* Even after flushing the inode, there can still be
* delalloc blocks on the inode beyond EOF due to
* speculative preallocation. These are not removed
* until the release function is called or the inode
* is inactivated. Hence we cannot assert here that
* ip->i_delayed_blks == 0.
*/
}
if (xfs_get_extsz_hint(ip) ||
(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
max_len = mp->m_super->s_maxbytes;
else
max_len = XFS_ISIZE(ip);
lock = xfs_ilock_data_map_shared(ip);
break;
}
switch (ifp->if_format) {
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
break;
case XFS_DINODE_FMT_LOCAL:
/* Local format inode forks report no extents. */
goto out_unlock_ilock;
default:
error = -EINVAL;
goto out_unlock_ilock;
}
if (bmv->bmv_length == -1) {
max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
}
bmv_end = bmv->bmv_offset + bmv->bmv_length;
first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(NULL, ip, whichfork);
if (error)
goto out_unlock_ilock;
}
if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
/*
* Report a whole-file hole if the delalloc flag is set to
* stay compatible with the old implementation.
*/
if (iflags & BMV_IF_DELALLOC)
xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
goto out_unlock_ilock;
}
while (!xfs_getbmap_full(bmv)) {
xfs_trim_extent(&got, first_bno, len);
/*
* Report an entry for a hole if this extent doesn't directly
* follow the previous one.
*/
if (got.br_startoff > bno) {
xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
got.br_startoff);
if (xfs_getbmap_full(bmv))
break;
}
/*
* In order to report shared extents accurately, we report each
* distinct shared / unshared part of a single bmbt record with
* an individual getbmapx record.
*/
bno = got.br_startoff + got.br_blockcount;
rec = got;
do {
error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
&rec);
if (error || xfs_getbmap_full(bmv))
goto out_unlock_ilock;
} while (xfs_getbmap_next_rec(&rec, bno));
if (!xfs_iext_next_extent(ifp, &icur, &got)) {
xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
if (whichfork != XFS_ATTR_FORK && bno < end &&
!xfs_getbmap_full(bmv)) {
xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
bno, end);
}
break;
}
if (bno >= first_bno + len)
break;
}
out_unlock_ilock:
xfs_iunlock(ip, lock);
out_unlock_iolock:
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return error;
}
/*
* Dead simple method of punching delayed allocation blocks from a range in
* the inode. This will always punch out both the start and end blocks, even
* if the ranges only partially overlap them, so it is up to the caller to
* ensure that partial blocks are not passed in.
*/
int
xfs_bmap_punch_delalloc_range(
struct xfs_inode *ip,
xfs_fileoff_t start_fsb,
xfs_fileoff_t length)
{
struct xfs_ifork *ifp = &ip->i_df;
xfs_fileoff_t end_fsb = start_fsb + length;
struct xfs_bmbt_irec got, del;
struct xfs_iext_cursor icur;
int error = 0;
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
goto out_unlock;
while (got.br_startoff + got.br_blockcount > start_fsb) {
del = got;
xfs_trim_extent(&del, start_fsb, length);
/*
* A delete can push the cursor forward. Step back to the
* previous extent on non-delalloc or extents outside the
* target range.
*/
if (!del.br_blockcount ||
!isnullstartblock(del.br_startblock)) {
if (!xfs_iext_prev_extent(ifp, &icur, &got))
break;
continue;
}
error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
&got, &del);
if (error || !xfs_iext_get_extent(ifp, &icur, &got))
break;
}
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* Test whether it is appropriate to check an inode for and free post EOF
* blocks. The 'force' parameter determines whether we should also consider
* regular files that are marked preallocated or append-only.
*/
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
/* prealloc/delalloc exists only on regular files */
if (!S_ISREG(VFS_I(ip)->i_mode))
return false;
/*
* Zero sized files with no cached pages and delalloc blocks will not
* have speculative prealloc/delalloc blocks to remove.
*/
if (VFS_I(ip)->i_size == 0 &&
VFS_I(ip)->i_mapping->nrpages == 0 &&
ip->i_delayed_blks == 0)
return false;
/* If we haven't read in the extent list, then don't do it now. */
if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
return false;
/*
* Do not free real preallocated or append-only files unless the file
* has delalloc blocks and we are forced to remove them.
*/
if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
if (!force || ip->i_delayed_blks == 0)
return false;
return true;
}
/*
* This is called to free any blocks beyond eof. The caller must hold
* IOLOCK_EXCL unless we are in the inode reclaim path and have the only
* reference to the inode.
*/
int
xfs_free_eofblocks(
struct xfs_inode *ip)
{
struct xfs_trans *tp;
int error;
xfs_fileoff_t end_fsb;
xfs_fileoff_t last_fsb;
xfs_filblks_t map_len;
int nimaps;
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
/*
* Figure out if there are any blocks beyond the end
* of the file. If not, then there is nothing to do.
*/
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (last_fsb <= end_fsb)
return 0;
map_len = last_fsb - end_fsb;
nimaps = 1;
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
* If there are blocks after the end of file, truncate the file to its
* current size to free them up.
*/
if (!error && (nimaps != 0) &&
(imap.br_startblock != HOLESTARTBLOCK ||
ip->i_delayed_blks)) {
/*
* Attach the dquots to the inode up front.
*/
error = xfs_qm_dqattach(ip);
if (error)
return error;
/* wait on dio to ensure i_size has settled */
inode_dio_wait(VFS_I(ip));
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
&tp);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
/*
* Do not update the on-disk file size. If we update the
* on-disk file size and then the system crashes before the
* contents of the file are flushed to disk then the files
* may be full of holes (ie NULL files bug).
*/
error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
if (error) {
/*
* If we get an error at this point we simply don't
* bother truncating the file.
*/
xfs_trans_cancel(tp);
} else {
error = xfs_trans_commit(tp);
if (!error)
xfs_inode_clear_eofblocks_tag(ip);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
return error;
}
int
xfs_alloc_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len,
int alloc_type)
{
xfs_mount_t *mp = ip->i_mount;
xfs_off_t count;
xfs_filblks_t allocated_fsb;
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
xfs_fileoff_t endoffset_fsb;
int nimaps;
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
uint qblocks, resblks, resrtextents;
int error;
trace_xfs_alloc_file_space(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
error = xfs_qm_dqattach(ip);
if (error)
return error;
if (len <= 0)
return -EINVAL;
rt = XFS_IS_REALTIME_INODE(ip);
extsz = xfs_get_extsz_hint(ip);
count = len;
imapp = &imaps[0];
nimaps = 1;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
allocatesize_fsb = endoffset_fsb - startoffset_fsb;
/*
* Allocate file space until done or until there is an error
*/
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
/*
* Determine space reservations for data/realtime.
*/
if (unlikely(extsz)) {
s = startoffset_fsb;
do_div(s, extsz);
s *= extsz;
e = startoffset_fsb + allocatesize_fsb;
div_u64_rem(startoffset_fsb, extsz, &temp);
if (temp)
e += temp;
div_u64_rem(e, extsz, &temp);
if (temp)
e += extsz - temp;
} else {
s = 0;
e = allocatesize_fsb;
}
/*
* The transaction reservation is limited to a 32-bit block
* count, hence we need to limit the number of blocks we are
* trying to reserve to avoid an overflow. We can't allocate
* more than @nimaps extents, and an extent is limited on disk
* to MAXEXTLEN (21 bits), so use that to enforce the limit.
*/
resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
if (unlikely(rt)) {
resrtextents = qblocks = resblks;
resrtextents /= mp->m_sb.sb_rextsize;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
quota_flag = XFS_QMOPT_RES_RTBLKS;
} else {
resrtextents = 0;
resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
quota_flag = XFS_QMOPT_RES_REGBLKS;
}
/*
* Allocate and setup the transaction.
*/
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
resrtextents, 0, &tp);
/*
* Check for running out of space
*/
if (error) {
/*
* Free the transaction structure.
*/
ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
0, quota_flag);
if (error)
goto error1;
xfs_trans_ijoin(tp, ip, 0);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, alloc_type, 0, imapp,
&nimaps);
if (error)
goto error0;
/*
* Complete the transaction
*/
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
break;
allocated_fsb = imapp->br_blockcount;
if (nimaps == 0) {
error = -ENOSPC;
break;
}
startoffset_fsb += allocated_fsb;
allocatesize_fsb -= allocated_fsb;
}
return error;
error0: /* unlock inode, unreserve quota blocks, cancel trans */
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
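/*
 * Illustrative sketch (not part of the original file) of the extent size
 * hint rounding performed in xfs_alloc_file_space() above: the start is
 * rounded down to an extsz boundary, the end grown by the same remainder
 * and then rounded up. E.g. s=5, e=13 with extsz=8 becomes s=0, e=24.
 */
#ifdef XFS_ALLOC_ALIGN_EXAMPLE
static void example_align_to_extsz(unsigned long long *s,
				   unsigned long long *e,
				   unsigned int extsz)
{
	unsigned long long rem = *s % extsz;

	*s -= rem;				/* round the start down */
	*e += rem;				/* keep the reserved length */
	if (*e % extsz)
		*e += extsz - (*e % extsz);	/* round the end up */
}
#endif /* XFS_ALLOC_ALIGN_EXAMPLE */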
static int
xfs_unmap_extent(
struct xfs_inode *ip,
xfs_fileoff_t startoffset_fsb,
xfs_filblks_t len_fsb,
int *done)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
int error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
if (error) {
ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
if (error)
goto out_trans_cancel;
xfs_trans_ijoin(tp, ip, 0);
error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
if (error)
goto out_trans_cancel;
error = xfs_trans_commit(tp);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
}
/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
struct inode *inode = VFS_I(ip);
xfs_off_t rounding, start, end;
int error;
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (error)
return error;
truncate_pagecache_range(inode, start, end);
return 0;
}
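/*
 * Illustrative check (not part of the original file) of the rounding above:
 * on a hypothetical 1k-block filesystem with 4k pages, rounding is
 * max(1024, 4096) = 4096, so punching bytes [5000, 6000) flushes and
 * truncates the page cache over [4096, 8191].
 */
#ifdef XFS_FLUSH_RANGE_EXAMPLE
static void example_flush_rounding(void)
{
	xfs_off_t rounding = 4096;
	xfs_off_t start = round_down(5000, rounding);		/* 4096 */
	xfs_off_t end = round_up(5000 + 1000, rounding) - 1;	/* 8191 */

	(void)start;
	(void)end;
}
#endif /* XFS_FLUSH_RANGE_EXAMPLE */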
int
xfs_free_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t startoffset_fsb;
xfs_fileoff_t endoffset_fsb;
int done = 0, error;
trace_xfs_free_file_space(ip);
error = xfs_qm_dqattach(ip);
if (error)
return error;
if (len <= 0) /* if nothing being freed */
return 0;
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
/* We can only free complete realtime extents. */
if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
startoffset_fsb = roundup_64(startoffset_fsb,
mp->m_sb.sb_rextsize);
endoffset_fsb = rounddown_64(endoffset_fsb,
mp->m_sb.sb_rextsize);
}
/*
* Need to zero the stuff we're not freeing, on disk.
*/
if (endoffset_fsb > startoffset_fsb) {
while (!done) {
error = xfs_unmap_extent(ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb, &done);
if (error)
return error;
}
}
/*
* Now that we've unmapped all full blocks we'll have to zero out any
* partial block at the beginning and/or end. iomap_zero_range is smart
* enough to skip any holes, including those we just created, but we
* must take care not to zero beyond EOF and enlarge i_size.
*/
if (offset >= XFS_ISIZE(ip))
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
&xfs_buffered_write_iomap_ops);
if (error)
return error;
/*
* If we zeroed right up to EOF and EOF straddles a page boundary we
* must make sure that the post-EOF area is also zeroed because the
* page could be mmap'd and iomap_zero_range doesn't do that for us.
* Writeback of the eof page will do this, albeit clumsily.
*/
if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
round_down(offset + len, PAGE_SIZE), LLONG_MAX);
}
return error;
}
static int
xfs_prepare_shift(
struct xfs_inode *ip,
loff_t offset)
{
struct xfs_mount *mp = ip->i_mount;
int error;
/*
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
*/
if (xfs_can_free_eofblocks(ip, true)) {
error = xfs_free_eofblocks(ip);
if (error)
return error;
}
/*
* Shift operations must stabilize the start block offset boundary along
* with the full range of the operation. If we don't, a COW writeback
* completion could race with an insert, front merge with the start
* extent (after split) during the shift and corrupt the file. Start
* with the block just prior to the start to stabilize the boundary.
*/
offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
if (offset)
offset -= (1 << mp->m_sb.sb_blocklog);
/*
* Writeback and invalidate cache for the remainder of the file as we're
* about to shift down every extent from offset to EOF.
*/
error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
if (error)
return error;
/*
* Clean out anything hanging around in the cow fork now that
* we've flushed all the dirty data out to disk to avoid having
* CoW extents at the wrong offsets.
*/
if (xfs_inode_has_cow_data(ip)) {
error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
true);
if (error)
return error;
}
return 0;
}
/*
 * xfs_collapse_file_space()
 * This routine frees disk space and shifts extents for the given file.
 * First it frees the data blocks in the specified range by calling
 * xfs_free_file_space(), which also syncs dirty data and invalidates the
 * page cache over the affected region. Extent records are then shifted
 * left to cover the resulting hole.
 * RETURNS:
 * 0 on success
 * errno on error
 */
int
xfs_collapse_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
bool done = false;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
trace_xfs_collapse_file_space(ip);
error = xfs_free_file_space(ip, offset, len);
if (error)
return error;
error = xfs_prepare_shift(ip, offset);
if (error)
return error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
if (error)
return error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
while (!done) {
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
&done);
if (error)
goto out_trans_cancel;
if (done)
break;
/* finish any deferred frees and roll the transaction */
error = xfs_defer_finish(&tp);
if (error)
goto out_trans_cancel;
}
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
 * xfs_insert_file_space()
 * This routine creates hole space by shifting extents for the given file.
 * First it syncs dirty data and invalidates the page cache over the region
 * the insert range is working on. It then splits an extent into two at the
 * given offset by calling xfs_bmap_split_extent, and shifts all extent
 * records lying between [offset, last allocated extent] to the right to
 * make room for the hole.
 * RETURNS:
 * 0 on success
 * errno on error
 */
int
xfs_insert_file_space(
struct xfs_inode *ip,
loff_t offset,
loff_t len)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
xfs_fileoff_t next_fsb = NULLFSBLOCK;
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
bool done = false;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
trace_xfs_insert_file_space(ip);
error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
if (error)
return error;
error = xfs_prepare_shift(ip, offset);
if (error)
return error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
if (error)
return error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
/*
* The extent shifting code works on extent granularity. So, if stop_fsb
* is not the starting block of extent, we need to split the extent at
* stop_fsb.
*/
error = xfs_bmap_split_extent(tp, ip, stop_fsb);
if (error)
goto out_trans_cancel;
do {
error = xfs_defer_finish(&tp);
if (error)
goto out_trans_cancel;
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
&done, stop_fsb);
if (error)
goto out_trans_cancel;
} while (!done);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* We need to check that the format of the data fork in the temporary inode is
* valid for the target inode before doing the swap. This is not a problem with
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
* data fork depending on the space the attribute fork is taking so we can get
* invalid formats on the target inode.
*
* E.g. target has space for 7 extents in extent format, temp inode only has
* space for 6. If we defragment down to 7 extents, then the tmp format is a
* btree, but when swapped it needs to be in extent format. Hence we can't just
* blindly swap data forks on attr2 filesystems.
*
* Note that we check the swap in both directions so that we don't end up with
* a corrupt temporary inode, either.
*
* Note that fixing the way xfs_fsr sets up the attribute fork in the source
* inode will prevent this situation from occurring, so all we do here is
* reject and log the attempt. Basically we are putting the responsibility on
* userspace to get this right.
*/
static int
xfs_swap_extents_check_format(
struct xfs_inode *ip, /* target inode */
struct xfs_inode *tip) /* tmp inode */
{
struct xfs_ifork *ifp = &ip->i_df;
struct xfs_ifork *tifp = &tip->i_df;
/* User/group/project quota ids must match if quotas are enforced. */
if (XFS_IS_QUOTA_ON(ip->i_mount) &&
(!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
!gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
ip->i_d.di_projid != tip->i_d.di_projid))
return -EINVAL;
/* Should never get a local format */
if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
tifp->if_format == XFS_DINODE_FMT_LOCAL)
return -EINVAL;
/*
* If the target inode has fewer extents than the temporary inode then
* why did userspace call us?
*/
if (ifp->if_nextents < tifp->if_nextents)
return -EINVAL;
/*
* If we have to use the (expensive) rmap swap method, we can
* handle any number of extents and any format.
*/
if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
return 0;
/*
* if the target inode is in extent form and the temp inode is in btree
* form then we will end up with the target inode in the wrong format
* as we already know there are less extents in the temp inode.
*/
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
tifp->if_format == XFS_DINODE_FMT_BTREE)
return -EINVAL;
/* Check temp in extent form to max in target */
if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
/* Check target in extent form to max in temp */
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
/*
* If we are in a btree format, check that the temp root block will fit
* in the target and that it has enough extents to be in btree format
* in the target.
*
* Note that we have to be careful to allow btree->extent conversions
* (a common defrag case) which will occur when the temp inode is in
* extent format...
*/
if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_Q(ip) &&
XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
return -EINVAL;
if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
}
/* Reciprocal target->temp btree format checks */
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_Q(tip) &&
XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
return -EINVAL;
if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
}
return 0;
}
static int
xfs_swap_extent_flush(
struct xfs_inode *ip)
{
int error;
error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
if (error)
return error;
truncate_pagecache_range(VFS_I(ip), 0, -1);
/* Verify O_DIRECT for ftmp */
if (VFS_I(ip)->i_mapping->nrpages)
return -EINVAL;
return 0;
}
/*
* Move extents from one file to another, when rmap is enabled.
*/
STATIC int
xfs_swap_extent_rmap(
struct xfs_trans **tpp,
struct xfs_inode *ip,
struct xfs_inode *tip)
{
struct xfs_trans *tp = *tpp;
struct xfs_bmbt_irec irec;
struct xfs_bmbt_irec uirec;
struct xfs_bmbt_irec tirec;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
xfs_filblks_t count_fsb;
int error;
xfs_filblks_t ilen;
xfs_filblks_t rlen;
int nimaps;
uint64_t tip_flags2;
/*
* If the source file has shared blocks, we must flag the donor
* file as having shared blocks so that we get the shared-block
* rmap functions when we go to fix up the rmaps. The flags
* will be switch for reals later.
*/
tip_flags2 = tip->i_d.di_flags2;
if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
offset_fsb = 0;
end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
while (count_fsb) {
/* Read extent from the donor file */
nimaps = 1;
error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
&nimaps, 0);
if (error)
goto out;
ASSERT(nimaps == 1);
ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
trace_xfs_swap_extent_rmap_remap(tip, &tirec);
ilen = tirec.br_blockcount;
/* Unmap the old blocks in the source file. */
while (tirec.br_blockcount) {
ASSERT(tp->t_firstblock == NULLFSBLOCK);
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
/* Read extent from the source file */
nimaps = 1;
error = xfs_bmapi_read(ip, tirec.br_startoff,
tirec.br_blockcount, &irec,
&nimaps, 0);
if (error)
goto out;
ASSERT(nimaps == 1);
ASSERT(tirec.br_startoff == irec.br_startoff);
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
/* Trim the extent. */
uirec = tirec;
uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
tirec.br_blockcount,
irec.br_blockcount);
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
/* Remove the mapping from the donor file. */
xfs_bmap_unmap_extent(tp, tip, &uirec);
/* Remove the mapping from the source file. */
xfs_bmap_unmap_extent(tp, ip, &irec);
/* Map the donor file's blocks into the source file. */
xfs_bmap_map_extent(tp, ip, &uirec);
/* Map the source file's blocks into the donor file. */
xfs_bmap_map_extent(tp, tip, &irec);
error = xfs_defer_finish(tpp);
tp = *tpp;
if (error)
goto out;
tirec.br_startoff += rlen;
if (tirec.br_startblock != HOLESTARTBLOCK &&
tirec.br_startblock != DELAYSTARTBLOCK)
tirec.br_startblock += rlen;
tirec.br_blockcount -= rlen;
}
/* Roll on... */
count_fsb -= ilen;
offset_fsb += ilen;
}
tip->i_d.di_flags2 = tip_flags2;
return 0;
out:
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
tip->i_d.di_flags2 = tip_flags2;
return error;
}
/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_inode *tip,
int *src_log_flags,
int *target_log_flags)
{
xfs_filblks_t aforkblks = 0;
xfs_filblks_t taforkblks = 0;
xfs_extnum_t junk;
uint64_t tmp;
int error;
/*
* Count the number of extended attribute blocks
*/
if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
&aforkblks);
if (error)
return error;
}
if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
&taforkblks);
if (error)
return error;
}
/*
* Btree format (v3) inodes have the inode number stamped in the bmbt
* block headers. We can't start changing the bmbt blocks until the
* inode owner change is logged so recovery does the right thing in the
* event of a crash. Set the owner change log flags now and leave the
* bmbt scan as the last step.
*/
if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*target_log_flags) |= XFS_ILOG_DOWNER;
if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*src_log_flags) |= XFS_ILOG_DOWNER;
}
/*
* Swap the data forks of the inodes
*/
swap(ip->i_df, tip->i_df);
/*
* Fix the on-disk inode values
*/
tmp = (uint64_t)ip->i_d.di_nblocks;
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
/*
* The extents in the source inode could still contain speculative
* preallocation beyond EOF (e.g. the file is open but not modified
* while defrag is in progress). In that case, we need to copy over the
* number of delalloc blocks the data fork in the source inode is
* tracking beyond EOF so that when the fork is truncated away when the
* temporary inode is unlinked we don't underrun the i_delayed_blks
* counter on that inode.
*/
ASSERT(tip->i_delayed_blks == 0);
tip->i_delayed_blks = ip->i_delayed_blks;
ip->i_delayed_blks = 0;
switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
(*src_log_flags & XFS_ILOG_DOWNER));
(*src_log_flags) |= XFS_ILOG_DBROOT;
break;
}
switch (tip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
(*target_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
(*target_log_flags) |= XFS_ILOG_DBROOT;
ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
(*target_log_flags & XFS_ILOG_DOWNER));
break;
}
return 0;
}
/*
* Fix up the owners of the bmbt blocks to refer to the current inode. The
* change owner scan attempts to order all modified buffers in the current
* transaction. In the event of ordered buffer failure, the offending buffer is
* physically logged as a fallback and the scan returns -EAGAIN. We must roll
* the transaction in this case to replenish the fallback log reservation and
* restart the scan. This process repeats until the scan completes.
*/
static int
xfs_swap_change_owner(
struct xfs_trans **tpp,
struct xfs_inode *ip,
struct xfs_inode *tmpip)
{
int error;
struct xfs_trans *tp = *tpp;
do {
error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
NULL);
/* success or fatal error */
if (error != -EAGAIN)
break;
error = xfs_trans_roll(tpp);
if (error)
break;
tp = *tpp;
/*
* Redirty both inodes so they can relog and keep the log tail
* moving forward.
*/
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, tmpip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
} while (true);
return error;
}
int
xfs_swap_extents(
struct xfs_inode *ip, /* target inode */
struct xfs_inode *tip, /* tmp inode */
struct xfs_swapext *sxp)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
struct xfs_bstat *sbp = &sxp->sx_stat;
int src_log_flags, target_log_flags;
int error = 0;
int lock_flags;
uint64_t f;
int resblks = 0;
unsigned int flags = 0;
/*
* Lock the inodes against other IO, page faults and truncate to
* begin with. Then we can ensure the inodes are flushed and have no
* page cache safely. Once we have done this we can take the ilocks and
* do the rest of the checks.
*/
lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
lock_flags = XFS_MMAPLOCK_EXCL;
xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
/* Verify that both files have the same format */
if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
error = -EINVAL;
goto out_unlock;
}
/* Verify both files are either real-time or non-realtime */
if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
error = -EINVAL;
goto out_unlock;
}
error = xfs_qm_dqattach(ip);
if (error)
goto out_unlock;
error = xfs_qm_dqattach(tip);
if (error)
goto out_unlock;
error = xfs_swap_extent_flush(ip);
if (error)
goto out_unlock;
error = xfs_swap_extent_flush(tip);
if (error)
goto out_unlock;
if (xfs_inode_has_cow_data(tip)) {
error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
if (error)
goto out_unlock;
}
/*
* Extent "swapping" with rmap requires a permanent reservation and
* a block reservation because it's really just a remap operation
* performed with log redo items!
*/
if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
int w = XFS_DATA_FORK;
uint32_t ipnext = ip->i_df.if_nextents;
uint32_t tipnext = tip->i_df.if_nextents;
/*
* Conceptually this shouldn't affect the shape of either bmbt,
* but since we atomically move extents one by one, we reserve
* enough space to rebuild both trees.
*/
resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
/*
* If either inode straddles a bmapbt block allocation boundary,
* the rmapbt algorithm triggers repeated allocs and frees as
* extents are remapped. This can exhaust the block reservation
* prematurely and cause shutdown. Return freed blocks to the
* transaction reservation to counter this behavior.
*/
flags |= XFS_TRANS_RES_FDBLKS;
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
&tp);
if (error)
goto out_unlock;
/*
* Lock and join the inodes to the transaction so that transaction commit
* or cancel will unlock the inodes from this point onwards.
*/
xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
lock_flags |= XFS_ILOCK_EXCL;
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, tip, 0);
/* Verify all data are being swapped */
if (sxp->sx_offset != 0 ||
sxp->sx_length != ip->i_d.di_size ||
sxp->sx_length != tip->i_d.di_size) {
error = -EFAULT;
goto out_trans_cancel;
}
trace_xfs_swap_extent_before(ip, 0);
trace_xfs_swap_extent_before(tip, 1);
/* check inode formats now that data is flushed */
error = xfs_swap_extents_check_format(ip, tip);
if (error) {
xfs_notice(mp,
"%s: inode 0x%llx format is incompatible for exchanging.",
__func__, ip->i_ino);
goto out_trans_cancel;
}
/*
* Compare the current change & modify times with that
* passed in. If they differ, we abort this swap.
* This is the mechanism used to ensure the calling
* process that the file was not changed out from
* under it.
*/
if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
(sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
(sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
(sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
error = -EBUSY;
goto out_trans_cancel;
}
/*
* Note the trickiness in setting the log flags - we set the owner log
* flag on the opposite inode (i.e. the inode we are setting the new
* owner to be) because once we swap the forks and log that, log
* recovery is going to see the fork as owned by the swapped inode,
* not the pre-swapped inodes.
*/
src_log_flags = XFS_ILOG_CORE;
target_log_flags = XFS_ILOG_CORE;
if (xfs_sb_version_hasrmapbt(&mp->m_sb))
error = xfs_swap_extent_rmap(&tp, ip, tip);
else
error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
&target_log_flags);
if (error)
goto out_trans_cancel;
/* Do we have to swap reflink flags? */
if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
(tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
}
/* Swap the cow forks. */
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
ASSERT(!ip->i_cowfp ||
ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
ASSERT(!tip->i_cowfp ||
tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
swap(ip->i_cowfp, tip->i_cowfp);
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
xfs_inode_set_cowblocks_tag(ip);
else
xfs_inode_clear_cowblocks_tag(ip);
if (tip->i_cowfp && tip->i_cowfp->if_bytes)
xfs_inode_set_cowblocks_tag(tip);
else
xfs_inode_clear_cowblocks_tag(tip);
}
xfs_trans_log_inode(tp, ip, src_log_flags);
xfs_trans_log_inode(tp, tip, target_log_flags);
/*
* The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
* have inode number owner values in the bmbt blocks that still refer to
* the old inode. Scan each bmbt to fix up the owner values with the
* inode number of the current inode.
*/
if (src_log_flags & XFS_ILOG_DOWNER) {
error = xfs_swap_change_owner(&tp, ip, tip);
if (error)
goto out_trans_cancel;
}
if (target_log_flags & XFS_ILOG_DOWNER) {
error = xfs_swap_change_owner(&tp, tip, ip);
if (error)
goto out_trans_cancel;
}
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp);
trace_xfs_swap_extent_after(ip, 0);
trace_xfs_swap_extent_after(tip, 1);
out_unlock:
xfs_iunlock(ip, lock_flags);
xfs_iunlock(tip, lock_flags);
unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
}
| gpl-2.0 |
razrqcom-dev-team/android_kernel_motorola_msm8226 | drivers/mmc/core/cd-gpio.c | 33 | 2673 | /*
* Generic GPIO card-detect helper
*
* Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mmc/cd-gpio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/slab.h>
struct mmc_cd_gpio {
unsigned int gpio;
bool status;
char label[0]; /* flexible array member must stay last: the label string is stored here */
};
static int mmc_cd_get_status(struct mmc_host *host)
{
int ret = -ENOSYS;
struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
if (!cd || !gpio_is_valid(cd->gpio))
goto out;
ret = !gpio_get_value_cansleep(cd->gpio) ^
!!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
out:
return ret;
}
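/*
 * Illustrative truth table for the XOR above (not part of the original
 * driver): with an active-low switch (MMC_CAP2_CD_ACTIVE_HIGH clear),
 * GPIO level 0 reads back as 1, i.e. "card present"; with the cap set
 * the sense inverts, so GPIO level 1 means "card present".
 */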
static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
{
struct mmc_host *host = dev_id;
struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
int status;
status = mmc_cd_get_status(host);
if (unlikely(status < 0))
goto out;
if (status ^ cd->status) {
pr_info("%s: slot status change detected (%d -> %d), GPIO_ACTIVE_%s\n",
mmc_hostname(host), cd->status, status,
(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) ?
"HIGH" : "LOW");
cd->status = status;
host->card_bad = 0;
/* Schedule a card detection after a debounce timeout */
mmc_detect_change(host, msecs_to_jiffies(100));
}
out:
return IRQ_HANDLED;
}
int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
{
size_t len = strlen(dev_name(host->parent)) + 4;
struct mmc_cd_gpio *cd;
int irq = gpio_to_irq(gpio);
int ret;
if (irq < 0)
return irq;
cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
if (!cd)
return -ENOMEM;
snprintf(cd->label, len, "%s cd", dev_name(host->parent));
ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label);
if (ret < 0)
goto egpioreq;
cd->gpio = gpio;
host->hotplug.irq = irq;
host->hotplug.handler_priv = cd;
ret = mmc_cd_get_status(host);
if (ret < 0)
goto eirqreq;
cd->status = ret;
ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
cd->label, host);
if (ret < 0)
goto eirqreq;
return 0;
eirqreq:
gpio_free(gpio);
egpioreq:
kfree(cd);
return ret;
}
EXPORT_SYMBOL(mmc_cd_gpio_request);
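/*
 * Usage sketch (illustrative only, not part of this driver): a host
 * controller would typically hook up card detect from its probe path.
 * The GPIO number below is a made-up example.
 */
#if 0
static int example_host_probe(struct mmc_host *host)
{
int ret = mmc_cd_gpio_request(host, 42 /* hypothetical CD GPIO */);
if (ret)
return ret;
/* ... and on the remove path: mmc_cd_gpio_free(host); */
return 0;
}
#endif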
void mmc_cd_gpio_free(struct mmc_host *host)
{
struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
if (!cd || !gpio_is_valid(cd->gpio))
return;
free_irq(host->hotplug.irq, host);
gpio_free(cd->gpio);
cd->gpio = -EINVAL;
kfree(cd);
}
EXPORT_SYMBOL(mmc_cd_gpio_free);
| gpl-2.0 |
skalk/linux | drivers/soc/qcom/rpmh-rsc.c | 33 | 31561 | // SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include "rpmh-internal.h"
#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"
#define RSC_DRV_TCS_OFFSET 672
#define RSC_DRV_CMD_OFFSET 20
/* DRV HW Solver Configuration Information Register */
#define DRV_SOLVER_CONFIG 0x04
#define DRV_HW_SOLVER_MASK 1
#define DRV_HW_SOLVER_SHIFT 24
/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG 0x0C
#define DRV_NUM_TCS_MASK 0x3F
#define DRV_NUM_TCS_SHIFT 6
#define DRV_NCPT_MASK 0x1F
#define DRV_NCPT_SHIFT 27
/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE 0x00
#define RSC_DRV_IRQ_STATUS 0x04
#define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */
/*
* Offsets for per TCS Registers.
*
* TCSes start at 0x10 from tcs_base and are stored one after another.
* Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
* of the below to find a register.
*/
#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */
#define RSC_DRV_CONTROL 0x14
#define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */
#define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */
/*
* Offsets for per command in a TCS.
*
* Commands (up to 16) start at 0x30 in a TCS; multiply command index
* by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
*/
#define RSC_DRV_CMD_MSGID 0x30
#define RSC_DRV_CMD_ADDR 0x34
#define RSC_DRV_CMD_DATA 0x38
#define RSC_DRV_CMD_STATUS 0x3C
#define RSC_DRV_CMD_RESP_DATA 0x40
#define TCS_AMC_MODE_ENABLE BIT(16)
#define TCS_AMC_MODE_TRIGGER BIT(24)
/* TCS CMD register bit mask */
#define CMD_MSGID_LEN 8
#define CMD_MSGID_RESP_REQ BIT(8)
#define CMD_MSGID_WRITE BIT(16)
#define CMD_STATUS_ISSUED BIT(8)
#define CMD_STATUS_COMPL BIT(16)
/*
* Here's a high level overview of how all the registers in RPMH work
* together:
*
* - The main rpmh-rsc address is the base of a register space that can
* be used to find overall configuration of the hardware
* (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
* space are all the TCS blocks. The offset of the TCS blocks is
* specified in the device tree by "qcom,tcs-offset" and used to
* compute tcs_base.
* - TCS blocks come one after another. Type, count, and order are
* specified by the device tree as "qcom,tcs-config".
* - Each TCS block has some registers, then space for up to 16 commands.
* Note that though address space is reserved for 16 commands, fewer
* might be present. See ncpt (num cmds per TCS).
*
* Here's a picture:
*
* +---------------------------------------------------+
* |RSC |
* | ctrl |
* | |
* | Drvs: |
* | +-----------------------------------------------+ |
* | |DRV0 | |
* | | ctrl/config | |
* | | IRQ | |
* | | | |
* | | TCSes: | |
* | | +------------------------------------------+ | |
* | | |TCS0 | | | | | | | | | | | | | | |
* | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
* | | | | | | | | | | | | | | | | | |
* | | +------------------------------------------+ | |
* | | +------------------------------------------+ | |
* | | |TCS1 | | | | | | | | | | | | | | |
* | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
* | | | | | | | | | | | | | | | | | |
* | | +------------------------------------------+ | |
* | | +------------------------------------------+ | |
* | | |TCS2 | | | | | | | | | | | | | | |
* | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
* | | | | | | | | | | | | | | | | | |
* | | +------------------------------------------+ | |
* | | ...... | |
* | +-----------------------------------------------+ |
* | +-----------------------------------------------+ |
* | |DRV1 | |
* | | (same as DRV0) | |
* | +-----------------------------------------------+ |
* | ...... |
* +---------------------------------------------------+
*/
static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}
static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}
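/*
 * Worked example (illustrative, using the offsets defined above): the
 * RSC_DRV_CMD_ADDR register of command 2 in TCS 3 sits at
 * tcs_base + 672 * 3 + 0x34 + 20 * 2 = tcs_base + 2108.
 */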
static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
int cmd_id)
{
return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
int cmd_id, u32 data)
{
writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
u32 data)
{
writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
u32 data)
{
u32 new_data;
writel(data, tcs_reg_addr(drv, reg, tcs_id));
if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data,
new_data == data, 1, USEC_PER_SEC))
pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
data, tcs_id, reg);
}
/**
* tcs_is_free() - Return if a TCS is totally free.
* @drv: The RSC controller.
* @tcs_id: The global ID of this TCS.
*
* Returns true if nobody has claimed this TCS (by setting tcs_in_use).
*
* Context: Must be called with the drv->lock held.
*
* Return: true if the given TCS is free.
*/
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
return !test_bit(tcs_id, drv->tcs_in_use);
}
/**
* tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
* @drv: The RSC controller.
* @type: SLEEP_TCS or WAKE_TCS
*
* This will clear the "slots" variable of the given tcs_group and also
* tell the hardware to forget about all entries.
*
* The caller must ensure that no other RPMH actions are happening when this
* function is called, since otherwise the device may immediately become
* used again even before this function exits.
*/
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
int m;
struct tcs_group *tcs = &drv->tcs[type];
/* Caller ensures nobody else is running so no lock */
if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
return;
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
}
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}
/**
* rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
* @drv: The RSC controller.
*
* The caller must ensure that no other RPMH actions are happening when this
* function is called, since otherwise the device may immediately become
* used again even before this function exits.
*/
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
tcs_invalidate(drv, SLEEP_TCS);
tcs_invalidate(drv, WAKE_TCS);
}
/**
* get_tcs_for_msg() - Get the tcs_group used to send the given message.
* @drv: The RSC controller.
* @msg: The message we want to send.
*
* This is normally pretty straightforward except if we are trying to send
* an ACTIVE_ONLY message but don't have any active_only TCSes.
*
* Return: A pointer to a tcs_group or an ERR_PTR.
*/
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
const struct tcs_request *msg)
{
int type;
struct tcs_group *tcs;
switch (msg->state) {
case RPMH_ACTIVE_ONLY_STATE:
type = ACTIVE_TCS;
break;
case RPMH_WAKE_ONLY_STATE:
type = WAKE_TCS;
break;
case RPMH_SLEEP_STATE:
type = SLEEP_TCS;
break;
default:
return ERR_PTR(-EINVAL);
}
/*
* If we are making an active request on a RSC that does not have a
* dedicated TCS for active state use, then re-purpose a wake TCS to
* send active votes. This is safe because we ensure any active-only
* transfers have finished before we use it (maybe by running from
* the last CPU in PM code).
*/
tcs = &drv->tcs[type];
if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
tcs = &drv->tcs[WAKE_TCS];
return tcs;
}
/**
* get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
* @drv: The RSC controller.
* @tcs_id: The global ID of this TCS.
*
* For ACTIVE_ONLY transfers we want to call back into the client when the
* transfer finishes. To do this we need the "request" that the client
* originally provided us. This function grabs the request that we stashed
* when we started the transfer.
*
* This only makes sense for ACTIVE_ONLY transfers since those are the only
* ones we track sending (the only ones we enable interrupts for and the only
* ones we call back to the client for).
*
* Return: The stashed request.
*/
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
int tcs_id)
{
struct tcs_group *tcs;
int i;
for (i = 0; i < TCS_TYPE_NR; i++) {
tcs = &drv->tcs[i];
if (tcs->mask & BIT(tcs_id))
return tcs->req[tcs_id - tcs->offset];
}
return NULL;
}
/**
* __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
* @drv: The controller.
* @tcs_id: The global ID of this TCS.
* @trigger: If true then untrigger/retrigger. If false then just untrigger.
*
* In the normal case we only ever call with "trigger=true" to start a
* transfer. That will un-trigger/disable the TCS from the last transfer
* then trigger/enable for this transfer.
*
* If we borrowed a wake TCS for an active-only transfer we'll also call
* this function with "trigger=false" to just do the un-trigger/disable
* before using the TCS for wake purposes again.
*
* Note that the AP is only in charge of triggering active-only transfers.
* The AP never triggers sleep/wake values using this function.
*/
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
u32 enable;
/*
* HW req: Clear the DRV_CONTROL and enable TCS again
* While clearing ensure that the AMC mode trigger is cleared
* and then the mode enable is cleared.
*/
enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
enable &= ~TCS_AMC_MODE_TRIGGER;
write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
enable &= ~TCS_AMC_MODE_ENABLE;
write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
if (trigger) {
/* Enable the AMC mode on the TCS and then trigger the TCS */
enable = TCS_AMC_MODE_ENABLE;
write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
enable |= TCS_AMC_MODE_TRIGGER;
write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
}
}
/**
* enable_tcs_irq() - Enable or disable interrupts on the given TCS.
* @drv: The controller.
* @tcs_id: The global ID of this TCS.
* @enable: If true then enable; if false then disable
*
* We only ever call this when we borrow a wake TCS for an active-only
* transfer. For active-only TCSes interrupts are always left enabled.
*/
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
u32 data;
data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
if (enable)
data |= BIT(tcs_id);
else
data &= ~BIT(tcs_id);
writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
}
/**
* tcs_tx_done() - TX Done interrupt handler.
* @irq: The IRQ number (ignored).
* @p: Pointer to "struct rsc_drv".
*
* Called for ACTIVE_ONLY transfers (those are the only ones we enable the
* IRQ for) when a transfer is done.
*
* Return: IRQ_HANDLED
*/
static irqreturn_t tcs_tx_done(int irq, void *p)
{
struct rsc_drv *drv = p;
int i, j, err = 0;
unsigned long irq_status;
const struct tcs_request *req;
struct tcs_cmd *cmd;
irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
req = get_req_from_tcs(drv, i);
if (!req) {
WARN_ON(1);
goto skip;
}
err = 0;
for (j = 0; j < req->num_cmds; j++) {
u32 sts;
cmd = &req->cmds[j];
sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
if (!(sts & CMD_STATUS_ISSUED) ||
((req->wait_for_compl || cmd->wait) &&
!(sts & CMD_STATUS_COMPL))) {
pr_err("Incomplete request: %s: addr=%#x data=%#x",
drv->name, cmd->addr, cmd->data);
err = -EIO;
}
}
trace_rpmh_tx_done(drv, i, req, err);
/*
* If wake tcs was re-purposed for sending active
* votes, clear AMC trigger & enable modes and
* disable interrupt for this TCS
*/
if (!drv->tcs[ACTIVE_TCS].num_tcs)
__tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
spin_lock(&drv->lock);
clear_bit(i, drv->tcs_in_use);
/*
* Disable interrupt for WAKE TCS to avoid being
* spammed with interrupts coming when the solver
* sends its wake votes.
*/
if (!drv->tcs[ACTIVE_TCS].num_tcs)
enable_tcs_irq(drv, i, false);
spin_unlock(&drv->lock);
if (req)
rpmh_tx_done(req, err);
}
return IRQ_HANDLED;
}
/**
* __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
* @drv: The controller.
* @tcs_id: The global ID of this TCS.
* @cmd_id: The index within the TCS to start writing.
* @msg: The message we want to send, which will contain several addr/data
* pairs to program (but few enough that they all fit in one TCS).
*
* This is used for all types of transfers (active, sleep, and wake).
*/
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
const struct tcs_request *msg)
{
u32 msgid, cmd_msgid;
u32 cmd_enable = 0;
u32 cmd_complete;
struct tcs_cmd *cmd;
int i, j;
cmd_msgid = CMD_MSGID_LEN;
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
cmd_msgid |= CMD_MSGID_WRITE;
cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
cmd = &msg->cmds[i];
cmd_enable |= BIT(j);
cmd_complete |= cmd->wait << j;
msgid = cmd_msgid;
msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
/**
* check_for_req_inflight() - Look to see if conflicting cmds are in flight.
* @drv: The controller.
* @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
* @msg: The message we want to send, which will contain several addr/data
* pairs to program (but few enough that they all fit in one TCS).
*
* This will walk through the TCSes in the group and check if any of them
* appear to be sending to addresses referenced in the message. If it finds
* one it'll return -EBUSY.
*
* Only for use for active-only transfers.
*
* Must be called with the drv->lock held since that protects tcs_in_use.
*
* Return: 0 if nothing in flight or -EBUSY if we should try again later.
* The caller must re-enable interrupts between tries since that's
* the only way tcs_is_free() will ever return true and the only way
* RSC_DRV_CMD_ENABLE will ever be cleared.
*/
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
const struct tcs_request *msg)
{
unsigned long curr_enabled;
u32 addr;
int i, j, k;
int tcs_id = tcs->offset;
for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
if (tcs_is_free(drv, tcs_id))
continue;
curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
for (k = 0; k < msg->num_cmds; k++) {
if (addr == msg->cmds[k].addr)
return -EBUSY;
}
}
}
return 0;
}
/**
* find_free_tcs() - Find free tcs in the given tcs_group; only for active.
* @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
* we borrowed it because there are zero active-only ones).
*
* Must be called with the drv->lock held since that protects tcs_in_use.
*
* Return: The first tcs that's free.
*/
static int find_free_tcs(struct tcs_group *tcs)
{
int i;
for (i = 0; i < tcs->num_tcs; i++) {
if (tcs_is_free(tcs->drv, tcs->offset + i))
return tcs->offset + i;
}
return -EBUSY;
}
/**
* tcs_write() - Store messages into a TCS right now, or return -EBUSY.
* @drv: The controller.
* @msg: The data to be sent.
*
* Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
*
* If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
* the same address is already transferring returns -EBUSY which means the
* client should retry shortly.
*
* Return: 0 on success, -EBUSY if client should retry, or an error.
* Client should have interrupts enabled for a bit before retrying.
*/
static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id;
unsigned long flags;
int ret;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
spin_lock_irqsave(&drv->lock, flags);
/*
* The h/w does not like if we send a request to the same address,
* when one is already in-flight or being processed.
*/
ret = check_for_req_inflight(drv, tcs, msg);
if (ret)
goto unlock;
ret = find_free_tcs(tcs);
if (ret < 0)
goto unlock;
tcs_id = ret;
tcs->req[tcs_id - tcs->offset] = msg;
set_bit(tcs_id, drv->tcs_in_use);
if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
/*
* Clear previously programmed WAKE commands in selected
* repurposed TCS to avoid triggering them. tcs->slots will be
* cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
*/
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
enable_tcs_irq(drv, tcs_id, true);
}
spin_unlock_irqrestore(&drv->lock, flags);
/*
* These two can be done after the lock is released because:
* - We marked "tcs_in_use" under lock.
* - Once "tcs_in_use" has been marked nobody else could be writing
* to these registers until the interrupt goes off.
* - The interrupt can't go off until we trigger w/ the last line
* of __tcs_set_trigger() below.
*/
__tcs_buffer_write(drv, tcs_id, 0, msg);
__tcs_set_trigger(drv, tcs_id, true);
return 0;
unlock:
spin_unlock_irqrestore(&drv->lock, flags);
return ret;
}
/**
* rpmh_rsc_send_data() - Write / trigger active-only message.
* @drv: The controller.
* @msg: The data to be sent.
*
* NOTES:
* - This is only used for "ACTIVE_ONLY" since the limitations of this
* function don't make sense for sleep/wake cases.
* - To do the transfer, we will grab a whole TCS for ourselves--we don't
* try to share. If there are none available we'll wait indefinitely
* for a free one.
* - This function will not wait for the commands to be finished, only for
* data to be programmed into the RPMh. See rpmh_tx_done() which will
* be called when the transfer is fully complete.
* - This function must be called with interrupts enabled. If the hardware
* is busy doing someone else's transfer we need that transfer to fully
* finish so that we can have the hardware, and to fully finish it needs
* the interrupt handler to run. If the interrupt is set to run on the
* active CPU, that can never happen while interrupts are disabled.
*
* Return: 0 on success, -EINVAL on error.
*/
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
int ret;
do {
ret = tcs_write(drv, msg);
if (ret == -EBUSY) {
pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
msg->cmds[0].addr);
udelay(10);
}
} while (ret == -EBUSY);
return ret;
}
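/*
 * Caller sketch (illustrative only; the struct fields match the ones
 * dereferenced elsewhere in this file, but the address and data values
 * are made up):
 */
#if 0
static int example_send(struct rsc_drv *drv)
{
struct tcs_cmd cmd = {
.addr = 0x30000, /* hypothetical resource address */
.data = 0x1,
.wait = 1, /* also request per-command completion */
};
struct tcs_request req = {
.state = RPMH_ACTIVE_ONLY_STATE,
.wait_for_compl = true,
.num_cmds = 1,
.cmds = &cmd,
};
return rpmh_rsc_send_data(drv, &req);
}
#endif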
/**
* find_slots() - Find a place to write the given message.
* @tcs: The tcs group to search.
* @msg: The message we want to find room for.
* @tcs_id: If we return 0 from the function, we return the global ID of the
* TCS to write to here.
* @cmd_id: If we return 0 from the function, we return the index of
* the command array of the returned TCS where the client should
* start writing the message.
*
* Only for use on sleep/wake TCSes since those are the only ones we maintain
* tcs->slots for.
*
* Return: -ENOMEM if there was no room, else 0.
*/
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
int *tcs_id, int *cmd_id)
{
int slot, offset;
int i = 0;
/* Do over, until we can fit the full payload in a single TCS */
do {
slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
i, msg->num_cmds, 0);
if (slot >= tcs->num_tcs * tcs->ncpt)
return -ENOMEM;
i += tcs->ncpt;
} while (slot + msg->num_cmds - 1 >= i);
bitmap_set(tcs->slots, slot, msg->num_cmds);
offset = slot / tcs->ncpt;
*tcs_id = offset + tcs->offset;
*cmd_id = slot % tcs->ncpt;
return 0;
}
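/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * ncpt = 16, a returned slot of 35 maps to TCS index 35 / 16 = 2 within
 * the group (so *tcs_id = tcs->offset + 2) and command index
 * 35 % 16 = 3 within that TCS.
 */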
/**
* rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
* @drv: The controller.
* @msg: The data to be written to the controller.
*
* This should only be called for sleep/wake state, never active-only
* state.
*
* The caller must ensure that no other RPMH actions are happening and the
* controller is idle when this function is called since it runs lockless.
*
* Return: 0 if no error; else -error.
*/
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id = 0, cmd_id = 0;
int ret;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
/* find the TCS id and the command in the TCS to write to */
ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
if (!ret)
__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
return ret;
}
/**
* rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
* @drv: The controller
*
* Checks if any of the AMCs are busy in handling ACTIVE sets.
* This is called from the last cpu powering down before flushing
* SLEEP and WAKE sets. If the AMCs are busy, the controller cannot enter
* power collapse, so the last CPU's PM notification is denied.
*
* Context: Must be called with the drv->lock held.
*
* Return:
* * False - AMCs are idle
* * True - AMCs are busy
*/
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
int m;
struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
/*
* If we made an active request on a RSC that does not have a
* dedicated TCS for active state use, then re-purposed wake TCSes
* should be checked for not busy, because we used wake TCSes for
* active requests in this case.
*/
if (!tcs->num_tcs)
tcs = &drv->tcs[WAKE_TCS];
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
if (!tcs_is_free(drv, m))
return true;
}
return false;
}
/**
* rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
* @nfb: Pointer to the notifier block in struct rsc_drv.
* @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
* @v: Unused
*
* This function is given to cpu_pm_register_notifier so we can be informed
* about when CPUs go down. When all CPUs go down we know no more active
* transfers will be started so we write sleep/wake sets. This function gets
* called from cpuidle code paths and also at system suspend time.
*
* If it's the last CPU going down and the AMCs are not busy, this writes
* the cached sleep and wake messages to the TCSes. The firmware then takes
* care of triggering them when entering the deepest low power modes.
*
* Return: See cpu_pm_register_notifier()
*/
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
unsigned long action, void *v)
{
struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
int ret = NOTIFY_OK;
int cpus_in_pm;
switch (action) {
case CPU_PM_ENTER:
cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
/*
* NOTE: comments for num_online_cpus() point out that it's
* only a snapshot so we need to be careful. It should be OK
* for us to use, though. It's important for us not to miss
* if we're the last CPU going down so it would only be a
* problem if a CPU went offline right after we did the check
* AND that CPU was not idle AND that CPU was the last non-idle
* CPU. That can't happen. CPUs would have to come out of idle
* before the CPU could go offline.
*/
if (cpus_in_pm < num_online_cpus())
return NOTIFY_OK;
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
atomic_dec(&drv->cpus_in_pm);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
/*
* It's likely we're on the last CPU. Grab the drv->lock and write
* out the sleep/wake commands to RPMH hardware. Grabbing the lock
* means that if we race with another CPU coming up we are still
* guaranteed to be safe. If another CPU came up just after we checked
* and has grabbed the lock or started an active transfer then we'll
* notice we're busy and abort. If another CPU comes up after we start
* flushing it will be blocked from starting an active transfer until
* we're done flushing. If another CPU starts an active transfer after
* we release the lock we're still OK because we're no longer the last
* CPU.
*/
if (spin_trylock(&drv->lock)) {
if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
ret = NOTIFY_BAD;
spin_unlock(&drv->lock);
} else {
/* Another CPU must be up */
return NOTIFY_OK;
}
if (ret == NOTIFY_BAD) {
/* Double-check if we're here because someone else is up */
if (cpus_in_pm < num_online_cpus())
ret = NOTIFY_OK;
else
/* We won't be called w/ CPU_PM_ENTER_FAILED */
atomic_dec(&drv->cpus_in_pm);
}
return ret;
}
static int rpmh_probe_tcs_config(struct platform_device *pdev,
struct rsc_drv *drv, void __iomem *base)
{
struct tcs_type_config {
u32 type;
u32 n;
} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
struct device_node *dn = pdev->dev.of_node;
u32 config, max_tcs, ncpt, offset;
int i, ret, n, st = 0;
struct tcs_group *tcs;
ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
if (ret)
return ret;
drv->tcs_base = base + offset;
config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
max_tcs = config;
max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
ncpt = ncpt >> DRV_NCPT_SHIFT;
n = of_property_count_u32_elems(dn, "qcom,tcs-config");
if (n != 2 * TCS_TYPE_NR)
return -EINVAL;
for (i = 0; i < TCS_TYPE_NR; i++) {
ret = of_property_read_u32_index(dn, "qcom,tcs-config",
i * 2, &tcs_cfg[i].type);
if (ret)
return ret;
if (tcs_cfg[i].type >= TCS_TYPE_NR)
return -EINVAL;
ret = of_property_read_u32_index(dn, "qcom,tcs-config",
i * 2 + 1, &tcs_cfg[i].n);
if (ret)
return ret;
if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
return -EINVAL;
}
for (i = 0; i < TCS_TYPE_NR; i++) {
tcs = &drv->tcs[tcs_cfg[i].type];
if (tcs->drv)
return -EINVAL;
tcs->drv = drv;
tcs->type = tcs_cfg[i].type;
tcs->num_tcs = tcs_cfg[i].n;
tcs->ncpt = ncpt;
if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
continue;
if (st + tcs->num_tcs > max_tcs ||
st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
return -EINVAL;
tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
tcs->offset = st;
st += tcs->num_tcs;
}
drv->num_tcs = st;
return 0;
}
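/*
 * Illustrative device tree fragment for the two properties parsed above
 * (the numbers are made up; real boards define their own):
 *
 *	qcom,tcs-offset = <0xd00>;
 *	qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
 *			  <WAKE_TCS 3>, <CONTROL_TCS 1>;
 *
 * i.e. TCS_TYPE_NR (type, count) pairs, which is why the element count
 * is checked against 2 * TCS_TYPE_NR.
 */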
static int rpmh_rsc_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct rsc_drv *drv;
struct resource *res;
char drv_id[10] = {0};
int ret, irq;
u32 solver_config;
void __iomem *base;
/*
* Even though RPMh doesn't directly use cmd-db, all of its children
* do. To avoid adding this check to our children we'll do it now.
*/
ret = cmd_db_ready();
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "Command DB not available (%d)\n",
ret);
return ret;
}
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
if (ret)
return ret;
drv->name = of_get_property(dn, "label", NULL);
if (!drv->name)
drv->name = dev_name(&pdev->dev);
snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
ret = rpmh_probe_tcs_config(pdev, drv, base);
if (ret)
return ret;
spin_lock_init(&drv->lock);
bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
irq = platform_get_irq(pdev, drv->id);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
drv->name, drv);
if (ret)
return ret;
/*
* CPU PM notifications are not required for controllers that support
* 'HW solver' mode, where they can autonomously execute low power
* modes to power down.
*/
solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
if (!solver_config) {
drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
cpu_pm_register_notifier(&drv->rsc_pm);
}
/* Enable the active TCS to send requests immediately */
writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
drv->tcs_base + RSC_DRV_IRQ_ENABLE);
spin_lock_init(&drv->client.cache_lock);
INIT_LIST_HEAD(&drv->client.cache);
INIT_LIST_HEAD(&drv->client.batch_cache);
dev_set_drvdata(&pdev->dev, drv);
return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id rpmh_drv_match[] = {
{ .compatible = "qcom,rpmh-rsc", },
{ }
};
static struct platform_driver rpmh_driver = {
.probe = rpmh_rsc_probe,
.driver = {
.name = "rpmh",
.of_match_table = rpmh_drv_match,
},
};
static int __init rpmh_driver_init(void)
{
return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);
| gpl-2.0 |
ashish-17/linux-2.4.27-CS518 | arch/m68k/atari/config.c | 33 | 24395 | /*
* linux/arch/m68k/atari/config.c
*
* Copyright (C) 1994 Bjoern Brauel
*
* 5/2/94 Roman Hodek:
* Added setting of time_adj to get a better clock.
*
* 5/14/94 Roman Hodek:
* gettod() for TT
*
* 5/15/94 Roman Hodek:
* hard_reset_now() for Atari (and others?)
*
* 94/12/30 Andreas Schwab:
* atari_sched_init fixed to get precise clock.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Miscellaneous atari stuff
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/vt_kern.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
#include <asm/atarihw.h>
#include <asm/atariints.h>
#include <asm/atari_stram.h>
#include <asm/system.h>
#include <asm/keyboard.h>
#include <asm/machdep.h>
#include <asm/hwtest.h>
#include <asm/io.h>
u_long atari_mch_cookie;
u_long atari_mch_type = 0;
struct atari_hw_present atari_hw_present;
u_long atari_switches = 0;
int atari_dont_touch_floppy_select = 0;
int atari_rtc_year_offset;
/* local function prototypes */
static void atari_reset( void );
#ifdef CONFIG_ATARI_FLOPPY
extern void atari_floppy_setup(char *, int *);
#endif
static void atari_get_model(char *model);
static int atari_get_hardware_list(char *buffer);
/* atari specific keyboard functions */
extern int atari_keyb_init(void);
extern int atari_kbdrate (struct kbd_repeat *);
extern int atari_kbd_translate(unsigned char keycode, unsigned char *keycodep,
char raw_mode);
extern void atari_kbd_leds (unsigned int);
/* atari specific irq functions */
extern void atari_init_IRQ (void);
extern int atari_request_irq (unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
unsigned long flags, const char *devname, void *dev_id);
extern void atari_free_irq (unsigned int irq, void *dev_id);
extern void atari_enable_irq (unsigned int);
extern void atari_disable_irq (unsigned int);
extern int atari_get_irq_list (char *buf);
extern void atari_mksound( unsigned int count, unsigned int ticks );
#ifdef CONFIG_HEARTBEAT
static void atari_heartbeat( int on );
#endif
/* atari specific timer functions (in time.c) */
extern void atari_sched_init(void (*)(int, void *, struct pt_regs *));
extern unsigned long atari_gettimeoffset (void);
extern void atari_mste_gettod (int *, int *, int *, int *, int *, int *);
extern void atari_tt_gettod (int *, int *, int *, int *, int *, int *);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
extern int atari_mste_set_clock_mmss (unsigned long);
extern int atari_tt_set_clock_mmss (unsigned long);
/* atari specific debug functions (in debug.c) */
extern void atari_debug_init(void);
#ifdef CONFIG_MAGIC_SYSRQ
static char atari_sysrq_xlate[128] =
"\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
"qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
"dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
"bnm,./\000\000\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
"\206\207\210\211\212\000\000\000\000\000-\000\000\000+\000"/* 0x40 - 0x4f */
"\000\000\000\177\000\000\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
"\000\000\000()/*789456123" /* 0x60 - 0x6f */
"0.\r\000\000\000\000\000\000\000\000\000\000\000\000\000"; /* 0x70 - 0x7f */
#endif
/* I've moved hwreg_present() and hwreg_present_bywrite() out into
* mm/hwtest.c, to avoid having multiple copies of the same routine
* in the kernel [I wanted them in hp300 and they were already used
* in the nubus code. NB: I don't have an Atari so this might (just
* conceivably) break something.
* I've preserved the #if 0 version of hwreg_present_bywrite() here
* for posterity.
* -- Peter Maydell <pmaydell@chiark.greenend.org.uk>, 05/1998
*/
#if 0
static int __init
hwreg_present_bywrite(volatile void *regp, unsigned char val)
{
int ret;
long save_sp, save_vbr;
static long tmp_vectors[3] = { 0, 0, (long)&&after_test };
__asm__ __volatile__
( "movec %/vbr,%2\n\t" /* save vbr value */
"movec %4,%/vbr\n\t" /* set up temporary vectors */
"movel %/sp,%1\n\t" /* save sp */
"moveq #0,%0\n\t" /* assume not present */
"moveb %5,%3@\n\t" /* write the hardware reg */
"cmpb %3@,%5\n\t" /* compare it */
"seq %0" /* comes here only if reg */
/* is present */
: "=d&" (ret), "=r&" (save_sp), "=r&" (save_vbr)
: "a" (regp), "r" (tmp_vectors), "d" (val)
);
after_test:
__asm__ __volatile__
( "movel %0,%/sp\n\t" /* restore sp */
"movec %1,%/vbr" /* restore vbr */
: : "r" (save_sp), "r" (save_vbr) : "sp"
);
return( ret );
}
#endif
/* ++roman: This is a more elaborate test for an SCC chip, since the plain
* Medusa board generates DTACK at the SCC's standard addresses, but a SCC
* board in the Medusa is possible. Also, the addresses where the ST_ESCC
* resides generate DTACK without the chip, too.
* The method is to write values into the interrupt vector register, that
* should be readable without trouble (from channel A!).
*/
static int __init scc_test( volatile char *ctla )
{
if (!hwreg_present( ctla ))
return( 0 );
MFPDELAY();
*ctla = 2; MFPDELAY();
*ctla = 0x40; MFPDELAY();
*ctla = 2; MFPDELAY();
if (*ctla != 0x40) return( 0 );
MFPDELAY();
*ctla = 2; MFPDELAY();
*ctla = 0x60; MFPDELAY();
*ctla = 2; MFPDELAY();
if (*ctla != 0x60) return( 0 );
return( 1 );
}
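/*
 * (Illustrative note, assuming standard Z8530 behaviour: register 2,
 * selected by the writes above, is the interrupt vector register, which
 * is freely readable from channel A -- so reading back 0x40 and 0x60
 * tells a real chip apart from an address that merely generates DTACK.)
 */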
/*
* Parse an Atari-specific record in the bootinfo
*/
int __init atari_parse_bootinfo(const struct bi_record *record)
{
int unknown = 0;
const u_long *data = record->data;
switch (record->tag) {
case BI_ATARI_MCH_COOKIE:
atari_mch_cookie = *data;
break;
case BI_ATARI_MCH_TYPE:
atari_mch_type = *data;
break;
default:
unknown = 1;
}
return(unknown);
}
/* Parse the Atari-specific switches= option. */
void __init atari_switches_setup( const char *str, unsigned len )
{
char switches[len+1];
char *p;
int ovsc_shift;
/* copy string to local array, strtok works destructively... */
strncpy( switches, str, len );
switches[len] = 0;
atari_switches = 0;
/* parse the options */
for( p = strtok( switches, "," ); p; p = strtok( NULL, "," ) ) {
ovsc_shift = 0;
if (strncmp( p, "ov_", 3 ) == 0) {
p += 3;
ovsc_shift = ATARI_SWITCH_OVSC_SHIFT;
}
if (strcmp( p, "ikbd" ) == 0) {
/* RTS line of IKBD ACIA */
atari_switches |= ATARI_SWITCH_IKBD << ovsc_shift;
}
else if (strcmp( p, "midi" ) == 0) {
/* RTS line of MIDI ACIA */
atari_switches |= ATARI_SWITCH_MIDI << ovsc_shift;
}
else if (strcmp( p, "snd6" ) == 0) {
atari_switches |= ATARI_SWITCH_SND6 << ovsc_shift;
}
else if (strcmp( p, "snd7" ) == 0) {
atari_switches |= ATARI_SWITCH_SND7 << ovsc_shift;
}
}
}
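/*
 * Example (illustrative, not from the original source): booting with
 * "switches=ikbd,ov_midi" sets ATARI_SWITCH_IKBD in the low bits and
 * ATARI_SWITCH_MIDI shifted up by ATARI_SWITCH_OVSC_SHIFT, i.e. the
 * IKBD ACIA RTS line is driven at boot while the MIDI ACIA RTS is
 * reserved for OverScan switching and reset again on reboot.
 */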
/*
* Setup the Atari configuration info
*/
void __init config_atari(void)
{
unsigned short tos_version;
memset(&atari_hw_present, 0, sizeof(atari_hw_present));
atari_debug_init();
ioport_resource.end = 0xFFFFFFFF; /* Change size of I/O space from 64KB
to 4GB. */
mach_sched_init = atari_sched_init;
#ifdef CONFIG_VT
mach_keyb_init = atari_keyb_init;
mach_kbdrate = atari_kbdrate;
mach_kbd_translate = atari_kbd_translate;
mach_kbd_leds = atari_kbd_leds;
kd_mksound = atari_mksound;
#endif
mach_init_IRQ = atari_init_IRQ;
mach_request_irq = atari_request_irq;
mach_free_irq = atari_free_irq;
enable_irq = atari_enable_irq;
disable_irq = atari_disable_irq;
mach_get_model = atari_get_model;
mach_get_hardware_list = atari_get_hardware_list;
mach_get_irq_list = atari_get_irq_list;
mach_gettimeoffset = atari_gettimeoffset;
mach_reset = atari_reset;
#ifdef CONFIG_ATARI_FLOPPY
mach_floppy_setup = atari_floppy_setup;
#endif
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
mach_max_dma_address = 0xffffff;
#ifdef CONFIG_MAGIC_SYSRQ
SYSRQ_KEY = 0xff;
mach_sysrq_key = 98; /* HELP */
mach_sysrq_shift_state = 8; /* Alt */
mach_sysrq_shift_mask = 0xff; /* all modifiers except CapsLock */
mach_sysrq_xlate = atari_sysrq_xlate;
#endif
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = atari_heartbeat;
#endif
/* Set switches as requested by the user */
if (atari_switches & ATARI_SWITCH_IKBD)
acia.key_ctrl = ACIA_DIV64 | ACIA_D8N1S | ACIA_RHTID;
if (atari_switches & ATARI_SWITCH_MIDI)
acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | ACIA_RHTID;
if (atari_switches & (ATARI_SWITCH_SND6|ATARI_SWITCH_SND7)) {
sound_ym.rd_data_reg_sel = 14;
sound_ym.wd_data = sound_ym.rd_data_reg_sel |
((atari_switches&ATARI_SWITCH_SND6) ? 0x40 : 0) |
((atari_switches&ATARI_SWITCH_SND7) ? 0x80 : 0);
}
/* ++bjoern:
* Determine hardware present
*/
printk( "Atari hardware found: " );
if (MACH_IS_MEDUSA || MACH_IS_HADES) {
/* There's no Atari video hardware on the Medusa, but all the
* addresses below generate a DTACK so no bus error occurs! */
}
else if (hwreg_present( f030_xreg )) {
ATARIHW_SET(VIDEL_SHIFTER);
printk( "VIDEL " );
/* This is a temporary hack: If there is Falcon video
* hardware, we assume that the ST-DMA serves SCSI instead of
* ACSI. In the future, there should be a better method for
* this...
*/
ATARIHW_SET(ST_SCSI);
printk( "STDMA-SCSI " );
}
else if (hwreg_present( tt_palette )) {
ATARIHW_SET(TT_SHIFTER);
printk( "TT_SHIFTER " );
}
else if (hwreg_present( &shifter.bas_hi )) {
if (hwreg_present( &shifter.bas_lo ) &&
(shifter.bas_lo = 0x0aau, shifter.bas_lo == 0x0aau)) {
ATARIHW_SET(EXTD_SHIFTER);
printk( "EXTD_SHIFTER " );
}
else {
ATARIHW_SET(STND_SHIFTER);
printk( "STND_SHIFTER " );
}
}
if (hwreg_present( &mfp.par_dt_reg )) {
ATARIHW_SET(ST_MFP);
printk( "ST_MFP " );
}
if (hwreg_present( &tt_mfp.par_dt_reg )) {
ATARIHW_SET(TT_MFP);
printk( "TT_MFP " );
}
if (hwreg_present( &tt_scsi_dma.dma_addr_hi )) {
ATARIHW_SET(SCSI_DMA);
printk( "TT_SCSI_DMA " );
}
if (!MACH_IS_HADES && hwreg_present( &st_dma.dma_hi )) {
ATARIHW_SET(STND_DMA);
printk( "STND_DMA " );
}
if (MACH_IS_MEDUSA || /* The ST-DMA address registers aren't readable
* on all Medusas, so the test below may fail */
(hwreg_present( &st_dma.dma_vhi ) &&
(st_dma.dma_vhi = 0x55) && (st_dma.dma_hi = 0xaa) &&
st_dma.dma_vhi == 0x55 && st_dma.dma_hi == 0xaa &&
(st_dma.dma_vhi = 0xaa) && (st_dma.dma_hi = 0x55) &&
st_dma.dma_vhi == 0xaa && st_dma.dma_hi == 0x55)) {
ATARIHW_SET(EXTD_DMA);
printk( "EXTD_DMA " );
}
if (hwreg_present( &tt_scsi.scsi_data )) {
ATARIHW_SET(TT_SCSI);
printk( "TT_SCSI " );
}
if (hwreg_present( &sound_ym.rd_data_reg_sel )) {
ATARIHW_SET(YM_2149);
printk( "YM2149 " );
}
if (!MACH_IS_MEDUSA && !MACH_IS_HADES &&
hwreg_present( &tt_dmasnd.ctrl )) {
ATARIHW_SET(PCM_8BIT);
printk( "PCM " );
}
if (!MACH_IS_HADES && hwreg_present( &codec.unused5 )) {
ATARIHW_SET(CODEC);
printk( "CODEC " );
}
if (hwreg_present( &dsp56k_host_interface.icr )) {
ATARIHW_SET(DSP56K);
printk( "DSP56K " );
}
if (hwreg_present( &tt_scc_dma.dma_ctrl ) &&
#if 0
/* This test sucks! Who knows some better? */
(tt_scc_dma.dma_ctrl = 0x01, (tt_scc_dma.dma_ctrl & 1) == 1) &&
(tt_scc_dma.dma_ctrl = 0x00, (tt_scc_dma.dma_ctrl & 1) == 0)
#else
!MACH_IS_MEDUSA && !MACH_IS_HADES
#endif
) {
ATARIHW_SET(SCC_DMA);
printk( "SCC_DMA " );
}
if (scc_test( &scc.cha_a_ctrl )) {
ATARIHW_SET(SCC);
printk( "SCC " );
}
if (scc_test( &st_escc.cha_b_ctrl )) {
ATARIHW_SET( ST_ESCC );
printk( "ST_ESCC " );
}
if (MACH_IS_HADES)
{
ATARIHW_SET( VME );
printk( "VME " );
}
else if (hwreg_present( &tt_scu.sys_mask )) {
ATARIHW_SET(SCU);
/* Assume a VME bus if there's a SCU */
ATARIHW_SET( VME );
printk( "VME SCU " );
}
if (hwreg_present( (void *)(0xffff9210) )) {
ATARIHW_SET(ANALOG_JOY);
printk( "ANALOG_JOY " );
}
if (!MACH_IS_HADES && hwreg_present( blitter.halftone )) {
ATARIHW_SET(BLITTER);
printk( "BLITTER " );
}
if (hwreg_present((void *)0xfff00039)) {
ATARIHW_SET(IDE);
printk( "IDE " );
}
#if 1 /* This may be wrong */
if (!MACH_IS_MEDUSA && !MACH_IS_HADES &&
hwreg_present( &tt_microwire.data ) &&
hwreg_present( &tt_microwire.mask ) &&
(tt_microwire.mask = 0x7ff,
udelay(1),
tt_microwire.data = MW_LM1992_PSG_HIGH | MW_LM1992_ADDR,
udelay(1),
tt_microwire.data != 0)) {
ATARIHW_SET(MICROWIRE);
while (tt_microwire.mask != 0x7ff) ;
printk( "MICROWIRE " );
}
#endif
if (hwreg_present( &tt_rtc.regsel )) {
ATARIHW_SET(TT_CLK);
printk( "TT_CLK " );
mach_gettod = atari_tt_gettod;
mach_hwclk = atari_tt_hwclk;
mach_set_clock_mmss = atari_tt_set_clock_mmss;
}
if (!MACH_IS_HADES && hwreg_present( &mste_rtc.sec_ones)) {
ATARIHW_SET(MSTE_CLK);
printk( "MSTE_CLK ");
mach_gettod = atari_mste_gettod;
mach_hwclk = atari_mste_hwclk;
mach_set_clock_mmss = atari_mste_set_clock_mmss;
}
if (!MACH_IS_MEDUSA && !MACH_IS_HADES &&
hwreg_present( &dma_wd.fdc_speed ) &&
hwreg_write( &dma_wd.fdc_speed, 0 )) {
ATARIHW_SET(FDCSPEED);
printk( "FDC_SPEED ");
}
if (!MACH_IS_HADES && !ATARIHW_PRESENT(ST_SCSI)) {
ATARIHW_SET(ACSI);
printk( "ACSI " );
}
printk("\n");
if (CPU_IS_040_OR_060)
/* Now it seems to be safe to turn off the tt0 transparent
* translation (the one that must not be turned off in
* head.S...)
*/
__asm__ volatile ("moveq #0,%/d0\n\t"
".chip 68040\n\t"
"movec %%d0,%%itt0\n\t"
"movec %%d0,%%dtt0\n\t"
".chip 68k"
: /* no outputs */
: /* no inputs */
: "d0");
/* allocator for memory that must reside in st-ram */
atari_stram_init ();
/* Set up a mapping for the VMEbus address region:
*
* VME is either at phys. 0xfexxxxxx (TT) or 0xa00000..0xdfffff
* (MegaSTE) In both cases, the whole 16 MB chunk is mapped at
* 0xfe000000 virt., because this can be done with a single
* transparent translation. On the 68040, lots of often unused
* page tables would be needed otherwise. On a MegaSTE or similar,
* the highest byte is stripped off by hardware due to the 24 bit
* design of the bus.
*/
if (CPU_IS_020_OR_030) {
unsigned long tt1_val;
tt1_val = 0xfe008543; /* Translate 0xfexxxxxx, enable, cache
* inhibit, read and write, FDC mask = 3,
* FDC val = 4 -> Supervisor only */
__asm__ __volatile__ ( ".chip 68030\n\t"
"pmove %0@,%/tt1\n\t"
".chip 68k"
: : "a" (&tt1_val) );
}
else {
__asm__ __volatile__
( "movel %0,%/d0\n\t"
".chip 68040\n\t"
"movec %%d0,%%itt1\n\t"
"movec %%d0,%%dtt1\n\t"
".chip 68k"
:
: "g" (0xfe00a040) /* Translate 0xfexxxxxx, enable,
* supervisor only, non-cacheable/
* serialized, writable */
: "d0" );
}
/* Fetch tos version at Physical 2 */
/* We may not be able to access this address if the kernel is
loaded to ST-RAM, since the first page is unmapped. On the
Medusa this is always the case and there is nothing we can do
about this, so we just assume the smaller offset. For the TT
we use the fact that in head.S we have set up a mapping
0xFFxxxxxx -> 0x00xxxxxx, so that the first 16MB is accessible
in the last 16MB of the address space. */
tos_version = (MACH_IS_MEDUSA || MACH_IS_HADES) ?
0xfff : *(unsigned short *)0xff000002;
atari_rtc_year_offset = (tos_version < 0x306) ? 70 : 68;
}
#ifdef CONFIG_HEARTBEAT
static void atari_heartbeat( int on )
{
unsigned char tmp;
unsigned long flags;
if (atari_dont_touch_floppy_select)
return;
save_flags(flags);
cli();
sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
tmp = sound_ym.rd_data_reg_sel;
sound_ym.wd_data = on ? (tmp & ~0x02) : (tmp | 0x02);
restore_flags(flags);
}
#endif
/* ++roman:
*
* This function does a reset on machines that lack the ability to
* assert the processor's _RESET signal somehow via hardware. It is
* based on the fact that you can find the initial SP and PC values
* after a reset at physical addresses 0 and 4. This works pretty well
* for Atari machines, since the lowest 8 bytes of physical memory are
* really ROM (mapped by hardware). For other 680x0 machines: don't
* know if it works...
*
* To get the values at addresses 0 and 4, the MMU better is turned
* off first. After that, we have to jump into physical address space
* (the PC before the pmove statement points to the virtual address of
* the code). Getting that physical address is not hard, but the code
* becomes a bit complex since I've tried to ensure that the jump
* statement after the pmove is in the cache already (otherwise the
* processor can't fetch it!). For that, the code first jumps to the
* jump statement with the (virtual) address of the pmove section in
* an address register. The jump statement is surely in the cache
* now. After that, that physical address of the reset code is loaded
* into the same address register, pmove is done and the same jump
* statements goes to the reset code. Since there are not many
* statements between the two jumps, I hope it stays in the cache.
*
* The C code makes heavy use of the GCC features that you can get the
* address of a C label. No hope to compile this with another compiler
* than GCC!
*/
/* ++andreas: no need for complicated code, just depend on prefetch */
static void atari_reset (void)
{
long tc_val = 0;
long reset_addr;
/* On the Medusa, phys. 0x4 may contain garbage because it's no
ROM. See above for explanation why we cannot use PTOV(4). */
reset_addr = MACH_IS_HADES ? 0x7fe00030 :
MACH_IS_MEDUSA || MACH_IS_AB40 ? 0xe00030 :
*(unsigned long *) 0xff000004;
/* reset the ACIAs to switch off OverScan, if it's active */
if (atari_switches & ATARI_SWITCH_OVSC_IKBD)
acia.key_ctrl = ACIA_RESET;
if (atari_switches & ATARI_SWITCH_OVSC_MIDI)
acia.mid_ctrl = ACIA_RESET;
/* processor independent: turn off interrupts and reset the VBR;
* the caches must be left enabled, else prefetching the final jump
* instruction doesn't work. */
cli();
__asm__ __volatile__
("moveq #0,%/d0\n\t"
"movec %/d0,%/vbr"
: : : "d0" );
if (CPU_IS_040_OR_060) {
unsigned long jmp_addr040 = virt_to_phys(&&jmp_addr_label040);
if (CPU_IS_060) {
/* 68060: clear PCR to turn off superscalar operation */
__asm__ __volatile__
("moveq #0,%/d0\n\t"
".chip 68060\n\t"
"movec %%d0,%%pcr\n\t"
".chip 68k"
: : : "d0" );
}
__asm__ __volatile__
("movel %0,%/d0\n\t"
"andl #0xff000000,%/d0\n\t"
"orw #0xe020,%/d0\n\t" /* map 16 MB, enable, cacheable */
".chip 68040\n\t"
"movec %%d0,%%itt0\n\t"
"movec %%d0,%%dtt0\n\t"
".chip 68k\n\t"
"jmp %0@\n\t"
: /* no outputs */
: "a" (jmp_addr040)
: "d0" );
jmp_addr_label040:
__asm__ __volatile__
("moveq #0,%/d0\n\t"
"nop\n\t"
".chip 68040\n\t"
"cinva %%bc\n\t"
"nop\n\t"
"pflusha\n\t"
"nop\n\t"
"movec %%d0,%%tc\n\t"
"nop\n\t"
/* the following setup of transparent translations is needed on the
* Afterburner040 to successfully reboot. Other machines shouldn't
* care about a different tt regs setup, they also didn't care in
* the past that the regs weren't turned off. */
"movel #0xffc000,%%d0\n\t" /* whole insn space cacheable */
"movec %%d0,%%itt0\n\t"
"movec %%d0,%%itt1\n\t"
"orw #0x40,%/d0\n\t" /* whole data space non-cacheable/ser. */
"movec %%d0,%%dtt0\n\t"
"movec %%d0,%%dtt1\n\t"
".chip 68k\n\t"
"jmp %0@"
: /* no outputs */
: "a" (reset_addr)
: "d0");
}
else
__asm__ __volatile__
("pmove %0@,%/tc\n\t"
"jmp %1@"
: /* no outputs */
: "a" (&tc_val), "a" (reset_addr));
}
static void atari_get_model(char *model)
{
strcpy(model, "Atari ");
switch (atari_mch_cookie >> 16) {
case ATARI_MCH_ST:
if (ATARIHW_PRESENT(MSTE_CLK))
strcat (model, "Mega ST");
else
strcat (model, "ST");
break;
case ATARI_MCH_STE:
if (MACH_IS_MSTE)
strcat (model, "Mega STE");
else
strcat (model, "STE");
break;
case ATARI_MCH_TT:
if (MACH_IS_MEDUSA)
/* Medusa has TT _MCH cookie */
strcat (model, "Medusa");
else if (MACH_IS_HADES)
strcat(model, "Hades");
else
strcat (model, "TT");
break;
case ATARI_MCH_FALCON:
strcat (model, "Falcon");
if (MACH_IS_AB40)
strcat (model, " (with Afterburner040)");
break;
default:
sprintf (model + strlen (model), "(unknown mach cookie 0x%lx)",
atari_mch_cookie);
break;
}
}
static int atari_get_hardware_list(char *buffer)
{
int len = 0, i;
for (i = 0; i < m68k_num_memory; i++)
len += sprintf (buffer+len, "\t%3ld MB at 0x%08lx (%s)\n",
m68k_memory[i].size >> 20, m68k_memory[i].addr,
(m68k_memory[i].addr & 0xff000000 ?
"alternate RAM" : "ST-RAM"));
#define ATARIHW_ANNOUNCE(name,str) \
if (ATARIHW_PRESENT(name)) \
len += sprintf (buffer + len, "\t%s\n", str)
len += sprintf (buffer + len, "Detected hardware:\n");
ATARIHW_ANNOUNCE(STND_SHIFTER, "ST Shifter");
ATARIHW_ANNOUNCE(EXTD_SHIFTER, "STe Shifter");
ATARIHW_ANNOUNCE(TT_SHIFTER, "TT Shifter");
ATARIHW_ANNOUNCE(VIDEL_SHIFTER, "Falcon Shifter");
ATARIHW_ANNOUNCE(YM_2149, "Programmable Sound Generator");
ATARIHW_ANNOUNCE(PCM_8BIT, "PCM 8 Bit Sound");
ATARIHW_ANNOUNCE(CODEC, "CODEC Sound");
ATARIHW_ANNOUNCE(TT_SCSI, "SCSI Controller NCR5380 (TT style)");
ATARIHW_ANNOUNCE(ST_SCSI, "SCSI Controller NCR5380 (Falcon style)");
ATARIHW_ANNOUNCE(ACSI, "ACSI Interface");
ATARIHW_ANNOUNCE(IDE, "IDE Interface");
ATARIHW_ANNOUNCE(FDCSPEED, "8/16 MHz Switch for FDC");
ATARIHW_ANNOUNCE(ST_MFP, "Multi Function Peripheral MFP 68901");
ATARIHW_ANNOUNCE(TT_MFP, "Second Multi Function Peripheral MFP 68901");
ATARIHW_ANNOUNCE(SCC, "Serial Communications Controller SCC 8530");
ATARIHW_ANNOUNCE(ST_ESCC, "Extended Serial Communications Controller SCC 85230");
ATARIHW_ANNOUNCE(ANALOG_JOY, "Paddle Interface");
ATARIHW_ANNOUNCE(MICROWIRE, "MICROWIRE(tm) Interface");
ATARIHW_ANNOUNCE(STND_DMA, "DMA Controller (24 bit)");
ATARIHW_ANNOUNCE(EXTD_DMA, "DMA Controller (32 bit)");
ATARIHW_ANNOUNCE(SCSI_DMA, "DMA Controller for NCR5380");
ATARIHW_ANNOUNCE(SCC_DMA, "DMA Controller for SCC");
ATARIHW_ANNOUNCE(TT_CLK, "Clock Chip MC146818A");
ATARIHW_ANNOUNCE(MSTE_CLK, "Clock Chip RP5C15");
ATARIHW_ANNOUNCE(SCU, "System Control Unit");
ATARIHW_ANNOUNCE(BLITTER, "Blitter");
ATARIHW_ANNOUNCE(VME, "VME Bus");
ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor");
return(len);
}
/*
* Local variables:
* c-indent-level: 4
* tab-width: 8
* End:
*/
| gpl-2.0 |
stevezuo/ak98_kernel | arch/arm/mach-mx2/eukrea_mbimx27-baseboard.c | 545 | 5878 | /*
* Copyright (C) 2009 Eric Benard - eric@eukrea.com
*
* Based on pcm970-baseboard.c which is :
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
#include <mach/iomux.h>
#include <mach/imxfb.h>
#include <mach/hardware.h>
#include <mach/mmc.h>
#include <mach/imx-uart.h>
#include "devices.h"
static int eukrea_mbimx27_pins[] = {
/* UART2 */
PE3_PF_UART2_CTS,
PE4_PF_UART2_RTS,
PE6_PF_UART2_TXD,
PE7_PF_UART2_RXD,
/* UART3 */
PE8_PF_UART3_TXD,
PE9_PF_UART3_RXD,
PE10_PF_UART3_CTS,
PE11_PF_UART3_RTS,
/* UART4 */
PB26_AF_UART4_RTS,
PB28_AF_UART4_TXD,
PB29_AF_UART4_CTS,
PB31_AF_UART4_RXD,
/* SDHC1*/
PE18_PF_SD1_D0,
PE19_PF_SD1_D1,
PE20_PF_SD1_D2,
PE21_PF_SD1_D3,
PE22_PF_SD1_CMD,
PE23_PF_SD1_CLK,
/* display */
PA5_PF_LSCLK,
PA6_PF_LD0,
PA7_PF_LD1,
PA8_PF_LD2,
PA9_PF_LD3,
PA10_PF_LD4,
PA11_PF_LD5,
PA12_PF_LD6,
PA13_PF_LD7,
PA14_PF_LD8,
PA15_PF_LD9,
PA16_PF_LD10,
PA17_PF_LD11,
PA18_PF_LD12,
PA19_PF_LD13,
PA20_PF_LD14,
PA21_PF_LD15,
PA22_PF_LD16,
PA23_PF_LD17,
PA28_PF_HSYNC,
PA29_PF_VSYNC,
PA30_PF_CONTRAST,
PA31_PF_OE_ACD,
/* SPI1 */
PD28_PF_CSPI1_SS0,
PD29_PF_CSPI1_SCLK,
PD30_PF_CSPI1_MISO,
PD31_PF_CSPI1_MOSI,
};
static struct gpio_led gpio_leds[] = {
{
.name = "led1",
.default_trigger = "heartbeat",
.active_low = 1,
.gpio = GPIO_PORTF | 16,
},
{
.name = "led2",
.default_trigger = "none",
.active_low = 1,
.gpio = GPIO_PORTF | 19,
},
{
.name = "backlight",
.default_trigger = "backlight",
.active_low = 0,
.gpio = GPIO_PORTE | 5,
},
};
static struct gpio_led_platform_data gpio_led_info = {
.leds = gpio_leds,
.num_leds = ARRAY_SIZE(gpio_leds),
};
static struct platform_device leds_gpio = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &gpio_led_info,
},
};
static struct imx_fb_videomode eukrea_mbimx27_modes[] = {
{
.mode = {
.name = "CMO-QGVA",
.refresh = 60,
.xres = 320,
.yres = 240,
.pixclock = 156000,
.hsync_len = 30,
.left_margin = 38,
.right_margin = 20,
.vsync_len = 3,
.upper_margin = 15,
.lower_margin = 4,
},
.pcr = 0xFAD08B80,
.bpp = 16,
},
};
static struct imx_fb_platform_data eukrea_mbimx27_fb_data = {
.mode = eukrea_mbimx27_modes,
.num_modes = ARRAY_SIZE(eukrea_mbimx27_modes),
.pwmr = 0x00A903FF,
.lscr1 = 0x00120300,
.dmacr = 0x00040060,
};
static struct imxuart_platform_data uart_pdata[] = {
{
.flags = IMXUART_HAVE_RTSCTS,
},
{
.flags = IMXUART_HAVE_RTSCTS,
},
};
#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
|| defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#define ADS7846_PENDOWN (GPIO_PORTD | 25)
static void ads7846_dev_init(void)
{
if (gpio_request(ADS7846_PENDOWN, "ADS7846 pendown") < 0) {
printk(KERN_ERR "can't get ads746 pen down GPIO\n");
return;
}
gpio_direction_input(ADS7846_PENDOWN);
}
static int ads7846_get_pendown_state(void)
{
return !gpio_get_value(ADS7846_PENDOWN);
}
static struct ads7846_platform_data ads7846_config __initdata = {
.get_pendown_state = ads7846_get_pendown_state,
.keep_vref_on = 1,
};
static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
[0] = {
.modalias = "ads7846",
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 1500000,
.irq = IRQ_GPIOD(25),
.platform_data = &ads7846_config,
.mode = SPI_MODE_2,
},
};
static int eukrea_mbimx27_spi_cs[] = {GPIO_PORTD | 28};
static struct spi_imx_master eukrea_mbimx27_spi_0_data = {
.chipselect = eukrea_mbimx27_spi_cs,
.num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
};
#endif
static struct platform_device *platform_devices[] __initdata = {
&leds_gpio,
};
/*
* system init for baseboard usage. Will be called by cpuimx27 init.
*
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
void __init eukrea_mbimx27_baseboard_init(void)
{
mxc_gpio_setup_multiple_pins(eukrea_mbimx27_pins,
ARRAY_SIZE(eukrea_mbimx27_pins), "MBIMX27");
mxc_register_device(&mxc_uart_device1, &uart_pdata[0]);
mxc_register_device(&mxc_uart_device2, &uart_pdata[1]);
mxc_register_device(&mxc_fb_device, &eukrea_mbimx27_fb_data);
mxc_register_device(&mxc_sdhc_device0, NULL);
#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
|| defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
/* SPI and ADS7846 Touchscreen controller init */
mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_OUT);
mxc_gpio_mode(GPIO_PORTD | 25 | GPIO_GPIO | GPIO_IN);
mxc_register_device(&mxc_spi_device0, &eukrea_mbimx27_spi_0_data);
spi_register_board_info(eukrea_mbimx27_spi_board_info,
ARRAY_SIZE(eukrea_mbimx27_spi_board_info));
ads7846_dev_init();
#endif
/* Leds configuration */
mxc_gpio_mode(GPIO_PORTF | 16 | GPIO_GPIO | GPIO_OUT);
mxc_gpio_mode(GPIO_PORTF | 19 | GPIO_GPIO | GPIO_OUT);
/* Backlight */
mxc_gpio_mode(GPIO_PORTE | 5 | GPIO_GPIO | GPIO_OUT);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
}
| gpl-2.0 |
notro/linux-staging | drivers/media/pci/bt8xx/dst_ca.c | 545 | 21411 | /*
CA-driver for TwinHan DST Frontend/Card
Copyright (C) 2004, 2005 Manu Abraham (manu@kromtek.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/dvb/ca.h>
#include "dvbdev.h"
#include "dvb_frontend.h"
#include "dst_ca.h"
#include "dst_common.h"
#define DST_CA_ERROR 0
#define DST_CA_NOTICE 1
#define DST_CA_INFO 2
#define DST_CA_DEBUG 3
#define dprintk(x, y, z, format, arg...) do { \
if (z) { \
if ((x > DST_CA_ERROR) && (x > y)) \
printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
else if ((x > DST_CA_NOTICE) && (x > y)) \
printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
else if ((x > DST_CA_INFO) && (x > y)) \
printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
else if ((x > DST_CA_DEBUG) && (x > y)) \
printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
} else { \
if (x > y) \
printk(format, ## arg); \
} \
} while(0)
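/*
 * Editor's note (illustrative, not part of the original driver): a call
 * such as
 *
 *	dprintk(verbose, DST_CA_DEBUG, 1, "slot = %d", slot);
 *
 * emits "<function name>: slot = 3" only when the module parameter
 * 'verbose' is greater than DST_CA_DEBUG (3). Note that the severity
 * chain above compares 'verbose' (x) rather than the message level (y)
 * against the thresholds, so any message that passes the gate satisfies
 * the first branch and comes out at KERN_ERR.
 */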
static DEFINE_MUTEX(dst_ca_mutex);
static unsigned int verbose = 5;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "verbose startup messages, default is 5");
/* Need some more work */
static int ca_set_slot_descr(void)
{
/* We could make this more graceful? */
return -EOPNOTSUPP;
}
/* Need some more work */
static int ca_set_pid(void)
{
/* We could make this more graceful? */
return -EOPNOTSUPP;
}
static void put_command_and_length(u8 *data, int command, int length)
{
data[0] = (command >> 16) & 0xff;
data[1] = (command >> 8) & 0xff;
data[2] = command & 0xff;
data[3] = length;
}
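/*
 * Editor's sketch: the helper above packs a 24-bit EN50221 tag in
 * big-endian order followed by a one-byte length. Assuming CA_APP_INFO
 * carries the usual EN50221 tag value 0x9f8021, a call like
 *
 *	put_command_and_length(buf, 0x9f8021, 10);
 *
 * leaves buf[0..3] = { 0x9f, 0x80, 0x21, 0x0a }.
 */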
static void put_checksum(u8 *check_string, int length)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Computing string checksum.");
dprintk(verbose, DST_CA_DEBUG, 1, " -> string length : 0x%02x", length);
check_string[length] = dst_check_sum (check_string, length);
dprintk(verbose, DST_CA_DEBUG, 1, " -> checksum : 0x%02x", check_string[length]);
}
static int dst_ci_command(struct dst_state* state, u8 * data, u8 *ca_string, u8 len, int read)
{
u8 reply;
mutex_lock(&state->dst_mutex);
dst_comm_init(state);
msleep(65);
if (write_dst(state, data, len)) {
dprintk(verbose, DST_CA_INFO, 1, " Write not successful, trying to recover");
dst_error_recovery(state);
goto error;
}
if ((dst_pio_disable(state)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " DST PIO disable failed.");
goto error;
}
if (read_dst(state, &reply, GET_ACK) < 0) {
dprintk(verbose, DST_CA_INFO, 1, " Read not successful, trying to recover");
dst_error_recovery(state);
goto error;
}
if (read) {
if (! dst_wait_dst_ready(state, LONG_DELAY)) {
dprintk(verbose, DST_CA_NOTICE, 1, " 8820 not ready");
goto error;
}
if (read_dst(state, ca_string, 128) < 0) { /* Try to make this dynamic */
dprintk(verbose, DST_CA_INFO, 1, " Read not successful, trying to recover");
dst_error_recovery(state);
goto error;
}
}
mutex_unlock(&state->dst_mutex);
return 0;
error:
mutex_unlock(&state->dst_mutex);
return -EIO;
}
static int dst_put_ci(struct dst_state *state, u8 *data, int len, u8 *ca_string, int read)
{
u8 dst_ca_comm_err = 0;
while (dst_ca_comm_err < RETRIES) {
dprintk(verbose, DST_CA_NOTICE, 1, " Put Command");
if (dst_ci_command(state, data, ca_string, len, read)) { // If error
dst_error_recovery(state);
dst_ca_comm_err++; // work required here.
} else {
break;
}
}
if(dst_ca_comm_err == RETRIES)
return -1;
return 0;
}
static int ca_get_app_info(struct dst_state *state)
{
int length, str_length;
static u8 command[8] = {0x07, 0x40, 0x01, 0x00, 0x01, 0x00, 0x00, 0xff};
put_checksum(&command[0], command[0]);
if ((dst_put_ci(state, command, sizeof(command), state->messages, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
return -1;
}
dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
dprintk(verbose, DST_CA_INFO, 1, " ================================ CI Module Application Info ======================================");
dprintk(verbose, DST_CA_INFO, 1, " Application Type=[%d], Application Vendor=[%d], Vendor Code=[%d]\n%s: Application info=[%s]",
state->messages[7], (state->messages[8] << 8) | state->messages[9],
(state->messages[10] << 8) | state->messages[11], __func__, (char *)(&state->messages[12]));
dprintk(verbose, DST_CA_INFO, 1, " ==================================================================================================");
// Transform dst message to correct application_info message
length = state->messages[5];
str_length = length - 6;
if (str_length < 0) {
str_length = 0;
dprintk(verbose, DST_CA_ERROR, 1, "Invalid string length returned in ca_get_app_info(). Recovering.");
}
// First, the command and length fields
put_command_and_length(&state->messages[0], CA_APP_INFO, length);
// Copy application_type, application_manufacturer and manufacturer_code
memmove(&state->messages[4], &state->messages[7], 5);
// Set string length and copy string
state->messages[9] = str_length;
memmove(&state->messages[10], &state->messages[12], str_length);
return 0;
}
static int ca_get_ca_info(struct dst_state *state)
{
int srcPtr, dstPtr, i, num_ids;
static u8 slot_command[8] = {0x07, 0x40, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff};
const int in_system_id_pos = 8, out_system_id_pos = 4, in_num_ids_pos = 7;
put_checksum(&slot_command[0], slot_command[0]);
if ((dst_put_ci(state, slot_command, sizeof (slot_command), state->messages, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
return -1;
}
dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
// Print raw data
dprintk(verbose, DST_CA_INFO, 0, " DST data = [");
for (i = 0; i < state->messages[0] + 1; i++) {
dprintk(verbose, DST_CA_INFO, 0, " 0x%02x", state->messages[i]);
}
dprintk(verbose, DST_CA_INFO, 0, "]\n");
// Set the command and length of the output
num_ids = state->messages[in_num_ids_pos];
if (num_ids >= 100) {
num_ids = 100;
dprintk(verbose, DST_CA_ERROR, 1, "Invalid number of ids (>100). Recovering.");
}
put_command_and_length(&state->messages[0], CA_INFO, num_ids * 2);
dprintk(verbose, DST_CA_INFO, 0, " CA_INFO = [");
srcPtr = in_system_id_pos;
dstPtr = out_system_id_pos;
for(i = 0; i < num_ids; i++) {
dprintk(verbose, DST_CA_INFO, 0, " 0x%02x%02x", state->messages[srcPtr + 0], state->messages[srcPtr + 1]);
// Append to output
state->messages[dstPtr + 0] = state->messages[srcPtr + 0];
state->messages[dstPtr + 1] = state->messages[srcPtr + 1];
srcPtr += 2;
dstPtr += 2;
}
dprintk(verbose, DST_CA_INFO, 0, "]\n");
return 0;
}
static int ca_get_slot_caps(struct dst_state *state, struct ca_caps *p_ca_caps, void __user *arg)
{
int i;
u8 slot_cap[256];
static u8 slot_command[8] = {0x07, 0x40, 0x02, 0x00, 0x02, 0x00, 0x00, 0xff};
put_checksum(&slot_command[0], slot_command[0]);
if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_cap, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
return -1;
}
dprintk(verbose, DST_CA_NOTICE, 1, " -->dst_put_ci SUCCESS !");
/* Will implement the rest soon */
dprintk(verbose, DST_CA_INFO, 1, " Slot cap = [%d]", slot_cap[7]);
dprintk(verbose, DST_CA_INFO, 0, "===================================\n");
for (i = 0; i < slot_cap[0] + 1; i++)
dprintk(verbose, DST_CA_INFO, 0, " %d", slot_cap[i]);
dprintk(verbose, DST_CA_INFO, 0, "\n");
p_ca_caps->slot_num = 1;
p_ca_caps->slot_type = 1;
p_ca_caps->descr_num = slot_cap[7];
p_ca_caps->descr_type = 1;
if (copy_to_user(arg, p_ca_caps, sizeof (struct ca_caps)))
return -EFAULT;
return 0;
}
/* Need some more work */
static int ca_get_slot_descr(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
return -EOPNOTSUPP;
}
static int ca_get_slot_info(struct dst_state *state, struct ca_slot_info *p_ca_slot_info, void __user *arg)
{
int i;
static u8 slot_command[8] = {0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff};
u8 *slot_info = state->messages;
put_checksum(&slot_command[0], 7);
if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_info, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
return -1;
}
dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
/* Will implement the rest soon */
dprintk(verbose, DST_CA_INFO, 1, " Slot info = [%d]", slot_info[3]);
dprintk(verbose, DST_CA_INFO, 0, "===================================\n");
for (i = 0; i < 8; i++)
dprintk(verbose, DST_CA_INFO, 0, " %d", slot_info[i]);
dprintk(verbose, DST_CA_INFO, 0, "\n");
if (slot_info[4] & 0x80) {
p_ca_slot_info->flags = CA_CI_MODULE_PRESENT;
p_ca_slot_info->num = 1;
p_ca_slot_info->type = CA_CI;
} else if (slot_info[4] & 0x40) {
p_ca_slot_info->flags = CA_CI_MODULE_READY;
p_ca_slot_info->num = 1;
p_ca_slot_info->type = CA_CI;
} else
p_ca_slot_info->flags = 0;
if (copy_to_user(arg, p_ca_slot_info, sizeof (struct ca_slot_info)))
return -EFAULT;
return 0;
}
static int ca_get_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
u8 i = 0;
u32 command = 0;
if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg)))
return -EFAULT;
dprintk(verbose, DST_CA_NOTICE, 1, " Message = [%*ph]",
3, p_ca_message->msg);
for (i = 0; i < 3; i++) {
command = command | p_ca_message->msg[i];
if (i < 2)
command = command << 8;
}
dprintk(verbose, DST_CA_NOTICE, 1, " Command=[0x%x]", command);
switch (command) {
case CA_APP_INFO:
memcpy(p_ca_message->msg, state->messages, 128);
if (copy_to_user(arg, p_ca_message, sizeof (struct ca_msg)) )
return -EFAULT;
break;
case CA_INFO:
memcpy(p_ca_message->msg, state->messages, 128);
if (copy_to_user(arg, p_ca_message, sizeof (struct ca_msg)) )
return -EFAULT;
break;
}
return 0;
}
static int handle_dst_tag(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u32 length)
{
if (state->dst_hw_cap & DST_TYPE_HAS_SESSION) {
hw_buffer->msg[2] = p_ca_message->msg[1]; /* MSB */
hw_buffer->msg[3] = p_ca_message->msg[2]; /* LSB */
} else {
if (length > 247) {
dprintk(verbose, DST_CA_ERROR, 1, " Message too long ! *** Bailing Out *** !");
return -1;
}
hw_buffer->msg[0] = (length & 0xff) + 7;
hw_buffer->msg[1] = 0x40;
hw_buffer->msg[2] = 0x03;
hw_buffer->msg[3] = 0x00;
hw_buffer->msg[4] = 0x03;
hw_buffer->msg[5] = length & 0xff;
hw_buffer->msg[6] = 0x00;
/*
* Need to compute length for EN50221 section 8.3.2, for the time being
* assuming 8.3.2 is not applicable
*/
memcpy(&hw_buffer->msg[7], &p_ca_message->msg[4], length);
}
return 0;
}
static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 length, u8 reply)
{
if ((dst_put_ci(state, hw_buffer->msg, length, hw_buffer->msg, reply)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " DST-CI Command failed.");
dprintk(verbose, DST_CA_NOTICE, 1, " Resetting DST.");
rdc_reset_state(state);
return -1;
}
dprintk(verbose, DST_CA_NOTICE, 1, " DST-CI Command success.");
return 0;
}
static u32 asn_1_decode(u8 *asn_1_array)
{
u8 length_field = 0, word_count = 0, count = 0;
u32 length = 0;
length_field = asn_1_array[0];
dprintk(verbose, DST_CA_DEBUG, 1, " Length field=[%02x]", length_field);
if (length_field < 0x80) {
length = length_field & 0x7f;
dprintk(verbose, DST_CA_DEBUG, 1, " Length=[%02x]\n", length);
} else {
word_count = length_field & 0x7f;
for (count = 0; count < word_count; count++) {
length = length << 8;
length += asn_1_array[count + 1];
dprintk(verbose, DST_CA_DEBUG, 1, " Length=[%04x]", length);
}
}
return length;
}
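/*
 * Editor's sketch of the BER length decoding above (values chosen for
 * illustration):
 *
 *	u8 short_form[] = { 0x05 };              asn_1_decode(short_form) == 5
 *	u8 long_form[]  = { 0x82, 0x01, 0x00 };  asn_1_decode(long_form) == 256
 *
 * A first byte below 0x80 is the length itself; otherwise its low seven
 * bits give the number of length bytes that follow, big-endian.
 */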
static int debug_string(u8 *msg, u32 length, u32 offset)
{
u32 i;
dprintk(verbose, DST_CA_DEBUG, 0, " String=[ ");
for (i = offset; i < length; i++)
dprintk(verbose, DST_CA_DEBUG, 0, "%02x ", msg[i]);
dprintk(verbose, DST_CA_DEBUG, 0, "]\n");
return 0;
}
static int ca_set_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u8 reply, u8 query)
{
u32 length = 0;
u8 tag_length = 8;
length = asn_1_decode(&p_ca_message->msg[3]);
dprintk(verbose, DST_CA_DEBUG, 1, " CA Message length=[%d]", length);
debug_string(&p_ca_message->msg[4], length, 0); /* length is excluding tag & length */
memset(hw_buffer->msg, '\0', length);
handle_dst_tag(state, p_ca_message, hw_buffer, length);
put_checksum(hw_buffer->msg, hw_buffer->msg[0]);
debug_string(hw_buffer->msg, (length + tag_length), 0); /* tags too */
write_to_8820(state, hw_buffer, (length + tag_length), reply);
return 0;
}
/* Board supports CA PMT reply ? */
static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer)
{
int ca_pmt_reply_test = 0;
/* Do test board */
/* Not there yet but soon */
/* CA PMT Reply capable */
if (ca_pmt_reply_test) {
if ((ca_set_pmt(state, p_ca_message, hw_buffer, 1, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !");
return -1;
}
/* Process CA PMT Reply */
/* will implement soon */
dprintk(verbose, DST_CA_ERROR, 1, " Not there yet");
}
/* CA PMT Reply not capable */
if (!ca_pmt_reply_test) {
if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, NO_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !");
return -1;
}
dprintk(verbose, DST_CA_NOTICE, 1, " ca_set_pmt.. success !");
/* put a dummy message */
}
return 0;
}
static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
int i = 0;
u32 command = 0;
struct ca_msg *hw_buffer;
int result = 0;
if ((hw_buffer = kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) {
dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
return -ENOMEM;
}
dprintk(verbose, DST_CA_DEBUG, 1, " ");
if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg))) {
result = -EFAULT;
goto free_mem_and_exit;
}
/* EN50221 tag */
command = 0;
for (i = 0; i < 3; i++) {
command = command | p_ca_message->msg[i];
if (i < 2)
command = command << 8;
}
dprintk(verbose, DST_CA_DEBUG, 1, " Command=[0x%x]\n", command);
switch (command) {
case CA_PMT:
dprintk(verbose, DST_CA_DEBUG, 1, "Command = SEND_CA_PMT");
if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, 0)) < 0) { // code simplification started
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_PMT Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_PMT Success !");
break;
case CA_PMT_REPLY:
dprintk(verbose, DST_CA_INFO, 1, "Command = CA_PMT_REPLY");
/* Have to handle the 2 basic types of cards here */
if ((dst_check_ca_pmt(state, p_ca_message, hw_buffer)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_PMT_REPLY Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_PMT_REPLY Success !");
break;
case CA_APP_INFO_ENQUIRY: // only for debugging
dprintk(verbose, DST_CA_INFO, 1, " Getting Cam Application information");
if ((ca_get_app_info(state)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_APP_INFO_ENQUIRY Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_APP_INFO_ENQUIRY Success !");
break;
case CA_INFO_ENQUIRY:
dprintk(verbose, DST_CA_INFO, 1, " Getting CA Information");
if ((ca_get_ca_info(state)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_INFO_ENQUIRY Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_INFO_ENQUIRY Success !");
break;
}
free_mem_and_exit:
kfree (hw_buffer);
return result;
}
static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioctl_arg)
{
struct dvb_device *dvbdev;
struct dst_state *state;
struct ca_slot_info *p_ca_slot_info;
struct ca_caps *p_ca_caps;
struct ca_msg *p_ca_message;
void __user *arg = (void __user *)ioctl_arg;
int result = 0;
mutex_lock(&dst_ca_mutex);
dvbdev = file->private_data;
state = (struct dst_state *)dvbdev->priv;
p_ca_message = kmalloc(sizeof (struct ca_msg), GFP_KERNEL);
p_ca_slot_info = kmalloc(sizeof (struct ca_slot_info), GFP_KERNEL);
p_ca_caps = kmalloc(sizeof (struct ca_caps), GFP_KERNEL);
if (!p_ca_message || !p_ca_slot_info || !p_ca_caps) {
dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
result = -ENOMEM;
goto free_mem_and_exit;
}
/* We have now only the standard ioctl's, the driver is supposed to handle internals. */
switch (cmd) {
case CA_SEND_MSG:
dprintk(verbose, DST_CA_INFO, 1, " Sending message");
if ((ca_send_message(state, p_ca_message, arg)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SEND_MSG Failed !");
result = -1;
goto free_mem_and_exit;
}
break;
case CA_GET_MSG:
dprintk(verbose, DST_CA_INFO, 1, " Getting message");
if ((ca_get_message(state, p_ca_message, arg)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_MSG Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_MSG Success !");
break;
case CA_RESET:
dprintk(verbose, DST_CA_ERROR, 1, " Resetting DST");
dst_error_bailout(state);
msleep(4000);
break;
case CA_GET_SLOT_INFO:
dprintk(verbose, DST_CA_INFO, 1, " Getting Slot info");
if ((ca_get_slot_info(state, p_ca_slot_info, arg)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_SLOT_INFO Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_SLOT_INFO Success !");
break;
case CA_GET_CAP:
dprintk(verbose, DST_CA_INFO, 1, " Getting Slot capabilities");
if ((ca_get_slot_caps(state, p_ca_caps, arg)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_CAP Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_CAP Success !");
break;
case CA_GET_DESCR_INFO:
dprintk(verbose, DST_CA_INFO, 1, " Getting descrambler description");
if ((ca_get_slot_descr(state, p_ca_message, arg)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_DESCR_INFO Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_DESCR_INFO Success !");
break;
case CA_SET_DESCR:
dprintk(verbose, DST_CA_INFO, 1, " Setting descrambler");
if ((ca_set_slot_descr()) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_DESCR Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_DESCR Success !");
break;
case CA_SET_PID:
dprintk(verbose, DST_CA_INFO, 1, " Setting PID");
if ((ca_set_pid()) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_PID Failed !");
result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !");
default:
result = -EOPNOTSUPP;
}
free_mem_and_exit:
kfree (p_ca_message);
kfree (p_ca_slot_info);
kfree (p_ca_caps);
mutex_unlock(&dst_ca_mutex);
return result;
}
static int dst_ca_open(struct inode *inode, struct file *file)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Device opened [%p] ", file);
try_module_get(THIS_MODULE);
return 0;
}
static int dst_ca_release(struct inode *inode, struct file *file)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Device closed.");
module_put(THIS_MODULE);
return 0;
}
static ssize_t dst_ca_read(struct file *file, char __user *buffer, size_t length, loff_t *offset)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Device read.");
return 0;
}
static ssize_t dst_ca_write(struct file *file, const char __user *buffer, size_t length, loff_t *offset)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Device write.");
return 0;
}
static const struct file_operations dst_ca_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dst_ca_ioctl,
.open = dst_ca_open,
.release = dst_ca_release,
.read = dst_ca_read,
.write = dst_ca_write,
.llseek = noop_llseek,
};
static struct dvb_device dvbdev_ca = {
.priv = NULL,
.users = 1,
.readers = 1,
.writers = 1,
.fops = &dst_ca_fops
};
struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_adapter)
{
struct dvb_device *dvbdev;
dprintk(verbose, DST_CA_ERROR, 1, "registering DST-CA device");
if (dvb_register_device(dvb_adapter, &dvbdev, &dvbdev_ca, dst, DVB_DEVICE_CA) == 0) {
dst->dst_ca = dvbdev;
return dst->dst_ca;
}
return NULL;
}
EXPORT_SYMBOL(dst_ca_attach);
MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jimbojr/linux | drivers/pwm/pwm-pxa.c | 1057 | 5586 | /*
* drivers/pwm/pwm-pxa.c
*
* simple driver for PWM (Pulse Width Modulator) controller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 2008-02-13 initial version
* eric miao <eric.miao@marvell.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>
#include <linux/of_device.h>
#include <asm/div64.h>
#define HAS_SECONDARY_PWM 0x10
static const struct platform_device_id pwm_id_table[] = {
/* PWM has_secondary_pwm? */
{ "pxa25x-pwm", 0 },
{ "pxa27x-pwm", HAS_SECONDARY_PWM },
{ "pxa168-pwm", 0 },
{ "pxa910-pwm", 0 },
{ },
};
MODULE_DEVICE_TABLE(platform, pwm_id_table);
/* PWM registers and bits definitions */
#define PWMCR (0x00)
#define PWMDCR (0x04)
#define PWMPCR (0x08)
#define PWMCR_SD (1 << 6)
#define PWMDCR_FD (1 << 10)
struct pxa_pwm_chip {
struct pwm_chip chip;
struct device *dev;
struct clk *clk;
void __iomem *mmio_base;
};
static inline struct pxa_pwm_chip *to_pxa_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct pxa_pwm_chip, chip);
}
/*
* period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
* duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
*/
static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
unsigned long long c;
unsigned long period_cycles, prescale, pv, dc;
unsigned long offset;
int rc;
offset = pwm->hwpwm ? 0x10 : 0;
c = clk_get_rate(pc->clk);
c = c * period_ns;
do_div(c, 1000000000);
period_cycles = c;
if (period_cycles < 1)
period_cycles = 1;
prescale = (period_cycles - 1) / 1024;
pv = period_cycles / (prescale + 1) - 1;
if (prescale > 63)
return -EINVAL;
if (duty_ns == period_ns)
dc = PWMDCR_FD;
else
dc = (pv + 1) * duty_ns / period_ns;
/* NOTE: the clock to PWM has to be enabled first
* before writing to the registers
*/
rc = clk_prepare_enable(pc->clk);
if (rc < 0)
return rc;
writel(prescale, pc->mmio_base + offset + PWMCR);
writel(dc, pc->mmio_base + offset + PWMDCR);
writel(pv, pc->mmio_base + offset + PWMPCR);
clk_disable_unprepare(pc->clk);
return 0;
}
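/*
 * Editor's worked example of the computation above, assuming a 13 MHz
 * PWM clock (the actual rate is platform-specific) and a 1 kHz / 50%
 * duty request (period_ns = 1000000, duty_ns = 500000):
 *
 *	period_cycles = 13000000 * 1000000 / 10^9    = 13000
 *	prescale      = (13000 - 1) / 1024           = 12
 *	pv            = 13000 / (12 + 1) - 1         = 999
 *	dc            = (999 + 1) * 500000 / 1000000 = 500
 *
 * so PWMCR is written with 12, PWMPCR with 999 and PWMDCR with 500.
 */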
static int pxa_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
return clk_prepare_enable(pc->clk);
}
static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
clk_disable_unprepare(pc->clk);
}
static const struct pwm_ops pxa_pwm_ops = {
.config = pxa_pwm_config,
.enable = pxa_pwm_enable,
.disable = pxa_pwm_disable,
.owner = THIS_MODULE,
};
#ifdef CONFIG_OF
/*
* Device tree users must create one device instance for each PWM channel.
* Hence we dispense with the HAS_SECONDARY_PWM and "tell" the original driver
* code that this is a single channel pxa25x-pwm. Currently all devices are
* supported identically.
*/
static const struct of_device_id pwm_of_match[] = {
{ .compatible = "marvell,pxa250-pwm", .data = &pwm_id_table[0]},
{ .compatible = "marvell,pxa270-pwm", .data = &pwm_id_table[0]},
{ .compatible = "marvell,pxa168-pwm", .data = &pwm_id_table[0]},
{ .compatible = "marvell,pxa910-pwm", .data = &pwm_id_table[0]},
{ }
};
MODULE_DEVICE_TABLE(of, pwm_of_match);
#else
#define pwm_of_match NULL
#endif
static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev)
{
const struct of_device_id *id = of_match_device(pwm_of_match, dev);
return id ? id->data : NULL;
}
static struct pwm_device *
pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
pwm = pwm_request_from_chip(pc, 0, NULL);
if (IS_ERR(pwm))
return pwm;
pwm_set_period(pwm, args->args[0]);
return pwm;
}
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
struct pxa_pwm_chip *pwm;
struct resource *r;
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
id = pxa_pwm_get_id_dt(&pdev->dev);
if (id == NULL)
return -EINVAL;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (pwm == NULL)
return -ENOMEM;
pwm->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &pxa_pwm_ops;
pwm->chip.base = -1;
pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
if (IS_ENABLED(CONFIG_OF)) {
pwm->chip.of_xlate = pxa_pwm_of_xlate;
pwm->chip.of_pwm_n_cells = 1;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pwm->mmio_base))
return PTR_ERR(pwm->mmio_base);
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, pwm);
return 0;
}
static int pwm_remove(struct platform_device *pdev)
{
struct pxa_pwm_chip *chip;
chip = platform_get_drvdata(pdev);
if (chip == NULL)
return -ENODEV;
return pwmchip_remove(&chip->chip);
}
static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
.of_match_table = pwm_of_match,
},
.probe = pwm_probe,
.remove = pwm_remove,
.id_table = pwm_id_table,
};
module_platform_driver(pwm_driver);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
krzk/tizen-tv-rpi-linux | drivers/hwmon/tmp421.c | 1569 | 8630 | /* tmp421.c
*
* Copyright (C) 2009 Andre Prendel <andre.prendel@gmx.de>
* Preliminary support by:
* Melvin Rook, Raymond Ng
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver for the Texas Instruments TMP421 SMBus temperature sensor IC.
* Supported models: TMP421, TMP422, TMP423, TMP441, TMP442
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
I2C_CLIENT_END };
enum chips { tmp421, tmp422, tmp423, tmp441, tmp442 };
/* The TMP421 registers */
#define TMP421_STATUS_REG 0x08
#define TMP421_CONFIG_REG_1 0x09
#define TMP421_CONVERSION_RATE_REG 0x0B
#define TMP421_MANUFACTURER_ID_REG 0xFE
#define TMP421_DEVICE_ID_REG 0xFF
static const u8 TMP421_TEMP_MSB[4] = { 0x00, 0x01, 0x02, 0x03 };
static const u8 TMP421_TEMP_LSB[4] = { 0x10, 0x11, 0x12, 0x13 };
/* Flags */
#define TMP421_CONFIG_SHUTDOWN 0x40
#define TMP421_CONFIG_RANGE 0x04
/* Manufacturer / Device ID's */
#define TMP421_MANUFACTURER_ID 0x55
#define TMP421_DEVICE_ID 0x21
#define TMP422_DEVICE_ID 0x22
#define TMP423_DEVICE_ID 0x23
#define TMP441_DEVICE_ID 0x41
#define TMP442_DEVICE_ID 0x42
static const struct i2c_device_id tmp421_id[] = {
{ "tmp421", 2 },
{ "tmp422", 3 },
{ "tmp423", 4 },
{ "tmp441", 2 },
{ "tmp442", 3 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp421_id);
struct tmp421_data {
struct i2c_client *client;
struct mutex update_lock;
char valid;
unsigned long last_updated;
int channels;
u8 config;
s16 temp[4];
};
static int temp_from_s16(s16 reg)
{
/* Mask out status bits */
int temp = reg & ~0xf;
return (temp * 1000 + 128) / 256;
}
static int temp_from_u16(u16 reg)
{
/* Mask out status bits */
int temp = reg & ~0xf;
/* Add offset for extended temperature range. */
temp -= 64 * 256;
return (temp * 1000 + 128) / 256;
}
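/*
 * Editor's worked example for the two conversions above. The register
 * holds the temperature in 1/256 degree steps with the low four status
 * bits masked out:
 *
 *	temp_from_s16(0x1900) = (6400 * 1000 + 128) / 256            = 25000
 *	temp_from_u16(0x5900) = ((22784 - 16384) * 1000 + 128) / 256 = 25000
 *
 * i.e. in the extended range the same 25 degC reads 64 degC higher, and
 * both paths return millidegrees Celsius as expected by hwmon.
 */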
static struct tmp421_data *tmp421_update_device(struct device *dev)
{
struct tmp421_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
for (i = 0; i < data->channels; i++) {
data->temp[i] = i2c_smbus_read_byte_data(client,
TMP421_TEMP_MSB[i]) << 8;
data->temp[i] |= i2c_smbus_read_byte_data(client,
TMP421_TEMP_LSB[i]);
}
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
static ssize_t show_temp_value(struct device *dev,
struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct tmp421_data *data = tmp421_update_device(dev);
int temp;
mutex_lock(&data->update_lock);
if (data->config & TMP421_CONFIG_RANGE)
temp = temp_from_u16(data->temp[index]);
else
temp = temp_from_s16(data->temp[index]);
mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", temp);
}
static ssize_t show_fault(struct device *dev,
struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct tmp421_data *data = tmp421_update_device(dev);
/*
* The OPEN bit signals a fault. This is bit 0 of the temperature
* register (low byte).
*/
if (data->temp[index] & 0x01)
return sprintf(buf, "1\n");
else
return sprintf(buf, "0\n");
}
static umode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct tmp421_data *data = dev_get_drvdata(dev);
struct device_attribute *devattr;
unsigned int index;
devattr = container_of(a, struct device_attribute, attr);
index = to_sensor_dev_attr(devattr)->index;
if (index < data->channels)
return a->mode;
return 0;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_value, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_value, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
static struct attribute *tmp421_attr[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp4_fault.dev_attr.attr,
NULL
};
static const struct attribute_group tmp421_group = {
.attrs = tmp421_attr,
.is_visible = tmp421_is_visible,
};
static const struct attribute_group *tmp421_groups[] = {
&tmp421_group,
NULL
};
static int tmp421_init_client(struct i2c_client *client)
{
int config, config_orig;
/* Set the conversion rate to 2 Hz */
i2c_smbus_write_byte_data(client, TMP421_CONVERSION_RATE_REG, 0x05);
/* Start conversions (disable shutdown if necessary) */
config = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
if (config < 0) {
dev_err(&client->dev,
"Could not read configuration register (%d)\n", config);
return config;
}
config_orig = config;
config &= ~TMP421_CONFIG_SHUTDOWN;
if (config != config_orig) {
dev_info(&client->dev, "Enable monitoring chip\n");
i2c_smbus_write_byte_data(client, TMP421_CONFIG_REG_1, config);
}
return 0;
}
static int tmp421_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
struct i2c_adapter *adapter = client->adapter;
const char * const names[] = { "TMP421", "TMP422", "TMP423",
"TMP441", "TMP442" };
int addr = client->addr;
u8 reg;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
reg = i2c_smbus_read_byte_data(client, TMP421_MANUFACTURER_ID_REG);
if (reg != TMP421_MANUFACTURER_ID)
return -ENODEV;
reg = i2c_smbus_read_byte_data(client, TMP421_CONVERSION_RATE_REG);
if (reg & 0xf8)
return -ENODEV;
reg = i2c_smbus_read_byte_data(client, TMP421_STATUS_REG);
if (reg & 0x7f)
return -ENODEV;
reg = i2c_smbus_read_byte_data(client, TMP421_DEVICE_ID_REG);
switch (reg) {
case TMP421_DEVICE_ID:
kind = tmp421;
break;
case TMP422_DEVICE_ID:
if (addr == 0x2a)
return -ENODEV;
kind = tmp422;
break;
case TMP423_DEVICE_ID:
if (addr != 0x4c && addr != 0x4d)
return -ENODEV;
kind = tmp423;
break;
case TMP441_DEVICE_ID:
kind = tmp441;
break;
case TMP442_DEVICE_ID:
if (addr != 0x4c && addr != 0x4d)
return -ENODEV;
kind = tmp442;
break;
default:
return -ENODEV;
}
strlcpy(info->type, tmp421_id[kind].name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n",
names[kind], client->addr);
return 0;
}
static int tmp421_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct tmp421_data *data;
int err;
data = devm_kzalloc(dev, sizeof(struct tmp421_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
mutex_init(&data->update_lock);
data->channels = id->driver_data;
data->client = client;
err = tmp421_init_client(client);
if (err)
return err;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, tmp421_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct i2c_driver tmp421_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "tmp421",
},
.probe = tmp421_probe,
.id_table = tmp421_id,
.detect = tmp421_detect,
.address_list = normal_i2c,
};
module_i2c_driver(tmp421_driver);
MODULE_AUTHOR("Andre Prendel <andre.prendel@gmx.de>");
MODULE_DESCRIPTION("Texas Instruments TMP421/422/423/441/442 temperature sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
garwynn/L900_MA7_Kernel | arch/mips/jazz/irq.c | 2337 | 4315 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/i8253.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
#include <asm/pgtable.h>
static DEFINE_RAW_SPINLOCK(r4030_lock);
static void enable_r4030_irq(struct irq_data *d)
{
unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START);
unsigned long flags;
raw_spin_lock_irqsave(&r4030_lock, flags);
mask |= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
raw_spin_unlock_irqrestore(&r4030_lock, flags);
}
void disable_r4030_irq(struct irq_data *d)
{
unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START));
unsigned long flags;
raw_spin_lock_irqsave(&r4030_lock, flags);
mask &= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
raw_spin_unlock_irqrestore(&r4030_lock, flags);
}
static struct irq_chip r4030_irq_type = {
.name = "R4030",
.irq_mask = disable_r4030_irq,
.irq_unmask = enable_r4030_irq,
};
void __init init_r4030_ints(void)
{
int i;
for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); /* clear error bits */
}
/*
* On systems with i8259-style interrupt controllers we assume, for
* driver compatibility reasons, that interrupts 0 - 15 are the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
void __init arch_init_irq(void)
{
/*
* this is a hack to get back the still needed wired mapping
* killed by init_mm()
*/
/* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
/* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
/* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
init_i8259_irqs(); /* Integrated i8259 */
mips_cpu_irq_init();
init_r4030_ints();
change_c0_status(ST0_IM, IE_IRQ2 | IE_IRQ1);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_cause() & read_c0_status();
unsigned int irq;
if (pending & IE_IRQ4) {
r4030_read_reg32(JAZZ_TIMER_REGISTER);
do_IRQ(JAZZ_TIMER_IRQ);
} else if (pending & IE_IRQ2) {
irq = *(volatile u8 *)JAZZ_EISA_IRQ_ACK;
do_IRQ(irq);
} else if (pending & IE_IRQ1) {
irq = *(volatile u8 *)JAZZ_IO_IRQ_SOURCE >> 2;
if (likely(irq > 0))
do_IRQ(irq + JAZZ_IRQ_START - 1);
else
panic("Unimplemented loc_no_irq handler");
}
}
static void r4030_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
/* Nothing to do ... */
}
struct clock_event_device r4030_clockevent = {
.name = "r4030",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.irq = JAZZ_TIMER_IRQ,
.set_mode = r4030_set_mode,
};
static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction r4030_timer_irqaction = {
.handler = r4030_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER,
.name = "R4030 timer",
};
void __init plat_time_init(void)
{
struct clock_event_device *cd = &r4030_clockevent;
struct irqaction *action = &r4030_timer_irqaction;
unsigned int cpu = smp_processor_id();
BUG_ON(HZ != 100);
cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
action->dev_id = cd;
setup_irq(JAZZ_TIMER_IRQ, action);
/*
* Set clock to 100Hz.
*
* The R4030 timer receives an input clock of 1kHz which is divided by
* a programmable 4-bit divider. This makes it fairly inflexible; the
* value 9 written below selects divide-by-ten, giving the required 100Hz.
*/
r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
setup_pit_timer();
}
| gpl-2.0 |
devil1437/GalaxyNexusKernel | arch/arm/plat-pxa/dma.c | 4129 | 10032 | /*
* linux/arch/arm/plat-pxa/dma.c
*
* PXA DMA registration and IRQ dispatching
*
* Author: Nicolas Pitre
* Created: Nov 15, 2001
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#define DMA_DEBUG_NAME "pxa_dma"
#define DMA_MAX_REQUESTERS 64
struct dma_channel {
char *name;
pxa_dma_prio prio;
void (*irq_handler)(int, void *);
void *data;
spinlock_t lock;
};
static struct dma_channel *dma_channels;
static int num_dma_channels;
/*
* Debug fs
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
int i;
u32 drcmr;
pos += seq_printf(s, "DMA channel %d requesters list :\n", chan);
for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
drcmr = DRCMR(i);
if ((drcmr & DRCMR_CHLNUM) == chan)
pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
!!(drcmr & DRCMR_MAPVLD));
}
return pos;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
{
int burst = (dcmd >> 16) & 0x3;
return burst ? 4 << burst : 0;
}
static int is_phys_valid(unsigned long addr)
{
return pfn_valid(__phys_to_pfn(addr));
}
#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
static int dbg_show_descriptors(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
int i, max_show = 20, burst, width;
u32 dcmd;
unsigned long phys_desc;
struct pxa_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&dma_channels[chan].lock, flags);
phys_desc = DDADR(chan);
pos += seq_printf(s, "DMA channel %d descriptors :\n", chan);
pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0);
for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
desc = phys_to_virt(phys_desc);
dcmd = desc->dcmd;
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
i, phys_desc, desc);
pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d"
" width=%d len=%d)\n",
dcmd,
DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
DCMD_STR(ENDIAN), burst, width,
dcmd & DCMD_LENGTH);
phys_desc = desc->ddadr;
}
if (i == max_show)
pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
i, phys_desc);
else
pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n",
i, phys_desc, phys_desc == DDADR_STOP ?
"DDADR_STOP" : "invalid");
spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
return pos;
}
static int dbg_show_chan_state(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
u32 dcsr, dcmd;
int burst, width;
static char *str_prio[] = { "high", "normal", "low" };
dcsr = DCSR(chan);
dcmd = DCMD(chan);
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
pos += seq_printf(s, "DMA channel %d\n", chan);
pos += seq_printf(s, "\tPriority : %s\n",
str_prio[dma_channels[chan].prio]);
pos += seq_printf(s, "\tUnaligned transfer bit: %s\n",
DALGN & (1 << chan) ? "yes" : "no");
pos += seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d"
" len=%d)\n",
dcmd,
DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
return pos;
}
static int dbg_show_state(struct seq_file *s, void *p)
{
int pos = 0;
/* basic device status */
pos += seq_printf(s, "DMA engine status\n");
pos += seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
return pos;
}
#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
.owner = THIS_MODULE, \
.open = dbg_open_##name, \
.llseek = seq_lseek, \
.read = seq_read, \
.release = single_release, \
}
DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);
static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
{
char chan_name[11];
struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
struct dentry *chan_reqs = NULL;
void *dt;
scnprintf(chan_name, sizeof(chan_name), "%d", ch);
chan = debugfs_create_dir(chan_name, chandir);
dt = (void *)ch;
if (chan)
chan_state = debugfs_create_file("state", 0400, chan, dt,
&dbg_fops_chan_state);
if (chan_state)
chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
&dbg_fops_descriptors);
if (chan_descr)
chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
&dbg_fops_requester_chan);
if (!chan_reqs)
goto err_state;
return chan;
err_state:
debugfs_remove_recursive(chan);
return NULL;
}
static void pxa_dma_init_debugfs(void)
{
int i;
struct dentry *chandir;
dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
if (IS_ERR(dbgfs_root) || !dbgfs_root)
goto err_root;
dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
&dbg_fops_state);
if (!dbgfs_state)
goto err_state;
dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels,
GFP_KERNEL);
if (!dbgfs_chan)
goto err_alloc;
chandir = debugfs_create_dir("channels", dbgfs_root);
if (!chandir)
goto err_chandir;
for (i = 0; i < num_dma_channels; i++) {
dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
if (!dbgfs_chan[i])
goto err_chans;
}
return;
err_chans:
err_chandir:
kfree(dbgfs_chan);
err_alloc:
err_state:
debugfs_remove_recursive(dbgfs_root);
err_root:
pr_err("pxa_dma: debugfs is not available\n");
}
static void __exit pxa_dma_cleanup_debugfs(void)
{
debugfs_remove_recursive(dbgfs_root);
}
#else
static inline void pxa_dma_init_debugfs(void) {}
static inline void pxa_dma_cleanup_debugfs(void) {}
#endif
int pxa_request_dma (char *name, pxa_dma_prio prio,
void (*irq_handler)(int, void *),
void *data)
{
unsigned long flags;
int i, found = 0;
/* basic sanity checks */
if (!name || !irq_handler)
return -EINVAL;
local_irq_save(flags);
do {
/* try grabbing a DMA channel with the requested priority */
for (i = 0; i < num_dma_channels; i++) {
if ((dma_channels[i].prio == prio) &&
!dma_channels[i].name) {
found = 1;
break;
}
}
/* if the requested prio group is full, try a higher priority */
} while (!found && prio--);
if (found) {
DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
dma_channels[i].name = name;
dma_channels[i].irq_handler = irq_handler;
dma_channels[i].data = data;
} else {
printk (KERN_WARNING "No more available DMA channels for %s\n", name);
i = -ENODEV;
}
local_irq_restore(flags);
return i;
}
EXPORT_SYMBOL(pxa_request_dma);
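/*
 * Editor's sketch (kept under #if 0; the handler and device name are
 * hypothetical) of how a client driver is expected to pair
 * pxa_request_dma() with pxa_free_dma():
 */
#if 0
static void my_dma_irq(int dma_ch, void *data)
{
	/* inspect and acknowledge DCSR(dma_ch) status bits here */
}

static int my_setup(void)
{
	int ch = pxa_request_dma("my-device", DMA_PRIO_LOW, my_dma_irq, NULL);
	if (ch < 0)
		return ch;	/* no free channel at any priority */
	/* ... program the channel's descriptors and start the transfer ... */
	pxa_free_dma(ch);
	return 0;
}
#endif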
void pxa_free_dma (int dma_ch)
{
unsigned long flags;
if (!dma_channels[dma_ch].name) {
printk (KERN_CRIT
"%s: trying to free channel %d which is already freed\n",
__func__, dma_ch);
return;
}
local_irq_save(flags);
DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
dma_channels[dma_ch].name = NULL;
local_irq_restore(flags);
}
EXPORT_SYMBOL(pxa_free_dma);
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
int i, dint = DINT;
struct dma_channel *channel;
while (dint) {
i = __ffs(dint);
dint &= (dint - 1);
channel = &dma_channels[i];
if (channel->name && channel->irq_handler) {
channel->irq_handler(i, channel->data);
} else {
/*
* IRQ for an unregistered DMA channel:
* let's clear the interrupts and disable it.
*/
printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
}
return IRQ_HANDLED;
}
int __init pxa_init_dma(int irq, int num_ch)
{
int i, ret;
dma_channels = kzalloc(sizeof(struct dma_channel) * num_ch, GFP_KERNEL);
if (dma_channels == NULL)
return -ENOMEM;
/* dma channel priorities on pxa2xx processors:
* ch 0 - 3, 16 - 19 <--> (0) DMA_PRIO_HIGH
* ch 4 - 7, 20 - 23 <--> (1) DMA_PRIO_MEDIUM
* ch 8 - 15, 24 - 31 <--> (2) DMA_PRIO_LOW
*/
for (i = 0; i < num_ch; i++) {
DCSR(i) = 0;
dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
spin_lock_init(&dma_channels[i].lock);
}
ret = request_irq(irq, dma_irq_handler, IRQF_DISABLED, "DMA", NULL);
if (ret) {
printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
kfree(dma_channels);
return ret;
}
num_dma_channels = num_ch;
pxa_dma_init_debugfs();
return 0;
}
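/*
 * Editor's note: worked examples of the priority formula above,
 * min((i & 0xf) >> 2, DMA_PRIO_LOW), matching the channel table in the
 * comment:
 *
 *	i =  2: (2  & 0xf) >> 2 = 0 -> DMA_PRIO_HIGH
 *	i =  5: (5  & 0xf) >> 2 = 1 -> DMA_PRIO_MEDIUM
 *	i = 18: (18 & 0xf) >> 2 = 0 -> DMA_PRIO_HIGH
 *	i = 25: (25 & 0xf) >> 2 = 2 -> DMA_PRIO_LOW
 */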
| gpl-2.0 |
mordesku/Solid_Kernel-GEEHRC | drivers/media/dvb/frontends/dib7000p.c | 4897 | 65769 | /*
* Linux-DVB Driver for DiBcom's second generation DiB7000P (PC).
*
* Copyright (C) 2005-7 DiBcom (http://www.dibcom.fr/)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include "dvb_math.h"
#include "dvb_frontend.h"
#include "dib7000p.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
static int buggy_sfn_workaround;
module_param(buggy_sfn_workaround, int, 0644);
MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P: "); printk(args); printk("\n"); } } while (0)
struct i2c_device {
struct i2c_adapter *i2c_adap;
u8 i2c_addr;
};
struct dib7000p_state {
struct dvb_frontend demod;
struct dib7000p_config cfg;
u8 i2c_addr;
struct i2c_adapter *i2c_adap;
struct dibx000_i2c_master i2c_master;
u16 wbd_ref;
u8 current_band;
u32 current_bandwidth;
struct dibx000_agc_config *current_agc;
u32 timf;
u8 div_force_off:1;
u8 div_state:1;
u16 div_sync_wait;
u8 agc_state;
u16 gpio_dir;
u16 gpio_val;
u8 sfn_workaround_active:1;
#define SOC7090 0x7090
u16 version;
u16 tuner_enable;
struct i2c_adapter dib7090_tuner_adap;
/* for the I2C transfer */
struct i2c_msg msg[2];
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
struct mutex i2c_buffer_lock;
u8 input_mode_mpeg;
};
enum dib7000p_power_mode {
DIB7000P_POWER_ALL = 0,
DIB7000P_POWER_ANALOG_ADC,
DIB7000P_POWER_INTERFACE_ONLY,
};
/* dib7090 specific functions */
static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode);
static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode);
static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode);
static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
{
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
dprintk("could not acquire lock");
return 0;
}
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
state->msg[0].addr = state->i2c_addr >> 1;
state->msg[0].flags = 0;
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 2;
state->msg[1].addr = state->i2c_addr >> 1;
state->msg[1].flags = I2C_M_RD;
state->msg[1].buf = state->i2c_read_buffer;
state->msg[1].len = 2;
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
dprintk("i2c read error on %d", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
mutex_unlock(&state->i2c_buffer_lock);
return ret;
}
static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
{
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
dprintk("could not acquire lock");
return -EINVAL;
}
state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
state->i2c_write_buffer[1] = reg & 0xff;
state->i2c_write_buffer[2] = (val >> 8) & 0xff;
state->i2c_write_buffer[3] = val & 0xff;
memset(&state->msg[0], 0, sizeof(struct i2c_msg));
state->msg[0].addr = state->i2c_addr >> 1;
state->msg[0].flags = 0;
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 4;
ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
-EREMOTEIO : 0);
mutex_unlock(&state->i2c_buffer_lock);
return ret;
}
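/*
 * Editor's sketch of the write framing above: both the register address
 * and the value travel big-endian in a single 4-byte message. For
 * example, writing 0x01ff to register 1280 (0x0500) puts
 *
 *	{ 0x05, 0x00, 0x01, 0xff }
 *
 * on the bus, addressed to state->i2c_addr >> 1.
 */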
static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
{
u16 l = 0, r, *n;
n = buf;
l = *n++;
while (l) {
r = *n++;
do {
dib7000p_write_word(state, r, *n++);
r++;
} while (--l);
l = *n++;
}
}
static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
{
int ret = 0;
u16 outreg, fifo_threshold, smo_mode;
outreg = 0;
fifo_threshold = 1792;
smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
dprintk("setting output mode for demod %p to %d", &state->demod, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK:
outreg = (1 << 10); /* 0x0400 */
break;
case OUTMODE_MPEG2_PAR_CONT_CLK:
outreg = (1 << 10) | (1 << 6); /* 0x0440 */
break;
case OUTMODE_MPEG2_SERIAL:
outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0480 */
break;
case OUTMODE_DIVERSITY:
if (state->cfg.hostbus_diversity)
outreg = (1 << 10) | (4 << 6); /* 0x0500 */
else
outreg = (1 << 11);
break;
case OUTMODE_MPEG2_FIFO:
smo_mode |= (3 << 1);
fifo_threshold = 512;
outreg = (1 << 10) | (5 << 6);
break;
case OUTMODE_ANALOG_ADC:
outreg = (1 << 10) | (3 << 6);
break;
case OUTMODE_HIGH_Z:
outreg = 0;
break;
default:
dprintk("Unhandled output_mode passed to be set for demod %p", &state->demod);
break;
}
if (state->cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5);
ret |= dib7000p_write_word(state, 235, smo_mode);
ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
if (state->version != SOC7090)
ret |= dib7000p_write_word(state, 1286, outreg); /* P_Div_active */
return ret;
}
static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff)
{
struct dib7000p_state *state = demod->demodulator_priv;
if (state->div_force_off) {
dprintk("diversity combination deactivated - forced by COFDM parameters");
onoff = 0;
dib7000p_write_word(state, 207, 0);
} else
dib7000p_write_word(state, 207, (state->div_sync_wait << 4) | (1 << 2) | (2 << 0));
state->div_state = (u8) onoff;
if (onoff) {
dib7000p_write_word(state, 204, 6);
dib7000p_write_word(state, 205, 16);
/* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */
} else {
dib7000p_write_word(state, 204, 1);
dib7000p_write_word(state, 205, 0);
}
return 0;
}
static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_power_mode mode)
{
/* by default everything is powered off */
u16 reg_774 = 0x3fff, reg_775 = 0xffff, reg_776 = 0x0007, reg_899 = 0x0003, reg_1280 = (0xfe00) | (dib7000p_read_word(state, 1280) & 0x01ff);
/* now, depending on the requested mode, we power on */
switch (mode) {
/* power up everything in the demod */
case DIB7000P_POWER_ALL:
reg_774 = 0x0000;
reg_775 = 0x0000;
reg_776 = 0x0;
reg_899 = 0x0;
if (state->version == SOC7090)
reg_1280 &= 0x001f;
else
reg_1280 &= 0x01ff;
break;
case DIB7000P_POWER_ANALOG_ADC:
/* dem, cfg, iqc, sad, agc */
reg_774 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10) | (1 << 9));
/* nud */
reg_776 &= ~((1 << 0));
/* Dout */
if (state->version != SOC7090)
reg_1280 &= ~((1 << 11));
reg_1280 &= ~(1 << 6);
/* fall through intended, to enable the interfaces */
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */
case DIB7000P_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C */
if (state->version == SOC7090)
reg_1280 &= ~((1 << 7) | (1 << 5));
else
reg_1280 &= ~((1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
break;
/* TODO: the following was converted as-is from the dib7000 driver - check what is used when */
}
dib7000p_write_word(state, 774, reg_774);
dib7000p_write_word(state, 775, reg_775);
dib7000p_write_word(state, 776, reg_776);
dib7000p_write_word(state, 1280, reg_1280);
if (state->version != SOC7090)
dib7000p_write_word(state, 899, reg_899);
return 0;
}
static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_adc_states no)
{
u16 reg_908 = 0, reg_909 = 0;
u16 reg;
if (state->version != SOC7090) {
reg_908 = dib7000p_read_word(state, 908);
reg_909 = dib7000p_read_word(state, 909);
}
switch (no) {
case DIBX000_SLOW_ADC_ON:
if (state->version == SOC7090) {
reg = dib7000p_read_word(state, 1925);
dib7000p_write_word(state, 1925, reg | (1 << 4) | (1 << 2)); /* en_slowAdc = 1 & reset_sladc = 1 */
reg = dib7000p_read_word(state, 1925); /* read access needed to make it work... strange... */
msleep(200);
dib7000p_write_word(state, 1925, reg & ~(1 << 4)); /* en_slowAdc = 1 & reset_sladc = 0 */
reg = dib7000p_read_word(state, 72) & ~((0x3 << 14) | (0x3 << 12));
dib7000p_write_word(state, 72, reg | (1 << 14) | (3 << 12) | 524); /* ref = Vin1 => Vbg ; sel = Vin0 or Vin3 ; (Vin2 = Vcm) */
} else {
reg_909 |= (1 << 1) | (1 << 0);
dib7000p_write_word(state, 909, reg_909);
reg_909 &= ~(1 << 1);
}
break;
case DIBX000_SLOW_ADC_OFF:
if (state->version == SOC7090) {
reg = dib7000p_read_word(state, 1925);
dib7000p_write_word(state, 1925, (reg & ~(1 << 2)) | (1 << 4)); /* reset_sladc = 1 en_slowAdc = 0 */
} else
reg_909 |= (1 << 1) | (1 << 0);
break;
case DIBX000_ADC_ON:
reg_908 &= 0x0fff;
reg_909 &= 0x0003;
break;
case DIBX000_ADC_OFF:
reg_908 |= (1 << 14) | (1 << 13) | (1 << 12);
reg_909 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
break;
case DIBX000_VBG_ENABLE:
reg_908 &= ~(1 << 15);
break;
case DIBX000_VBG_DISABLE:
reg_908 |= (1 << 15);
break;
default:
break;
}
// dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4;
reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
if (state->version != SOC7090) {
dib7000p_write_word(state, 908, reg_908);
dib7000p_write_word(state, 909, reg_909);
}
}
static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
{
u32 timf;
// store the current bandwidth for later use
state->current_bandwidth = bw;
if (state->timf == 0) {
dprintk("using default timf");
timf = state->cfg.bw->timf;
} else {
dprintk("using updated timf");
timf = state->timf;
}
timf = timf * (bw / 50) / 160;
dib7000p_write_word(state, 23, (u16) ((timf >> 16) & 0xffff));
dib7000p_write_word(state, 24, (u16) ((timf) & 0xffff));
return 0;
}
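/*
 * Worked example, not from the original source: the stored timf is
 * normalized to an 8 MHz channel, since 8000 kHz / 50 == 160 makes
 * `timf * (bw / 50) / 160` an identity at bw = 8000. With a
 * hypothetical timf of 20452225 and bw = 6000 kHz:
 *   20452225 * (6000 / 50) / 160 = 20452225 * 120 / 160 = 15339168
 * which is then split across the two 16-bit writes to regs 23/24.
 */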
static int dib7000p_sad_calib(struct dib7000p_state *state)
{
/* internal */
dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
if (state->version == SOC7090)
dib7000p_write_word(state, 74, 2048);
else
dib7000p_write_word(state, 74, 776);
/* do the calibration */
dib7000p_write_word(state, 73, (1 << 0));
dib7000p_write_word(state, 73, (0 << 0));
msleep(1);
return 0;
}
int dib7000p_set_wbd_ref(struct dvb_frontend *demod, u16 value)
{
struct dib7000p_state *state = demod->demodulator_priv;
if (value > 4095)
value = 4095;
state->wbd_ref = value;
return dib7000p_write_word(state, 105, (dib7000p_read_word(state, 105) & 0xf000) | value);
}
EXPORT_SYMBOL(dib7000p_set_wbd_ref);
int dib7000p_get_agc_values(struct dvb_frontend *fe,
u16 *agc_global, u16 *agc1, u16 *agc2, u16 *wbd)
{
struct dib7000p_state *state = fe->demodulator_priv;
if (agc_global != NULL)
*agc_global = dib7000p_read_word(state, 394);
if (agc1 != NULL)
*agc1 = dib7000p_read_word(state, 392);
if (agc2 != NULL)
*agc2 = dib7000p_read_word(state, 393);
if (wbd != NULL)
*wbd = dib7000p_read_word(state, 397);
return 0;
}
EXPORT_SYMBOL(dib7000p_get_agc_values);
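/*
 * Usage sketch, not from the original source: every output pointer may
 * be NULL, so a caller interested only in the global AGC word can do:
 *
 *   u16 agc_global;
 *   dib7000p_get_agc_values(fe, &agc_global, NULL, NULL, NULL);
 */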
static void dib7000p_reset_pll(struct dib7000p_state *state)
{
struct dibx000_bandwidth_config *bw = &state->cfg.bw[0];
u16 clk_cfg0;
if (state->version == SOC7090) {
dib7000p_write_word(state, 1856, (!bw->pll_reset << 13) | (bw->pll_range << 12) | (bw->pll_ratio << 6) | (bw->pll_prediv));
while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
;
dib7000p_write_word(state, 1857, dib7000p_read_word(state, 1857) | (!bw->pll_bypass << 15));
} else {
/* force PLL bypass */
clk_cfg0 = (1 << 15) | ((bw->pll_ratio & 0x3f) << 9) |
(bw->modulo << 7) | (bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) | (bw->bypclk_div << 2) | (bw->enable_refdiv << 1) | (0 << 0);
dib7000p_write_word(state, 900, clk_cfg0);
/* P_pll_cfg */
dib7000p_write_word(state, 903, (bw->pll_prediv << 5) | (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset);
clk_cfg0 = (bw->pll_bypass << 15) | (clk_cfg0 & 0x7fff);
dib7000p_write_word(state, 900, clk_cfg0);
}
dib7000p_write_word(state, 18, (u16) (((bw->internal * 1000) >> 16) & 0xffff));
dib7000p_write_word(state, 19, (u16) ((bw->internal * 1000) & 0xffff));
dib7000p_write_word(state, 21, (u16) ((bw->ifreq >> 16) & 0xffff));
dib7000p_write_word(state, 22, (u16) ((bw->ifreq) & 0xffff));
dib7000p_write_word(state, 72, bw->sad_cfg);
}
static u32 dib7000p_get_internal_freq(struct dib7000p_state *state)
{
u32 internal = (u32) dib7000p_read_word(state, 18) << 16;
internal |= (u32) dib7000p_read_word(state, 19);
internal /= 1000;
return internal;
}
int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth_config *bw)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 reg_1857, reg_1856 = dib7000p_read_word(state, 1856);
u8 loopdiv, prediv;
u32 internal, xtal;
/* get back old values */
prediv = reg_1856 & 0x3f;
loopdiv = (reg_1856 >> 6) & 0x3f;
if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
reg_1856 &= 0xf000;
reg_1857 = dib7000p_read_word(state, 1857);
dib7000p_write_word(state, 1857, reg_1857 & ~(1 << 15));
dib7000p_write_word(state, 1856, reg_1856 | ((bw->pll_ratio & 0x3f) << 6) | (bw->pll_prediv & 0x3f));
/* write new system clk into P_sec_len */
internal = dib7000p_get_internal_freq(state);
xtal = (internal / loopdiv) * prediv;
internal = 1000 * (xtal / bw->pll_prediv) * bw->pll_ratio; /* new internal */
dib7000p_write_word(state, 18, (u16) ((internal >> 16) & 0xffff));
dib7000p_write_word(state, 19, (u16) (internal & 0xffff));
dib7000p_write_word(state, 1857, reg_1857 | (1 << 15));
while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
dprintk("Waiting for PLL to lock");
return 0;
}
return -EIO;
}
EXPORT_SYMBOL(dib7000p_update_pll);
static int dib7000p_reset_gpio(struct dib7000p_state *st)
{
/* reset the GPIOs */
dprintk("gpio dir: %x: val: %x, pwm_pos: %x", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
dib7000p_write_word(st, 1029, st->gpio_dir);
dib7000p_write_word(st, 1030, st->gpio_val);
/* TODO 1031 is P_gpio_od */
dib7000p_write_word(st, 1032, st->cfg.gpio_pwm_pos);
dib7000p_write_word(st, 1037, st->cfg.pwm_freq_div);
return 0;
}
static int dib7000p_cfg_gpio(struct dib7000p_state *st, u8 num, u8 dir, u8 val)
{
st->gpio_dir = dib7000p_read_word(st, 1029);
st->gpio_dir &= ~(1 << num); /* reset the direction bit */
st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */
dib7000p_write_word(st, 1029, st->gpio_dir);
st->gpio_val = dib7000p_read_word(st, 1030);
st->gpio_val &= ~(1 << num); /* reset the value bit */
st->gpio_val |= (val & 0x01) << num; /* set the new value */
dib7000p_write_word(st, 1030, st->gpio_val);
return 0;
}
int dib7000p_set_gpio(struct dvb_frontend *demod, u8 num, u8 dir, u8 val)
{
struct dib7000p_state *state = demod->demodulator_priv;
return dib7000p_cfg_gpio(state, num, dir, val);
}
EXPORT_SYMBOL(dib7000p_set_gpio);
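/*
 * Usage sketch, not from the original source: a bridge driver holding
 * the dvb_frontend pointer can drive a demod pin directly (pin number,
 * direction and level below are made up):
 *
 *   dib7000p_set_gpio(fe, 5, 0, 1);  // pin 5: direction bit 0, level 1
 *
 * The dir/val semantics are board-specific; dib7000p_cfg_gpio() simply
 * patches the per-pin bits into registers 1029 (direction) and
 * 1030 (value).
 */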
static u16 dib7000p_defaults[] = {
// auto search configuration
3, 2,
0x0004,
(1<<3)|(1<<11)|(1<<12)|(1<<13),
0x0814, /* Equal Lock */
12, 6,
0x001b,
0x7740,
0x005b,
0x8d80,
0x01c9,
0xc380,
0x0000,
0x0080,
0x0000,
0x0090,
0x0001,
0xd4c0,
1, 26,
0x6680,
/* set ADC level to -16 */
11, 79,
(1 << 13) - 825 - 117,
(1 << 13) - 837 - 117,
(1 << 13) - 811 - 117,
(1 << 13) - 766 - 117,
(1 << 13) - 737 - 117,
(1 << 13) - 693 - 117,
(1 << 13) - 648 - 117,
(1 << 13) - 619 - 117,
(1 << 13) - 575 - 117,
(1 << 13) - 531 - 117,
(1 << 13) - 501 - 117,
1, 142,
0x0410,
/* disable power smoothing */
8, 145,
0,
0,
0,
0,
0,
0,
0,
0,
1, 154,
1 << 13,
1, 168,
0x0ccd,
1, 183,
0x200f,
1, 212,
0x169,
5, 187,
0x023d,
0x00a4,
0x00a4,
0x7ff0,
0x3ccc,
1, 198,
0x800,
1, 222,
0x0010,
1, 235,
0x0062,
0,
};
static int dib7000p_demod_reset(struct dib7000p_state *state)
{
dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
if (state->version == SOC7090)
dibx000_reset_i2c_master(&state->i2c_master);
dib7000p_set_adc_state(state, DIBX000_VBG_ENABLE);
/* restart all parts */
dib7000p_write_word(state, 770, 0xffff);
dib7000p_write_word(state, 771, 0xffff);
dib7000p_write_word(state, 772, 0x001f);
dib7000p_write_word(state, 1280, 0x001f - ((1 << 4) | (1 << 3)));
dib7000p_write_word(state, 770, 0);
dib7000p_write_word(state, 771, 0);
dib7000p_write_word(state, 772, 0);
dib7000p_write_word(state, 1280, 0);
if (state->version != SOC7090) {
dib7000p_write_word(state, 898, 0x0003);
dib7000p_write_word(state, 898, 0);
}
/* default */
dib7000p_reset_pll(state);
if (dib7000p_reset_gpio(state) != 0)
dprintk("GPIO reset was not successful.");
if (state->version == SOC7090) {
dib7000p_write_word(state, 899, 0);
/* impulse noise */
dib7000p_write_word(state, 42, (1<<5) | 3); /* P_iqc_thsat_ipc = 1 ; P_iqc_win2 = 3 */
dib7000p_write_word(state, 43, 0x2d4); /* -300 flag, P_iqc_dect_min = -280 */
dib7000p_write_word(state, 44, 300); /* +300 flag, presumably P_iqc_dect_max = +280 */
dib7000p_write_word(state, 273, (0<<6) | 30);
}
if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
dprintk("OUTPUT_MODE could not be reset.");
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
dib7000p_sad_calib(state);
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_OFF);
/* unforce divstr regardless of whether i2c enumeration was done or not */
dib7000p_write_word(state, 1285, dib7000p_read_word(state, 1285) & ~(1 << 1));
dib7000p_set_bandwidth(state, 8000);
if (state->version == SOC7090) {
dib7000p_write_word(state, 36, 0x0755);/* P_iqc_impnc_on =1 & P_iqc_corr_inh = 1 for impulsive noise */
} else {
if (state->cfg.tuner_is_baseband)
dib7000p_write_word(state, 36, 0x0755);
else
dib7000p_write_word(state, 36, 0x1f55);
}
dib7000p_write_tab(state, dib7000p_defaults);
if (state->version != SOC7090) {
dib7000p_write_word(state, 901, 0x0006);
dib7000p_write_word(state, 902, (3 << 10) | (1 << 6));
dib7000p_write_word(state, 905, 0x2c8e);
}
dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
return 0;
}
static void dib7000p_pll_clk_cfg(struct dib7000p_state *state)
{
u16 tmp = 0;
tmp = dib7000p_read_word(state, 903);
dib7000p_write_word(state, 903, (tmp | 0x1));
tmp = dib7000p_read_word(state, 900);
dib7000p_write_word(state, 900, (tmp & 0x7fff) | (1 << 6));
}
static void dib7000p_restart_agc(struct dib7000p_state *state)
{
// P_restart_iqc & P_restart_agc
dib7000p_write_word(state, 770, (1 << 11) | (1 << 9));
dib7000p_write_word(state, 770, 0x0000);
}
static int dib7000p_update_lna(struct dib7000p_state *state)
{
u16 dyn_gain;
if (state->cfg.update_lna) {
dyn_gain = dib7000p_read_word(state, 394);
if (state->cfg.update_lna(&state->demod, dyn_gain)) {
dib7000p_restart_agc(state);
return 1;
}
}
return 0;
}
static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
{
struct dibx000_agc_config *agc = NULL;
int i;
if (state->current_band == band && state->current_agc != NULL)
return 0;
state->current_band = band;
for (i = 0; i < state->cfg.agc_config_count; i++)
if (state->cfg.agc[i].band_caps & band) {
agc = &state->cfg.agc[i];
break;
}
if (agc == NULL) {
dprintk("no valid AGC configuration found for band 0x%02x", band);
return -EINVAL;
}
state->current_agc = agc;
/* AGC */
dib7000p_write_word(state, 75, agc->setup);
dib7000p_write_word(state, 76, agc->inv_gain);
dib7000p_write_word(state, 77, agc->time_stabiliz);
dib7000p_write_word(state, 100, (agc->alpha_level << 12) | agc->thlock);
// Demod AGC loop configuration
dib7000p_write_word(state, 101, (agc->alpha_mant << 5) | agc->alpha_exp);
dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp);
/* AGC continued */
dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
if (state->wbd_ref != 0)
dib7000p_write_word(state, 105, (agc->wbd_inv << 12) | state->wbd_ref);
else
dib7000p_write_word(state, 105, (agc->wbd_inv << 12) | agc->wbd_ref);
dib7000p_write_word(state, 106, (agc->wbd_sel << 13) | (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8));
dib7000p_write_word(state, 107, agc->agc1_max);
dib7000p_write_word(state, 108, agc->agc1_min);
dib7000p_write_word(state, 109, agc->agc2_max);
dib7000p_write_word(state, 110, agc->agc2_min);
dib7000p_write_word(state, 111, (agc->agc1_pt1 << 8) | agc->agc1_pt2);
dib7000p_write_word(state, 112, agc->agc1_pt3);
dib7000p_write_word(state, 113, (agc->agc1_slope1 << 8) | agc->agc1_slope2);
dib7000p_write_word(state, 114, (agc->agc2_pt1 << 8) | agc->agc2_pt2);
dib7000p_write_word(state, 115, (agc->agc2_slope1 << 8) | agc->agc2_slope2);
return 0;
}
static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
{
u32 internal = dib7000p_get_internal_freq(state);
s32 unit_khz_dds_val = 67108864 / (internal); /* 2**26 / Fsampling is the unit 1KHz offset */
u32 abs_offset_khz = ABS(offset_khz);
u32 dds = state->cfg.bw->ifreq & 0x1ffffff;
u8 invert = !!(state->cfg.bw->ifreq & (1 << 25));
dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d", offset_khz, internal, invert);
if (offset_khz < 0)
unit_khz_dds_val *= -1;
/* IF tuner */
if (invert)
dds -= (abs_offset_khz * unit_khz_dds_val);
else
dds += (abs_offset_khz * unit_khz_dds_val);
if (abs_offset_khz <= (internal / 2)) { /* Max dds offset is the half of the demod freq */
dib7000p_write_word(state, 21, (u16) (((dds >> 16) & 0x1ff) | (0 << 10) | (invert << 9)));
dib7000p_write_word(state, 22, (u16) (dds & 0xffff));
}
}
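/*
 * Worked example, not from the original source: with a hypothetical
 * internal sampling frequency of 60000 kHz,
 *   unit_khz_dds_val = 2^26 / 60000 = 67108864 / 60000 = 1118
 * so a +500 kHz offset moves the DDS word by 500 * 1118 = 559000.
 * Offsets larger than internal/2 (here 30000 kHz) are silently ignored
 * by the guard at the end of dib7000p_set_dds().
 */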
static int dib7000p_agc_startup(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000p_state *state = demod->demodulator_priv;
int ret = -1;
u8 *agc_state = &state->agc_state;
u8 agc_split;
u16 reg;
u32 upd_demod_gain_period = 0x1000;
switch (state->agc_state) {
case 0:
dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
if (state->version == SOC7090) {
reg = dib7000p_read_word(state, 0x79b) & 0xff00;
dib7000p_write_word(state, 0x79a, upd_demod_gain_period & 0xFFFF); /* lsb */
dib7000p_write_word(state, 0x79b, reg | (1 << 14) | ((upd_demod_gain_period >> 16) & 0xFF));
/* enable adc i & q */
reg = dib7000p_read_word(state, 0x780);
dib7000p_write_word(state, 0x780, (reg | (0x3)) & (~(1 << 7)));
} else {
dib7000p_set_adc_state(state, DIBX000_ADC_ON);
dib7000p_pll_clk_cfg(state);
}
if (dib7000p_set_agc_config(state, BAND_OF_FREQUENCY(ch->frequency / 1000)) != 0)
return -1;
dib7000p_set_dds(state, 0);
ret = 7;
(*agc_state)++;
break;
case 1:
if (state->cfg.agc_control)
state->cfg.agc_control(&state->demod, 1);
dib7000p_write_word(state, 78, 32768);
if (!state->current_agc->perform_agc_softsplit) {
/* we are using the wbd - so slow AGC startup */
/* force 0 split on WBD and restart AGC */
dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | (1 << 8));
(*agc_state)++;
ret = 5;
} else {
/* default AGC startup */
(*agc_state) = 4;
/* wait AGC rough lock time */
ret = 7;
}
dib7000p_restart_agc(state);
break;
case 2: /* fast split search path after 5sec */
dib7000p_write_word(state, 75, state->current_agc->setup | (1 << 4)); /* freeze AGC loop */
dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (2 << 9) | (0 << 8)); /* fast split search 0.25kHz */
(*agc_state)++;
ret = 14;
break;
case 3: /* split search ended */
agc_split = (u8) dib7000p_read_word(state, 396); /* store the split value for the next time */
dib7000p_write_word(state, 78, dib7000p_read_word(state, 394)); /* set AGC gain start value */
dib7000p_write_word(state, 75, state->current_agc->setup); /* std AGC loop */
dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | agc_split); /* standard split search */
dib7000p_restart_agc(state);
dprintk("SPLIT %p: %hd", demod, agc_split);
(*agc_state)++;
ret = 5;
break;
case 4: /* LNA startup */
ret = 7;
if (dib7000p_update_lna(state))
ret = 5;
else
(*agc_state)++;
break;
case 5:
if (state->cfg.agc_control)
state->cfg.agc_control(&state->demod, 0);
(*agc_state)++;
break;
default:
break;
}
return ret;
}
static void dib7000p_update_timf(struct dib7000p_state *state)
{
u32 timf = (dib7000p_read_word(state, 427) << 16) | dib7000p_read_word(state, 428);
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000p_write_word(state, 23, (u16) (timf >> 16));
dib7000p_write_word(state, 24, (u16) (timf & 0xffff));
dprintk("updated timf_frequency: %d (default: %d)", state->timf, state->cfg.bw->timf);
}
u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf)
{
struct dib7000p_state *state = fe->demodulator_priv;
switch (op) {
case DEMOD_TIMF_SET:
state->timf = timf;
break;
case DEMOD_TIMF_UPDATE:
dib7000p_update_timf(state);
break;
case DEMOD_TIMF_GET:
break;
}
dib7000p_set_bandwidth(state, state->current_bandwidth);
return state->timf;
}
EXPORT_SYMBOL(dib7000p_ctrl_timf);
static void dib7000p_set_channel(struct dib7000p_state *state,
struct dtv_frontend_properties *ch, u8 seq)
{
u16 value, est[4];
dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
/* nfft, guard, qam, alpha */
value = 0;
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
value |= (0 << 7);
break;
case TRANSMISSION_MODE_4K:
value |= (2 << 7);
break;
default:
case TRANSMISSION_MODE_8K:
value |= (1 << 7);
break;
}
switch (ch->guard_interval) {
case GUARD_INTERVAL_1_32:
value |= (0 << 5);
break;
case GUARD_INTERVAL_1_16:
value |= (1 << 5);
break;
case GUARD_INTERVAL_1_4:
value |= (3 << 5);
break;
default:
case GUARD_INTERVAL_1_8:
value |= (2 << 5);
break;
}
switch (ch->modulation) {
case QPSK:
value |= (0 << 3);
break;
case QAM_16:
value |= (1 << 3);
break;
default:
case QAM_64:
value |= (2 << 3);
break;
}
switch (HIERARCHY_1) { /* alpha is hard-wired to HIERARCHY_1 here */
case HIERARCHY_2:
value |= 2;
break;
case HIERARCHY_4:
value |= 4;
break;
default:
case HIERARCHY_1:
value |= 1;
break;
}
dib7000p_write_word(state, 0, value);
dib7000p_write_word(state, 5, (seq << 4) | 1); /* do not force tps, search list 0 */
/* P_dintl_native, P_dintlv_inv, P_hrch, P_code_rate, P_select_hp */
value = 0;
if (1 != 0) /* always true - presumably P_dintl_native, kept from the reference code */
value |= (1 << 6);
if (ch->hierarchy == 1)
value |= (1 << 4);
if (1 == 1) /* always true - presumably P_select_hp, kept from the reference code */
value |= 1;
switch ((ch->hierarchy == 0 || 1 == 1) ? ch->code_rate_HP : ch->code_rate_LP) {
case FEC_2_3:
value |= (2 << 1);
break;
case FEC_3_4:
value |= (3 << 1);
break;
case FEC_5_6:
value |= (5 << 1);
break;
case FEC_7_8:
value |= (7 << 1);
break;
default:
case FEC_1_2:
value |= (1 << 1);
break;
}
dib7000p_write_word(state, 208, value);
/* offset loop parameters */
dib7000p_write_word(state, 26, 0x6680);
dib7000p_write_word(state, 32, 0x0003);
dib7000p_write_word(state, 29, 0x1273);
dib7000p_write_word(state, 33, 0x0005);
/* P_dvsy_sync_wait */
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_8K:
value = 256;
break;
case TRANSMISSION_MODE_4K:
value = 128;
break;
case TRANSMISSION_MODE_2K:
default:
value = 64;
break;
}
switch (ch->guard_interval) {
case GUARD_INTERVAL_1_16:
value *= 2;
break;
case GUARD_INTERVAL_1_8:
value *= 4;
break;
case GUARD_INTERVAL_1_4:
value *= 8;
break;
default:
case GUARD_INTERVAL_1_32:
value *= 1;
break;
}
if (state->cfg.diversity_delay == 0)
state->div_sync_wait = (value * 3) / 2 + 48;
else
state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay;
/* deactivate the possibility of diversity reception if extended interleaver */
state->div_force_off = !1 && ch->transmission_mode != TRANSMISSION_MODE_8K; /* !1: no extended interleaver in DVB-T, so this is always 0 */
dib7000p_set_diversity_in(&state->demod, state->div_state);
/* channel estimation fine configuration */
switch (ch->modulation) {
case QAM_64:
est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
break;
case QAM_16:
est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
break;
default:
est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
break;
}
for (value = 0; value < 4; value++)
dib7000p_write_word(state, 187 + value, est[value]);
}
static int dib7000p_autosearch_start(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000p_state *state = demod->demodulator_priv;
struct dtv_frontend_properties schan;
u32 value, factor;
u32 internal = dib7000p_get_internal_freq(state);
schan = *ch;
schan.modulation = QAM_64;
schan.guard_interval = GUARD_INTERVAL_1_32;
schan.transmission_mode = TRANSMISSION_MODE_8K;
schan.code_rate_HP = FEC_2_3;
schan.code_rate_LP = FEC_3_4;
schan.hierarchy = 0;
dib7000p_set_channel(state, &schan, 7);
factor = BANDWIDTH_TO_KHZ(ch->bandwidth_hz);
if (factor >= 5000) {
if (state->version == SOC7090)
factor = 2;
else
factor = 1;
} else
factor = 6;
value = 30 * internal * factor;
dib7000p_write_word(state, 6, (u16) ((value >> 16) & 0xffff));
dib7000p_write_word(state, 7, (u16) (value & 0xffff));
value = 100 * internal * factor;
dib7000p_write_word(state, 8, (u16) ((value >> 16) & 0xffff));
dib7000p_write_word(state, 9, (u16) (value & 0xffff));
value = 500 * internal * factor;
dib7000p_write_word(state, 10, (u16) ((value >> 16) & 0xffff));
dib7000p_write_word(state, 11, (u16) (value & 0xffff));
value = dib7000p_read_word(state, 0);
dib7000p_write_word(state, 0, (u16) ((1 << 9) | value));
dib7000p_read_word(state, 1284);
dib7000p_write_word(state, 0, (u16) value);
return 0;
}
static int dib7000p_autosearch_is_irq(struct dvb_frontend *demod)
{
struct dib7000p_state *state = demod->demodulator_priv;
u16 irq_pending = dib7000p_read_word(state, 1284);
if (irq_pending & 0x1)
return 1;
if (irq_pending & 0x2)
return 2;
return 0;
}
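/*
 * Illustrative note, not from the original source: register 1284 holds
 * the autosearch IRQ flags - bit 0 is treated as "search failed"
 * (return 1), bit 1 as "channel found" (return 2), and 0 means the
 * search is still running, which is how the polling loop in
 * dib7000p_set_frontend() uses this helper.
 */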
static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32 bw)
{
static s16 notch[] = { 16143, 14402, 12238, 9713, 6902, 3888, 759, -2392 };
static u8 sine[] = { 0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 17, 19, 20, 22,
24, 25, 27, 28, 30, 31, 33, 34, 36, 38, 39, 41, 42, 44, 45, 47, 48, 50, 51,
53, 55, 56, 58, 59, 61, 62, 64, 65, 67, 68, 70, 71, 73, 74, 76, 77, 79, 80,
82, 83, 85, 86, 88, 89, 91, 92, 94, 95, 97, 98, 99, 101, 102, 104, 105,
107, 108, 109, 111, 112, 114, 115, 117, 118, 119, 121, 122, 123, 125, 126,
128, 129, 130, 132, 133, 134, 136, 137, 138, 140, 141, 142, 144, 145, 146,
147, 149, 150, 151, 152, 154, 155, 156, 157, 159, 160, 161, 162, 164, 165,
166, 167, 168, 170, 171, 172, 173, 174, 175, 177, 178, 179, 180, 181, 182,
183, 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
199, 200, 201, 202, 203, 204, 205, 206, 207, 207, 208, 209, 210, 211, 212,
213, 214, 215, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224,
225, 226, 227, 227, 228, 229, 229, 230, 231, 231, 232, 233, 233, 234, 235,
235, 236, 237, 237, 238, 238, 239, 239, 240, 241, 241, 242, 242, 243, 243,
244, 244, 245, 245, 245, 246, 246, 247, 247, 248, 248, 248, 249, 249, 249,
250, 250, 250, 251, 251, 251, 252, 252, 252, 252, 253, 253, 253, 253, 254,
254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255
};
u32 xtal = state->cfg.bw->xtal_hz / 1000;
int f_rel = DIV_ROUND_CLOSEST(rf_khz, xtal) * xtal - rf_khz;
int k;
int coef_re[8], coef_im[8];
int bw_khz = bw;
u32 pha;
dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal);
if (f_rel < -bw_khz / 2 || f_rel > bw_khz / 2)
return;
bw_khz /= 100;
dib7000p_write_word(state, 142, 0x0610);
for (k = 0; k < 8; k++) {
pha = ((f_rel * (k + 1) * 112 * 80 / bw_khz) / 1000) & 0x3ff;
if (pha == 0) {
coef_re[k] = 256;
coef_im[k] = 0;
} else if (pha < 256) {
coef_re[k] = sine[256 - (pha & 0xff)];
coef_im[k] = sine[pha & 0xff];
} else if (pha == 256) {
coef_re[k] = 0;
coef_im[k] = 256;
} else if (pha < 512) {
coef_re[k] = -sine[pha & 0xff];
coef_im[k] = sine[256 - (pha & 0xff)];
} else if (pha == 512) {
coef_re[k] = -256;
coef_im[k] = 0;
} else if (pha < 768) {
coef_re[k] = -sine[256 - (pha & 0xff)];
coef_im[k] = -sine[pha & 0xff];
} else if (pha == 768) {
coef_re[k] = 0;
coef_im[k] = -256;
} else {
coef_re[k] = sine[pha & 0xff];
coef_im[k] = -sine[256 - (pha & 0xff)];
}
coef_re[k] *= notch[k];
coef_re[k] += (1 << 14);
if (coef_re[k] >= (1 << 24))
coef_re[k] = (1 << 24) - 1;
coef_re[k] /= (1 << 15);
coef_im[k] *= notch[k];
coef_im[k] += (1 << 14);
if (coef_im[k] >= (1 << 24))
coef_im[k] = (1 << 24) - 1;
coef_im[k] /= (1 << 15);
dprintk("PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]);
dib7000p_write_word(state, 143, (0 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
dib7000p_write_word(state, 144, coef_im[k] & 0x3ff);
dib7000p_write_word(state, 143, (1 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
}
dib7000p_write_word(state, 143, 0);
}
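/*
 * Worked example, not from the original source: f_rel is the spur
 * position relative to the nearest harmonic of the crystal clock. With
 * a hypothetical xtal of 12000 kHz and rf_khz = 482000:
 *   DIV_ROUND_CLOSEST(482000, 12000) * 12000 = 40 * 12000 = 480000
 *   f_rel = 480000 - 482000 = -2000 kHz
 * which falls inside an 8 MHz channel (|f_rel| <= bw/2), so the eight
 * notch coefficient pairs are programmed; otherwise the function
 * returns without touching registers 142..144.
 */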
static int dib7000p_tune(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000p_state *state = demod->demodulator_priv;
u16 tmp = 0;
if (ch != NULL)
dib7000p_set_channel(state, ch, 0);
else
return -EINVAL;
// restart demod
dib7000p_write_word(state, 770, 0x4000);
dib7000p_write_word(state, 770, 0x0000);
msleep(45);
/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
tmp = (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3);
if (state->sfn_workaround_active) {
dprintk("SFN workaround is active");
tmp |= (1 << 9);
dib7000p_write_word(state, 166, 0x4000);
} else {
dib7000p_write_word(state, 166, 0x0000);
}
dib7000p_write_word(state, 29, tmp);
// never achieved a lock with that bandwidth so far - wait for osc-freq to update
if (state->timf == 0)
msleep(200);
/* offset loop parameters */
/* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
tmp = (6 << 8) | 0x80;
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
tmp |= (2 << 12);
break;
case TRANSMISSION_MODE_4K:
tmp |= (3 << 12);
break;
default:
case TRANSMISSION_MODE_8K:
tmp |= (4 << 12);
break;
}
dib7000p_write_word(state, 26, tmp); /* timf_a(6xxx) */
/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
tmp = (0 << 4);
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
tmp |= 0x6;
break;
case TRANSMISSION_MODE_4K:
tmp |= 0x7;
break;
default:
case TRANSMISSION_MODE_8K:
tmp |= 0x8;
break;
}
dib7000p_write_word(state, 32, tmp);
/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
tmp = (0 << 4);
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
tmp |= 0x6;
break;
case TRANSMISSION_MODE_4K:
tmp |= 0x7;
break;
default:
case TRANSMISSION_MODE_8K:
tmp |= 0x8;
break;
}
dib7000p_write_word(state, 33, tmp);
tmp = dib7000p_read_word(state, 509);
if (!((tmp >> 6) & 0x1)) {
/* restart the fec */
tmp = dib7000p_read_word(state, 771);
dib7000p_write_word(state, 771, tmp | (1 << 1));
dib7000p_write_word(state, 771, tmp);
msleep(40);
tmp = dib7000p_read_word(state, 509);
}
// we achieved a lock - it's time to update the osc freq
if ((tmp >> 6) & 0x1) {
dib7000p_update_timf(state);
/* P_timf_alpha += 2 */
tmp = dib7000p_read_word(state, 26);
dib7000p_write_word(state, 26, (tmp & ~(0xf << 12)) | ((((tmp >> 12) & 0xf) + 5) << 12));
}
if (state->cfg.spur_protect)
dib7000p_spur_protect(state, ch->frequency / 1000, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
return 0;
}
static int dib7000p_wakeup(struct dvb_frontend *demod)
{
struct dib7000p_state *state = demod->demodulator_priv;
dib7000p_set_power_mode(state, DIB7000P_POWER_ALL);
dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
if (state->version == SOC7090)
dib7000p_sad_calib(state);
return 0;
}
static int dib7000p_sleep(struct dvb_frontend *demod)
{
struct dib7000p_state *state = demod->demodulator_priv;
if (state->version == SOC7090)
return dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
return dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
}
static int dib7000p_identify(struct dib7000p_state *st)
{
u16 value;
dprintk("checking demod on I2C address: %d (%x)", st->i2c_addr, st->i2c_addr);
if ((value = dib7000p_read_word(st, 768)) != 0x01b3) {
dprintk("wrong Vendor ID (read=0x%x)", value);
return -EREMOTEIO;
}
if ((value = dib7000p_read_word(st, 769)) != 0x4000) {
dprintk("wrong Device ID (%x)", value);
return -EREMOTEIO;
}
return 0;
}
static int dib7000p_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000p_state *state = fe->demodulator_priv;
u16 tps = dib7000p_read_word(state, 463);
fep->inversion = INVERSION_AUTO;
fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth);
switch ((tps >> 8) & 0x3) {
case 0:
fep->transmission_mode = TRANSMISSION_MODE_2K;
break;
case 1:
fep->transmission_mode = TRANSMISSION_MODE_8K;
break;
/* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */
}
switch (tps & 0x3) {
case 0:
fep->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
fep->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
fep->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
fep->guard_interval = GUARD_INTERVAL_1_4;
break;
}
switch ((tps >> 14) & 0x3) {
case 0:
fep->modulation = QPSK;
break;
case 1:
fep->modulation = QAM_16;
break;
case 2:
default:
fep->modulation = QAM_64;
break;
}
/* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
/* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */
fep->hierarchy = HIERARCHY_NONE;
switch ((tps >> 5) & 0x7) {
case 1:
fep->code_rate_HP = FEC_1_2;
break;
case 2:
fep->code_rate_HP = FEC_2_3;
break;
case 3:
fep->code_rate_HP = FEC_3_4;
break;
case 5:
fep->code_rate_HP = FEC_5_6;
break;
case 7:
default:
fep->code_rate_HP = FEC_7_8;
break;
}
switch ((tps >> 2) & 0x7) {
case 1:
fep->code_rate_LP = FEC_1_2;
break;
case 2:
fep->code_rate_LP = FEC_2_3;
break;
case 3:
fep->code_rate_LP = FEC_3_4;
break;
case 5:
fep->code_rate_LP = FEC_5_6;
break;
case 7:
default:
fep->code_rate_LP = FEC_7_8;
break;
}
/* native interleaver: (dib7000p_read_word(state, 464) >> 5) & 0x1 */
return 0;
}
static int dib7000p_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000p_state *state = fe->demodulator_priv;
int time, ret;
if (state->version == SOC7090)
dib7090_set_diversity_in(fe, 0);
else
dib7000p_set_output_mode(state, OUTMODE_HIGH_Z);
/* maybe the parameter has been changed */
state->sfn_workaround_active = buggy_sfn_workaround;
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
/* start up the AGC */
state->agc_state = 0;
do {
time = dib7000p_agc_startup(fe);
if (time != -1)
msleep(time);
} while (time != -1);
if (fep->transmission_mode == TRANSMISSION_MODE_AUTO ||
fep->guard_interval == GUARD_INTERVAL_AUTO || fep->modulation == QAM_AUTO || fep->code_rate_HP == FEC_AUTO) {
int i = 800, found;
dib7000p_autosearch_start(fe);
do {
msleep(1);
found = dib7000p_autosearch_is_irq(fe);
} while (found == 0 && i--);
dprintk("autosearch returns: %d", found);
if (found == 0 || found == 1)
return 0;
dib7000p_get_frontend(fe);
}
ret = dib7000p_tune(fe);
/* make this a config parameter */
if (state->version == SOC7090) {
dib7090_set_output_mode(fe, state->cfg.output_mode);
if (state->cfg.enMpegOutput == 0) {
dib7090_setDibTxMux(state, MPEG_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
}
} else
dib7000p_set_output_mode(state, state->cfg.output_mode);
return ret;
}
static int dib7000p_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 lock = dib7000p_read_word(state, 509);
*stat = 0;
if (lock & 0x8000)
*stat |= FE_HAS_SIGNAL;
if (lock & 0x3000)
*stat |= FE_HAS_CARRIER;
if (lock & 0x0100)
*stat |= FE_HAS_VITERBI;
if (lock & 0x0010)
*stat |= FE_HAS_SYNC;
if ((lock & 0x0038) == 0x38)
*stat |= FE_HAS_LOCK;
return 0;
}
static int dib7000p_read_ber(struct dvb_frontend *fe, u32 * ber)
{
struct dib7000p_state *state = fe->demodulator_priv;
*ber = (dib7000p_read_word(state, 500) << 16) | dib7000p_read_word(state, 501);
return 0;
}
static int dib7000p_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
{
struct dib7000p_state *state = fe->demodulator_priv;
*unc = dib7000p_read_word(state, 506);
return 0;
}
static int dib7000p_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 val = dib7000p_read_word(state, 394);
*strength = 65535 - val;
return 0;
}
static int dib7000p_read_snr(struct dvb_frontend *fe, u16 * snr)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 val;
s32 signal_mant, signal_exp, noise_mant, noise_exp;
u32 result = 0;
val = dib7000p_read_word(state, 479);
noise_mant = (val >> 4) & 0xff;
noise_exp = ((val & 0xf) << 2);
val = dib7000p_read_word(state, 480);
noise_exp += ((val >> 14) & 0x3);
if ((noise_exp & 0x20) != 0)
noise_exp -= 0x40;
signal_mant = (val >> 6) & 0xFF;
signal_exp = (val & 0x3F);
if ((signal_exp & 0x20) != 0)
signal_exp -= 0x40;
if (signal_mant != 0)
result = intlog10(2) * 10 * signal_exp + 10 * intlog10(signal_mant);
else
result = intlog10(2) * 10 * signal_exp - 100;
if (noise_mant != 0)
result -= intlog10(2) * 10 * noise_exp + 10 * intlog10(noise_mant);
else
result -= intlog10(2) * 10 * noise_exp - 100;
*snr = result / ((1 << 24) / 10);
return 0;
}
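/*
 * Illustrative breakdown, not from the original source: intlog10()
 * returns log10(x) in 2^24 fixed point, so `result` accumulates
 * 10*log10(signal) - 10*log10(noise), i.e. the SNR in dB scaled by
 * 2^24. Dividing by (1 << 24) / 10 therefore reports *snr in tenths
 * of a dB - e.g. an SNR of 25.5 dB comes back as 255.
 */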
static int dib7000p_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 1000;
return 0;
}
static void dib7000p_release(struct dvb_frontend *demod)
{
struct dib7000p_state *st = demod->demodulator_priv;
dibx000_exit_i2c_master(&st->i2c_master);
i2c_del_adapter(&st->dib7090_tuner_adap);
kfree(st);
}
int dib7000pc_detection(struct i2c_adapter *i2c_adap)
{
u8 *tx, *rx;
struct i2c_msg msg[2] = {
{.addr = 18 >> 1, .flags = 0, .len = 2},
{.addr = 18 >> 1, .flags = I2C_M_RD, .len = 2},
};
int ret = 0;
tx = kzalloc(2*sizeof(u8), GFP_KERNEL);
if (!tx)
return -ENOMEM;
rx = kzalloc(2*sizeof(u8), GFP_KERNEL);
if (!rx) {
ret = -ENOMEM;
goto rx_memory_error;
}
msg[0].buf = tx;
msg[1].buf = rx;
tx[0] = 0x03;
tx[1] = 0x00;
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
dprintk("-D- DiB7000PC detected");
ret = 1;
goto out;
}
msg[0].addr = msg[1].addr = 0x40;
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
dprintk("-D- DiB7000PC detected");
ret = 1;
goto out;
}
dprintk("-D- DiB7000PC not detected");
out:
kfree(rx);
rx_memory_error:
kfree(tx);
return ret;
}
EXPORT_SYMBOL(dib7000pc_detection);
struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating)
{
struct dib7000p_state *st = demod->demodulator_priv;
return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
}
EXPORT_SYMBOL(dib7000p_get_i2c_master);
int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 val = dib7000p_read_word(state, 235) & 0xffef;
val |= (onoff & 0x1) << 4;
dprintk("PID filter enabled %d", onoff);
return dib7000p_write_word(state, 235, val);
}
EXPORT_SYMBOL(dib7000p_pid_filter_ctrl);
int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
return dib7000p_write_word(state, 241 + id, onoff ? (1 << 13) | pid : 0);
}
EXPORT_SYMBOL(dib7000p_pid_filter);
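/*
 * Usage sketch, not from the original source: a bridge driver would
 * typically switch the filter on and then program individual slots
 * (slot index and PID below are made up):
 *
 *   dib7000p_pid_filter_ctrl(fe, 1);       // enable the PID filter
 *   dib7000p_pid_filter(fe, 0, 0x100, 1);  // slot 0 passes PID 0x100
 *
 * Each slot writes (1 << 13) | pid into register 241 + id, or 0 to
 * clear the slot again.
 */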
int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
struct dib7000p_state *dpst;
int k = 0;
u8 new_addr = 0;
dpst = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
if (!dpst)
return -ENOMEM;
dpst->i2c_adap = i2c;
mutex_init(&dpst->i2c_buffer_lock);
for (k = no_of_demods - 1; k >= 0; k--) {
dpst->cfg = cfg[k];
/* designated i2c address */
if (cfg[k].default_i2c_addr != 0)
new_addr = cfg[k].default_i2c_addr + (k << 1);
else
new_addr = (0x40 + k) << 1;
dpst->i2c_addr = new_addr;
dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
if (dib7000p_identify(dpst) != 0) {
dpst->i2c_addr = default_addr;
dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
if (dib7000p_identify(dpst) != 0) {
dprintk("DiB7000P #%d: not identified\n", k);
kfree(dpst);
return -EIO;
}
}
/* start diversity to pull_down div_str - just for i2c-enumeration */
dib7000p_set_output_mode(dpst, OUTMODE_DIVERSITY);
/* set new i2c address and force divstart */
dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
dpst->cfg = cfg[k];
if (cfg[k].default_i2c_addr != 0)
dpst->i2c_addr = cfg[k].default_i2c_addr + (k << 1); /* must match the address programmed in the first loop */
else
dpst->i2c_addr = (0x40 + k) << 1;
// unforce divstr
dib7000p_write_word(dpst, 1285, dpst->i2c_addr << 2);
/* deactivate div - it was just for i2c-enumeration */
dib7000p_set_output_mode(dpst, OUTMODE_HIGH_Z);
}
kfree(dpst);
return 0;
}
EXPORT_SYMBOL(dib7000p_i2c_enumeration);
static const s32 lut_1000ln_mant[] = {
6908, 6956, 7003, 7047, 7090, 7131, 7170, 7208, 7244, 7279, 7313, 7346, 7377, 7408, 7438, 7467, 7495, 7523, 7549, 7575, 7600
};
static s32 dib7000p_get_adc_power(struct dvb_frontend *fe)
{
struct dib7000p_state *state = fe->demodulator_priv;
u32 tmp_val = 0, exp = 0, mant = 0;
s32 pow_i;
u16 buf[2];
u8 ix = 0;
buf[0] = dib7000p_read_word(state, 0x184);
buf[1] = dib7000p_read_word(state, 0x185);
pow_i = (buf[0] << 16) | buf[1];
dprintk("raw pow_i = %d", pow_i);
tmp_val = pow_i;
while (tmp_val >>= 1)
exp++;
mant = (pow_i * 1000 / (1 << exp));
dprintk(" mant = %d exp = %d", mant / 1000, exp);
ix = (u8) ((mant - 1000) / 100); /* index of the LUT */
dprintk(" ix = %d", ix);
pow_i = (lut_1000ln_mant[ix] + 693 * (exp - 20) - 6908);
pow_i = (pow_i << 8) / 1000;
dprintk(" pow_i = %d", pow_i);
return pow_i;
}
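/*
 * Illustrative breakdown, not from the original source: this is a
 * fixed-point natural log of the raw ADC power word. 693 is
 * 1000*ln(2) and 6908 is 1000*ln(1000), so
 *   pow_i ~= 1000*ln(mant/1000) + 693*(exp - 20) = 1000*ln(raw / 2^20)
 * before the final (pow_i << 8) / 1000 rescaling into a <<8
 * fixed-point result; lut_1000ln_mant[] above holds the precomputed
 * 1000*ln(x) samples for the mantissa range.
 */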
static int map_addr_to_serpar_number(struct i2c_msg *msg)
{
if ((msg->buf[0] <= 15))
msg->buf[0] -= 1;
else if (msg->buf[0] == 17)
msg->buf[0] = 15;
else if (msg->buf[0] == 16)
msg->buf[0] = 17;
else if (msg->buf[0] == 19)
msg->buf[0] = 16;
else if (msg->buf[0] >= 21 && msg->buf[0] <= 25)
msg->buf[0] -= 3;
else if (msg->buf[0] == 28)
msg->buf[0] = 23;
else
return -EINVAL;
return 0;
}
static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u8 n_overflow = 1;
u16 i = 1000;
u16 serpar_num = msg[0].buf[0];
while (n_overflow == 1 && i) {
n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
dprintk("Tuner ITF: write busy (overflow)");
}
dib7000p_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
dib7000p_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
return num;
}
static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u8 n_overflow = 1, n_empty = 1;
u16 i = 1000;
u16 serpar_num = msg[0].buf[0];
u16 read_word;
while (n_overflow == 1 && i) {
n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
i--;
if (i == 0)
dprintk("TunerITF: read busy (overflow)");
}
dib7000p_write_word(state, 1985, (0 << 6) | (serpar_num & 0x3f));
i = 1000;
while (n_empty == 1 && i) {
n_empty = dib7000p_read_word(state, 1984) & 0x1;
i--;
if (i == 0)
dprintk("TunerITF: read busy (empty)");
}
read_word = dib7000p_read_word(state, 1987);
msg[1].buf[0] = (read_word >> 8) & 0xff;
msg[1].buf[1] = (read_word) & 0xff;
return num;
}
static int w7090p_tuner_rw_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
if (map_addr_to_serpar_number(&msg[0]) == 0) { /* else = Tuner regs to ignore : DIG_CFG, CTRL_RF_LT, PLL_CFG, PWM1_REG, ADCCLK, DIG_CFG_3; SLEEP_EN... */
if (num == 1) { /* write */
return w7090p_tuner_write_serpar(i2c_adap, msg, 1);
} else { /* read */
return w7090p_tuner_read_serpar(i2c_adap, msg, 2);
}
}
return num;
}
static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num, u16 apb_address)
{
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u16 word;
if (num == 1) { /* write */
dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2])));
} else {
word = dib7000p_read_word(state, apb_address);
msg[1].buf[0] = (word >> 8) & 0xff;
msg[1].buf[1] = (word) & 0xff;
}
return num;
}
static int dib7090_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u16 apb_address = 0, word;
int i = 0;
switch (msg[0].buf[0]) {
case 0x12:
apb_address = 1920;
break;
case 0x14:
apb_address = 1921;
break;
case 0x24:
apb_address = 1922;
break;
case 0x1a:
apb_address = 1923;
break;
case 0x22:
apb_address = 1924;
break;
case 0x33:
apb_address = 1926;
break;
case 0x34:
apb_address = 1927;
break;
case 0x35:
apb_address = 1928;
break;
case 0x36:
apb_address = 1929;
break;
case 0x37:
apb_address = 1930;
break;
case 0x38:
apb_address = 1931;
break;
case 0x39:
apb_address = 1932;
break;
case 0x2a:
apb_address = 1935;
break;
case 0x2b:
apb_address = 1936;
break;
case 0x2c:
apb_address = 1937;
break;
case 0x2d:
apb_address = 1938;
break;
case 0x2e:
apb_address = 1939;
break;
case 0x2f:
apb_address = 1940;
break;
case 0x30:
apb_address = 1941;
break;
case 0x31:
apb_address = 1942;
break;
case 0x32:
apb_address = 1943;
break;
case 0x3e:
apb_address = 1944;
break;
case 0x3f:
apb_address = 1945;
break;
case 0x40:
apb_address = 1948;
break;
case 0x25:
apb_address = 914;
break;
case 0x26:
apb_address = 915;
break;
case 0x27:
apb_address = 917;
break;
case 0x28:
apb_address = 916;
break;
case 0x1d:
i = ((dib7000p_read_word(state, 72) >> 12) & 0x3);
word = dib7000p_read_word(state, 384 + i);
msg[1].buf[0] = (word >> 8) & 0xff;
msg[1].buf[1] = (word) & 0xff;
return num;
case 0x1f:
if (num == 1) { /* write */
word = (u16) ((msg[0].buf[1] << 8) | msg[0].buf[2]);
word &= 0x3;
word = (dib7000p_read_word(state, 72) & ~(3 << 12)) | (word << 12);
dib7000p_write_word(state, 72, word); /* Set the proper input */
return num;
}
}
if (apb_address != 0) /* R/W access via APB */
return dib7090p_rw_on_apb(i2c_adap, msg, num, apb_address);
else /* R/W access via SERPAR */
return w7090p_tuner_rw_serpar(i2c_adap, msg, num);
}
static u32 dib7000p_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm dib7090_tuner_xfer_algo = {
.master_xfer = dib7090_tuner_xfer,
.functionality = dib7000p_i2c_func,
};
struct i2c_adapter *dib7090_get_i2c_tuner(struct dvb_frontend *fe)
{
struct dib7000p_state *st = fe->demodulator_priv;
return &st->dib7090_tuner_adap;
}
EXPORT_SYMBOL(dib7090_get_i2c_tuner);
static int dib7090_host_bus_drive(struct dib7000p_state *state, u8 drive)
{
u16 reg;
/* drive host bus 2, 3, 4 */
reg = dib7000p_read_word(state, 1798) & ~((0x7) | (0x7 << 6) | (0x7 << 12));
reg |= (drive << 12) | (drive << 6) | drive;
dib7000p_write_word(state, 1798, reg);
/* drive host bus 5,6 */
reg = dib7000p_read_word(state, 1799) & ~((0x7 << 2) | (0x7 << 8));
reg |= (drive << 8) | (drive << 2);
dib7000p_write_word(state, 1799, reg);
/* drive host bus 7, 8, 9 */
reg = dib7000p_read_word(state, 1800) & ~((0x7) | (0x7 << 6) | (0x7 << 12));
reg |= (drive << 12) | (drive << 6) | drive;
dib7000p_write_word(state, 1800, reg);
/* drive host bus 10, 11 */
reg = dib7000p_read_word(state, 1801) & ~((0x7 << 2) | (0x7 << 8));
reg |= (drive << 8) | (drive << 2);
dib7000p_write_word(state, 1801, reg);
/* drive host bus 12, 13, 14 */
reg = dib7000p_read_word(state, 1802) & ~((0x7) | (0x7 << 6) | (0x7 << 12));
reg |= (drive << 12) | (drive << 6) | drive;
dib7000p_write_word(state, 1802, reg);
return 0;
}
static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 syncSize)
{
u32 quantif = 3;
u32 nom = (insertExtSynchro * P_Kin + syncSize);
u32 denom = P_Kout;
u32 syncFreq = ((nom << quantif) / denom);
if ((syncFreq & ((1 << quantif) - 1)) != 0)
syncFreq = (syncFreq >> quantif) + 1;
else
syncFreq = (syncFreq >> quantif);
if (syncFreq != 0)
syncFreq = syncFreq - 1;
return syncFreq;
}
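/*
 * Worked example, not from the original source: this is effectively
 * ceil(nom / denom) - 1, with quantif = 3 fractional bits used to
 * detect a remainder. For the MPEG Rx path below (P_Kin = 8,
 * P_Kout = 5, insertExtSynchro = 0, syncSize = 8):
 *   nom = 0 * 8 + 8 = 8, denom = 5
 *   (8 << 3) / 5 = 12; low 3 bits are non-zero, so (12 >> 3) + 1 = 2
 *   final syncFreq = 2 - 1 = 1
 */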
static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize)
{
dprintk("Configure DibStream Tx");
dib7000p_write_word(state, 1615, 1);
dib7000p_write_word(state, 1603, P_Kin);
dib7000p_write_word(state, 1605, P_Kout);
dib7000p_write_word(state, 1606, insertExtSynchro);
dib7000p_write_word(state, 1608, synchroMode);
dib7000p_write_word(state, 1609, (syncWord >> 16) & 0xffff);
dib7000p_write_word(state, 1610, syncWord & 0xffff);
dib7000p_write_word(state, 1612, syncSize);
dib7000p_write_word(state, 1615, 0);
return 0;
}
static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 synchroMode, u32 insertExtSynchro, u32 syncWord, u32 syncSize,
u32 dataOutRate)
{
u32 syncFreq;
dprintk("Configure DibStream Rx");
if ((P_Kin != 0) && (P_Kout != 0)) {
syncFreq = dib7090_calcSyncFreq(P_Kin, P_Kout, insertExtSynchro, syncSize);
dib7000p_write_word(state, 1542, syncFreq);
}
dib7000p_write_word(state, 1554, 1);
dib7000p_write_word(state, 1536, P_Kin);
dib7000p_write_word(state, 1537, P_Kout);
dib7000p_write_word(state, 1539, synchroMode);
dib7000p_write_word(state, 1540, (syncWord >> 16) & 0xffff);
dib7000p_write_word(state, 1541, syncWord & 0xffff);
dib7000p_write_word(state, 1543, syncSize);
dib7000p_write_word(state, 1544, dataOutRate);
dib7000p_write_word(state, 1554, 0);
return 0;
}
static void dib7090_enMpegMux(struct dib7000p_state *state, int onoff)
{
u16 reg_1287 = dib7000p_read_word(state, 1287);
switch (onoff) {
case 1:
reg_1287 &= ~(1<<7);
break;
case 0:
reg_1287 |= (1<<7);
break;
}
dib7000p_write_word(state, 1287, reg_1287);
}
static void dib7090_configMpegMux(struct dib7000p_state *state,
u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
{
dprintk("Enable Mpeg mux");
dib7090_enMpegMux(state, 0);
/* If the input mode is MPEG do not divide the serial clock */
if ((enSerialMode == 1) && (state->input_mode_mpeg == 1))
enSerialClkDiv2 = 0;
dib7000p_write_word(state, 1287, ((pulseWidth & 0x1f) << 2)
| ((enSerialMode & 0x1) << 1)
| (enSerialClkDiv2 & 0x1));
dib7090_enMpegMux(state, 1);
}
static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode)
{
u16 reg_1288 = dib7000p_read_word(state, 1288) & ~(0x7 << 7);
switch (mode) {
case MPEG_ON_DIBTX:
dprintk("SET MPEG ON DIBSTREAM TX");
dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
reg_1288 |= (1<<9);
break;
case DIV_ON_DIBTX:
dprintk("SET DIV_OUT ON DIBSTREAM TX");
dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
reg_1288 |= (1<<8);
break;
case ADC_ON_DIBTX:
dprintk("SET ADC_OUT ON DIBSTREAM TX");
dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
reg_1288 |= (1<<7);
break;
default:
break;
}
dib7000p_write_word(state, 1288, reg_1288);
}
static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode)
{
u16 reg_1288 = dib7000p_read_word(state, 1288) & ~(0x7 << 4);
switch (mode) {
case DEMOUT_ON_HOSTBUS:
dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
dib7090_enMpegMux(state, 0);
reg_1288 |= (1<<6);
break;
case DIBTX_ON_HOSTBUS:
dprintk("SET DIBSTREAM TX ON HOST BUS");
dib7090_enMpegMux(state, 0);
reg_1288 |= (1<<5);
break;
case MPEG_ON_HOSTBUS:
dprintk("SET MPEG MUX ON HOST BUS");
reg_1288 |= (1<<4);
break;
default:
break;
}
dib7000p_write_word(state, 1288, reg_1288);
}
int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 reg_1287;
switch (onoff) {
case 0: /* only use the internal way - not the diversity input */
dprintk("%s mode OFF : by default Enable Mpeg INPUT", __func__);
dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
/* Do not divide the serial clock of MPEG MUX */
/* in SERIAL MODE in case input mode MPEG is used */
reg_1287 = dib7000p_read_word(state, 1287);
/* enSerialClkDiv2 == 1 ? */
if ((reg_1287 & 0x1) == 1) {
/* force enSerialClkDiv2 = 0 */
reg_1287 &= ~0x1;
dib7000p_write_word(state, 1287, reg_1287);
}
state->input_mode_mpeg = 1;
break;
case 1: /* both ways */
case 2: /* only the diversity input */
dprintk("%s ON : Enable diversity INPUT", __func__);
dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
state->input_mode_mpeg = 0;
break;
}
dib7000p_set_diversity_in(&state->demod, onoff);
return 0;
}
static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 outreg, smo_mode, fifo_threshold;
u8 prefer_mpeg_mux_use = 1;
int ret = 0;
dib7090_host_bus_drive(state, 1);
fifo_threshold = 1792;
smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
outreg = dib7000p_read_word(state, 1286) & ~((1 << 10) | (0x7 << 6) | (1 << 1));
switch (mode) {
case OUTMODE_HIGH_Z:
outreg = 0;
break;
case OUTMODE_MPEG2_SERIAL:
if (prefer_mpeg_mux_use) {
dprintk("setting output mode TS_SERIAL using Mpeg Mux");
dib7090_configMpegMux(state, 3, 1, 1);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
dprintk("setting output mode TS_SERIAL using Smooth bloc");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (2<<6) | (0 << 1);
}
break;
case OUTMODE_MPEG2_PAR_GATED_CLK:
if (prefer_mpeg_mux_use) {
dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux");
dib7090_configMpegMux(state, 2, 0, 0);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else { /* Use Smooth block */
dprintk("setting output mode TS_PARALLEL_GATED using Smooth block");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (0<<6);
}
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
dprintk("setting output mode TS_PARALLEL_CONT using Smooth block");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (1<<6);
break;
case OUTMODE_MPEG2_FIFO: /* Using Smooth block because it is not supported by the new Mpeg Mux block */
dprintk("setting output mode TS_FIFO using Smooth block");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5<<6);
smo_mode |= (3 << 1);
fifo_threshold = 512;
break;
case OUTMODE_DIVERSITY:
dprintk("setting output mode MODE_DIVERSITY");
dib7090_setDibTxMux(state, DIV_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
case OUTMODE_ANALOG_ADC:
dprintk("setting output mode MODE_ANALOG_ADC");
dib7090_setDibTxMux(state, ADC_ON_DIBTX);
dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
}
if (mode != OUTMODE_HIGH_Z)
outreg |= (1 << 10);
if (state->cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5);
ret |= dib7000p_write_word(state, 235, smo_mode);
ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
ret |= dib7000p_write_word(state, 1286, outreg);
return ret;
}
int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 en_cur_state;
dprintk("sleep dib7090: %d", onoff);
en_cur_state = dib7000p_read_word(state, 1922);
if (en_cur_state > 0xff)
state->tuner_enable = en_cur_state;
if (onoff)
en_cur_state &= 0x00ff;
else {
if (state->tuner_enable != 0)
en_cur_state = state->tuner_enable;
}
dib7000p_write_word(state, 1922, en_cur_state);
return 0;
}
EXPORT_SYMBOL(dib7090_tuner_sleep);
int dib7090_get_adc_power(struct dvb_frontend *fe)
{
return dib7000p_get_adc_power(fe);
}
EXPORT_SYMBOL(dib7090_get_adc_power);
int dib7090_slave_reset(struct dvb_frontend *fe)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 reg;
reg = dib7000p_read_word(state, 1794);
dib7000p_write_word(state, 1794, reg | (4 << 12));
dib7000p_write_word(state, 1032, 0xffff);
return 0;
}
EXPORT_SYMBOL(dib7090_slave_reset);
static struct dvb_frontend_ops dib7000p_ops;
struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
struct dvb_frontend *demod;
struct dib7000p_state *st;
st = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
if (st == NULL)
return NULL;
memcpy(&st->cfg, cfg, sizeof(struct dib7000p_config));
st->i2c_adap = i2c_adap;
st->i2c_addr = i2c_addr;
st->gpio_val = cfg->gpio_val;
st->gpio_dir = cfg->gpio_dir;
/* Fall back to the FIFO output mode unless the caller explicitly
* configured serial or gated-clock parallel output.
*/
if ((st->cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
st->cfg.output_mode = OUTMODE_MPEG2_FIFO;
demod = &st->demod;
demod->demodulator_priv = st;
memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops));
mutex_init(&st->i2c_buffer_lock);
dib7000p_write_word(st, 1287, 0x0003); /* sram lead in, rdy */
if (dib7000p_identify(st) != 0)
goto error;
st->version = dib7000p_read_word(st, 897);
/* FIXME: make sure the dev.parent field is initialized, or else
request_firmware() will hit an OOPS (this should be moved somewhere
more common) */
st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
/* init 7090 tuner adapter */
strncpy(st->dib7090_tuner_adap.name, "DiB7090 tuner interface", sizeof(st->dib7090_tuner_adap.name));
st->dib7090_tuner_adap.algo = &dib7090_tuner_xfer_algo;
st->dib7090_tuner_adap.algo_data = NULL;
st->dib7090_tuner_adap.dev.parent = st->i2c_adap->dev.parent;
i2c_set_adapdata(&st->dib7090_tuner_adap, st);
i2c_add_adapter(&st->dib7090_tuner_adap);
dib7000p_demod_reset(st);
if (st->version == SOC7090) {
dib7090_set_output_mode(demod, st->cfg.output_mode);
dib7090_set_diversity_in(demod, 0);
}
return demod;
error:
kfree(st);
return NULL;
}
EXPORT_SYMBOL(dib7000p_attach);
static struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000PC",
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO,
},
.release = dib7000p_release,
.init = dib7000p_wakeup,
.sleep = dib7000p_sleep,
.set_frontend = dib7000p_set_frontend,
.get_tune_settings = dib7000p_fe_get_tune_settings,
.get_frontend = dib7000p_get_frontend,
.read_status = dib7000p_read_status,
.read_ber = dib7000p_read_ber,
.read_signal_strength = dib7000p_read_signal_strength,
.read_snr = dib7000p_read_snr,
.read_ucblocks = dib7000p_read_unc_blocks,
};
MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jmztaylor/android_kernel_htc_zara | drivers/staging/comedi/drivers/adl_pci7432.c | 4897 | 6868 | /*
comedi/drivers/adl_pci7432.c
Hardware comedi driver for PCI7432 Adlink card
Copyright (C) 2004 Michel Lachaine <mike@mikelachaine.ca>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: adl_pci7432
Description: Driver for the Adlink PCI-7432 64 ch. isolated digital io board
Devices: [ADLink] PCI-7432 (adl_pci7432)
Author: Michel Lachaine <mike@mikelachaine.ca>
Status: experimental
Updated: Mon, 14 Apr 2008 15:08:14 +0100
Configuration Options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
If bus/slot is not specified, the first supported
PCI device found will be used.
*/
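/*
 * Illustrative example (not part of the original driver docs): attaching
 * the board in PCI bus 0, slot 5 would use the comedi option list "0,5";
 * with no options the first PCI-7432 found is claimed.
 */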
#include "../comedidev.h"
#include <linux/kernel.h>
#include "comedi_pci.h"
#define PCI7432_DI 0x00
#define PCI7432_DO 0x00
#define PCI_DEVICE_ID_PCI7432 0x7432
static DEFINE_PCI_DEVICE_TABLE(adl_pci7432_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432) },
{0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7432_pci_table);
struct adl_pci7432_private {
int data;
struct pci_dev *pci_dev;
};
#define devpriv ((struct adl_pci7432_private *)dev->private)
static int adl_pci7432_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int adl_pci7432_detach(struct comedi_device *dev);
static struct comedi_driver driver_adl_pci7432 = {
.driver_name = "adl_pci7432",
.module = THIS_MODULE,
.attach = adl_pci7432_attach,
.detach = adl_pci7432_detach,
};
/* Digital IO */
static int adl_pci7432_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
static int adl_pci7432_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
static int adl_pci7432_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct pci_dev *pcidev = NULL;
struct comedi_subdevice *s;
int bus, slot;
printk(KERN_INFO "comedi%d: attach adl_pci7432\n", dev->minor);
dev->board_name = "pci7432";
bus = it->options[0];
slot = it->options[1];
if (alloc_private(dev, sizeof(struct adl_pci7432_private)) < 0)
return -ENOMEM;
if (alloc_subdevices(dev, 2) < 0)
return -ENOMEM;
for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
pcidev->device == PCI_DEVICE_ID_PCI7432) {
if (bus || slot) {
/* requested particular bus/slot */
if (pcidev->bus->number != bus
|| PCI_SLOT(pcidev->devfn) != slot) {
continue;
}
}
devpriv->pci_dev = pcidev;
if (comedi_pci_enable(pcidev, "adl_pci7432") < 0) {
printk(KERN_ERR "comedi%d: Failed to enable PCI device and request regions\n",
dev->minor);
return -EIO;
}
dev->iobase = pci_resource_start(pcidev, 2);
printk(KERN_INFO "comedi: base addr %4lx\n",
dev->iobase);
s = dev->subdevices + 0;
s->type = COMEDI_SUBD_DI;
s->subdev_flags =
SDF_READABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 32;
s->maxdata = 1;
s->len_chanlist = 32;
s->io_bits = 0x00000000;
s->range_table = &range_digital;
s->insn_bits = adl_pci7432_di_insn_bits;
s = dev->subdevices + 1;
s->type = COMEDI_SUBD_DO;
s->subdev_flags =
SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 32;
s->maxdata = 1;
s->len_chanlist = 32;
s->io_bits = 0xffffffff;
s->range_table = &range_digital;
s->insn_bits = adl_pci7432_do_insn_bits;
printk(KERN_DEBUG "comedi%d: adl_pci7432 attached\n",
dev->minor);
return 1;
}
}
printk(KERN_ERR "comedi%d: no supported board found! (req. bus/slot : %d/%d)\n",
dev->minor, bus, slot);
return -EIO;
}
static int adl_pci7432_detach(struct comedi_device *dev)
{
printk(KERN_INFO "comedi%d: pci7432: remove\n", dev->minor);
if (devpriv && devpriv->pci_dev) {
if (dev->iobase)
comedi_pci_disable(devpriv->pci_dev);
pci_dev_put(devpriv->pci_dev);
}
return 0;
}
static int adl_pci7432_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
printk(KERN_DEBUG "comedi: pci7432_do_insn_bits called\n");
printk(KERN_DEBUG "comedi: data0: %8x data1: %8x\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
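/*
 * Per the comedi insn_bits convention, data[0] is the mask of channels to
 * update and data[1] holds their new values. Worked example: with
 * s->state = 0xff, data[0] = 0x0f and data[1] = 0x05 yield a new state of
 * 0xf5, touching only the low nibble.
 */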
if (data[0]) {
s->state &= ~data[0];
s->state |= (data[0] & data[1]);
printk(KERN_DEBUG "comedi: out: %8x on iobase %4lx\n", s->state,
dev->iobase + PCI7432_DO);
outl(s->state & 0xffffffff, dev->iobase + PCI7432_DO);
}
return 2;
}
static int adl_pci7432_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
printk(KERN_DEBUG "comedi: pci7432_di_insn_bits called\n");
printk(KERN_DEBUG "comedi: data0: %8x data1: %8x\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
data[1] = inl(dev->iobase + PCI7432_DI) & 0xffffffff;
printk(KERN_DEBUG "comedi: data1 %8x\n", data[1]);
return 2;
}
static int __devinit driver_adl_pci7432_pci_probe(struct pci_dev *dev,
const struct pci_device_id
*ent)
{
return comedi_pci_auto_config(dev, driver_adl_pci7432.driver_name);
}
static void __devexit driver_adl_pci7432_pci_remove(struct pci_dev *dev)
{
comedi_pci_auto_unconfig(dev);
}
static struct pci_driver driver_adl_pci7432_pci_driver = {
.id_table = adl_pci7432_pci_table,
.probe = &driver_adl_pci7432_pci_probe,
.remove = __devexit_p(&driver_adl_pci7432_pci_remove)
};
static int __init driver_adl_pci7432_init_module(void)
{
int retval;
retval = comedi_driver_register(&driver_adl_pci7432);
if (retval < 0)
return retval;
driver_adl_pci7432_pci_driver.name =
(char *)driver_adl_pci7432.driver_name;
return pci_register_driver(&driver_adl_pci7432_pci_driver);
}
static void __exit driver_adl_pci7432_cleanup_module(void)
{
pci_unregister_driver(&driver_adl_pci7432_pci_driver);
comedi_driver_unregister(&driver_adl_pci7432);
}
module_init(driver_adl_pci7432_init_module);
module_exit(driver_adl_pci7432_cleanup_module);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
AuxXxi/caf_kernel | drivers/staging/vme/bridges/vme_ca91cx42.c | 4897 | 49589 | /*
* Support for the Tundra Universe I/II VME-PCI Bridge Chips
*
* Author: Martyn Welch <martyn.welch@ge.com>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*
* Derived from ca91c042.c by Michael Wyrick
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);
/* Module parameters */
static int geoid;
static const char driver_name[] = "vme_ca91cx42";
static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
{ },
};
static struct pci_driver ca91cx42_driver = {
.name = driver_name,
.id_table = ca91cx42_ids,
.probe = ca91cx42_probe,
.remove = ca91cx42_remove,
};
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
wake_up(&bridge->dma_queue);
return CA91CX42_LINT_DMA;
}
static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
int i;
u32 serviced = 0;
for (i = 0; i < 4; i++) {
if (stat & CA91CX42_LINT_LM[i]) {
/* We only enable interrupts if the callback is set */
bridge->lm_callback[i](i);
serviced |= CA91CX42_LINT_LM[i];
}
}
return serviced;
}
/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
wake_up(&bridge->mbox_queue);
return CA91CX42_LINT_MBOX;
}
static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
wake_up(&bridge->iack_queue);
return CA91CX42_LINT_SW_IACK;
}
static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
int val;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
val = ioread32(bridge->base + DGCS);
if (!(val & 0x00000800)) {
dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
"Read Error DGCS=%08X\n", val);
}
return CA91CX42_LINT_VERR;
}
static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
int val;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
val = ioread32(bridge->base + DGCS);
if (!(val & 0x00000800))
dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
"Read Error DGCS=%08X\n", val);
return CA91CX42_LINT_LERR;
}
static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
int stat)
{
int vec, i, serviced = 0;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
for (i = 7; i > 0; i--) {
if (stat & (1 << i)) {
vec = ioread32(bridge->base +
CA91CX42_V_STATID[i]) & 0xff;
vme_irq_handler(ca91cx42_bridge, i, vec);
serviced |= (1 << i);
}
}
return serviced;
}
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
u32 stat, enable, serviced = 0;
struct vme_bridge *ca91cx42_bridge;
struct ca91cx42_driver *bridge;
ca91cx42_bridge = ptr;
bridge = ca91cx42_bridge->driver_priv;
enable = ioread32(bridge->base + LINT_EN);
stat = ioread32(bridge->base + LINT_STAT);
/* Only look at unmasked interrupts */
stat &= enable;
if (unlikely(!stat))
return IRQ_NONE;
if (stat & CA91CX42_LINT_DMA)
serviced |= ca91cx42_DMA_irqhandler(bridge);
if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
CA91CX42_LINT_LM3))
serviced |= ca91cx42_LM_irqhandler(bridge, stat);
if (stat & CA91CX42_LINT_MBOX)
serviced |= ca91cx42_MB_irqhandler(bridge, stat);
if (stat & CA91CX42_LINT_SW_IACK)
serviced |= ca91cx42_IACK_irqhandler(bridge);
if (stat & CA91CX42_LINT_VERR)
serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
if (stat & CA91CX42_LINT_LERR)
serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
CA91CX42_LINT_VIRQ7))
serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
/* Clear serviced interrupts */
iowrite32(serviced, bridge->base + LINT_STAT);
return IRQ_HANDLED;
}
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
int result, tmp;
struct pci_dev *pdev;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
/* Need pdev */
pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
/* Initialise list for VME bus errors */
INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);
mutex_init(&ca91cx42_bridge->irq_mtx);
/* Disable interrupts from PCI to VME */
iowrite32(0, bridge->base + VINT_EN);
/* Disable PCI interrupts */
iowrite32(0, bridge->base + LINT_EN);
/* Clear Any Pending PCI Interrupts */
iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
driver_name, ca91cx42_bridge);
if (result) {
dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
pdev->irq);
return result;
}
/* Ensure all interrupts are mapped to PCI Interrupt 0 */
iowrite32(0, bridge->base + LINT_MAP0);
iowrite32(0, bridge->base + LINT_MAP1);
iowrite32(0, bridge->base + LINT_MAP2);
/* Enable DMA, mailbox & LM Interrupts */
tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
iowrite32(tmp, bridge->base + LINT_EN);
return 0;
}
static void ca91cx42_irq_exit(struct vme_bridge *ca91cx42_bridge,
struct pci_dev *pdev)
{
struct ca91cx42_driver *bridge = ca91cx42_bridge->driver_priv;
/* Disable interrupts from PCI to VME */
iowrite32(0, bridge->base + VINT_EN);
/* Disable PCI interrupts */
iowrite32(0, bridge->base + LINT_EN);
/* Clear Any Pending PCI Interrupts */
iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
/* free_irq() must get the same dev_id that was passed to request_irq() */
free_irq(pdev->irq, ca91cx42_bridge);
}
static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
{
u32 tmp;
tmp = ioread32(bridge->base + LINT_STAT);
if (tmp & (1 << level))
return 0;
else
return 1;
}
/*
* Set up a VME interrupt
*/
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
int state, int sync)
{
struct pci_dev *pdev;
u32 tmp;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
/* Enable IRQ level */
tmp = ioread32(bridge->base + LINT_EN);
if (state == 0)
tmp &= ~CA91CX42_LINT_VIRQ[level];
else
tmp |= CA91CX42_LINT_VIRQ[level];
iowrite32(tmp, bridge->base + LINT_EN);
if ((state == 0) && (sync != 0)) {
pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
dev);
synchronize_irq(pdev->irq);
}
}
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
int statid)
{
u32 tmp;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
/* Universe can only generate even vectors */
if (statid & 1)
return -EINVAL;
mutex_lock(&bridge->vme_int);
tmp = ioread32(bridge->base + VINT_EN);
/* Set Status/ID */
iowrite32(statid << 24, bridge->base + STATID);
/* Assert VMEbus IRQ */
tmp = tmp | (1 << (level + 24));
iowrite32(tmp, bridge->base + VINT_EN);
/* Wait for IACK */
wait_event_interruptible(bridge->iack_queue,
ca91cx42_iack_received(bridge, level));
/* Return interrupt to low state */
tmp = ioread32(bridge->base + VINT_EN);
tmp = tmp & ~(1 << (level + 24));
iowrite32(tmp, bridge->base + VINT_EN);
mutex_unlock(&bridge->vme_int);
return 0;
}
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity;
unsigned int temp_ctl = 0;
unsigned int vme_bound, pci_offset;
struct vme_bridge *ca91cx42_bridge;
struct ca91cx42_driver *bridge;
ca91cx42_bridge = image->parent;
bridge = ca91cx42_bridge->driver_priv;
i = image->number;
switch (aspace) {
case VME_A16:
addr |= CA91CX42_VSI_CTL_VAS_A16;
break;
case VME_A24:
addr |= CA91CX42_VSI_CTL_VAS_A24;
break;
case VME_A32:
addr |= CA91CX42_VSI_CTL_VAS_A32;
break;
case VME_USER1:
addr |= CA91CX42_VSI_CTL_VAS_USER1;
break;
case VME_USER2:
addr |= CA91CX42_VSI_CTL_VAS_USER2;
break;
case VME_A64:
case VME_CRCSR:
case VME_USER3:
case VME_USER4:
default:
dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
return -EINVAL;
break;
}
/*
* Bound address is a valid address for the window, adjust
* accordingly
*/
vme_bound = vme_base + size;
pci_offset = pci_base - vme_base;
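/*
 * Per the granularity selection below, slave images 0 and 4 decode down to
 * 4 KB blocks while the remaining images only decode 64 KB blocks (a
 * property of the Universe II register layout; the same split appears in
 * ca91cx42_slave_get() and ca91cx42_master_set()).
 */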
if ((i == 0) || (i == 4))
granularity = 0x1000;
else
granularity = 0x10000;
if (vme_base & (granularity - 1)) {
dev_err(ca91cx42_bridge->parent, "Invalid VME base "
"alignment\n");
return -EINVAL;
}
if (vme_bound & (granularity - 1)) {
dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
"alignment\n");
return -EINVAL;
}
if (pci_offset & (granularity - 1)) {
dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
"alignment\n");
return -EINVAL;
}
/* Disable while we are mucking around */
temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
temp_ctl &= ~CA91CX42_VSI_CTL_EN;
iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
/* Setup mapping */
iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
/* Setup address space */
temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
temp_ctl |= addr;
/* Setup cycle types */
temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
if (cycle & VME_SUPER)
temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
if (cycle & VME_USER)
temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
if (cycle & VME_PROG)
temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
if (cycle & VME_DATA)
temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
/* Write ctl reg without enable */
iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
if (enabled)
temp_ctl |= CA91CX42_VSI_CTL_EN;
iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
return 0;
}
static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned long long vme_bound, pci_offset;
struct ca91cx42_driver *bridge;
bridge = image->parent->driver_priv;
i = image->number;
if ((i == 0) || (i == 4))
granularity = 0x1000;
else
granularity = 0x10000;
/* Read Registers */
ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
*pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
*aspace = 0;
*cycle = 0;
if (ctl & CA91CX42_VSI_CTL_EN)
*enabled = 1;
if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
*aspace = VME_A16;
if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
*aspace = VME_A24;
if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
*aspace = VME_A32;
if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
*aspace = VME_USER1;
if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
*aspace = VME_USER2;
if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
*cycle |= VME_SUPER;
if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
*cycle |= VME_USER;
if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
*cycle |= VME_PROG;
if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
*cycle |= VME_DATA;
return 0;
}
/*
* Allocate and map PCI Resource
*/
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
unsigned long long size)
{
unsigned long long existing_size;
int retval = 0;
struct pci_dev *pdev;
struct vme_bridge *ca91cx42_bridge;
ca91cx42_bridge = image->parent;
/* Find pci_dev container of dev */
if (ca91cx42_bridge->parent == NULL) {
/* Cannot use dev_err() here: the device pointer is the NULL we just found */
printk(KERN_ERR "Dev entry NULL\n");
return -EINVAL;
}
pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
existing_size = (unsigned long long)(image->bus_resource.end -
image->bus_resource.start);
/* If the existing size is OK, return */
if (existing_size == (size - 1))
return 0;
if (existing_size != 0) {
iounmap(image->kern_base);
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
memset(&image->bus_resource, 0, sizeof(struct resource));
}
if (image->bus_resource.name == NULL) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
if (image->bus_resource.name == NULL) {
dev_err(ca91cx42_bridge->parent, "Unable to allocate "
"memory for resource name\n");
retval = -ENOMEM;
goto err_name;
}
}
sprintf((char *)image->bus_resource.name, "%s.%d",
ca91cx42_bridge->name, image->number);
image->bus_resource.start = 0;
image->bus_resource.end = (unsigned long)size;
image->bus_resource.flags = IORESOURCE_MEM;
retval = pci_bus_alloc_resource(pdev->bus,
&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
"resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
}
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
if (image->kern_base == NULL) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
}
return 0;
err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
return retval;
}
/*
* Free and unmap PCI Resource
*/
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
iounmap(image->kern_base);
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(struct resource));
}
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size, u32 aspace,
u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i, granularity = 0;
unsigned int temp_ctl = 0;
unsigned long long pci_bound, vme_offset, pci_base;
struct vme_bridge *ca91cx42_bridge;
struct ca91cx42_driver *bridge;
ca91cx42_bridge = image->parent;
bridge = ca91cx42_bridge->driver_priv;
i = image->number;
if ((i == 0) || (i == 4))
granularity = 0x1000;
else
granularity = 0x10000;
/* Verify input data */
if (vme_base & (granularity - 1)) {
dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
"alignment\n");
retval = -EINVAL;
goto err_window;
}
if (size & (granularity - 1)) {
dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
"alignment\n");
retval = -EINVAL;
goto err_window;
}
spin_lock(&image->lock);
/*
* Let's allocate the resource here rather than further up the stack as
* it avoids pushing loads of bus dependent stuff up the stack
*/
retval = ca91cx42_alloc_resource(image, size);
if (retval) {
spin_unlock(&image->lock);
dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
"for resource name\n");
retval = -ENOMEM;
goto err_res;
}
pci_base = (unsigned long long)image->bus_resource.start;
/*
* Bound address is a valid address for the window, adjust
* according to window granularity.
*/
pci_bound = pci_base + size;
vme_offset = vme_base - pci_base;
/* Disable while we are mucking around */
temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
temp_ctl &= ~CA91CX42_LSI_CTL_EN;
iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
/* Setup cycle types */
temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
if (cycle & VME_BLT)
temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
if (cycle & VME_MBLT)
temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
/* Setup data width */
temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
switch (dwidth) {
case VME_D8:
temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
break;
case VME_D16:
temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
break;
case VME_D32:
temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
break;
case VME_D64:
temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
break;
default:
spin_unlock(&image->lock);
dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
retval = -EINVAL;
goto err_dwidth;
break;
}
/* Setup address space */
temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
switch (aspace) {
case VME_A16:
temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
break;
case VME_A24:
temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
break;
case VME_A32:
temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
break;
case VME_CRCSR:
temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
break;
case VME_USER1:
temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
break;
case VME_USER2:
temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
break;
case VME_A64:
case VME_USER3:
case VME_USER4:
default:
spin_unlock(&image->lock);
dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
retval = -EINVAL;
goto err_aspace;
break;
}
temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
if (cycle & VME_SUPER)
temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
if (cycle & VME_PROG)
temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
/* Setup mapping */
iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
/* Write ctl reg without enable */
iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
if (enabled)
temp_ctl |= CA91CX42_LSI_CTL_EN;
iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
spin_unlock(&image->lock);
return 0;
err_aspace:
err_dwidth:
ca91cx42_free_resource(image);
err_res:
err_window:
return retval;
}
static int __ca91cx42_master_get(struct vme_master_resource *image,
int *enabled, unsigned long long *vme_base, unsigned long long *size,
u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned long long pci_base, pci_bound, vme_offset;
struct ca91cx42_driver *bridge;
bridge = image->parent->driver_priv;
i = image->number;
ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
*vme_base = pci_base + vme_offset;
*size = (unsigned long long)(pci_bound - pci_base);
*enabled = 0;
*aspace = 0;
*cycle = 0;
*dwidth = 0;
if (ctl & CA91CX42_LSI_CTL_EN)
*enabled = 1;
/* Setup address space */
switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
case CA91CX42_LSI_CTL_VAS_A16:
*aspace = VME_A16;
break;
case CA91CX42_LSI_CTL_VAS_A24:
*aspace = VME_A24;
break;
case CA91CX42_LSI_CTL_VAS_A32:
*aspace = VME_A32;
break;
case CA91CX42_LSI_CTL_VAS_CRCSR:
*aspace = VME_CRCSR;
break;
case CA91CX42_LSI_CTL_VAS_USER1:
*aspace = VME_USER1;
break;
case CA91CX42_LSI_CTL_VAS_USER2:
*aspace = VME_USER2;
break;
}
/* XXX Not sure how to check for MBLT */
/* Setup cycle types */
if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
*cycle |= VME_BLT;
else
*cycle |= VME_SCT;
if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
*cycle |= VME_SUPER;
else
*cycle |= VME_USER;
if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
*cycle |= VME_PROG;
else
*cycle |= VME_DATA;
/* Setup data width */
switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
case CA91CX42_LSI_CTL_VDW_D8:
*dwidth = VME_D8;
break;
case CA91CX42_LSI_CTL_VDW_D16:
*dwidth = VME_D16;
break;
case CA91CX42_LSI_CTL_VDW_D32:
*dwidth = VME_D32;
break;
case CA91CX42_LSI_CTL_VDW_D64:
*dwidth = VME_D64;
break;
}
return 0;
}
static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
u32 *cycle, u32 *dwidth)
{
int retval;
spin_lock(&image->lock);
retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
cycle, dwidth);
spin_unlock(&image->lock);
return retval;
}
static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
void *buf, size_t count, loff_t offset)
{
ssize_t retval;
void *addr = image->kern_base + offset;
unsigned int done = 0;
unsigned int count32;
if (count == 0)
return 0;
spin_lock(&image->lock);
/* The following code handles VME address alignment in order to assure
 * the maximal data width cycle. We cannot use memcpy_xxx directly here
 * because it may cut the transfer into 8-bit cycles, making a D16 cycle
 * impossible. The bridge itself, on the other hand, assures that the
 * maximal configured data cycle is used and splits it automatically for
 * non-aligned addresses.
 */
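/*
 * Worked example: for addr ending in binary ...11 and count = 10 the code
 * below issues one 8-bit read, one 16-bit read, a single 4-byte block
 * copy and then a trailing 16-bit and 8-bit read, so no access is ever
 * split below the configured data width.
 */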
if ((uintptr_t)addr & 0x1) {
*(u8 *)buf = ioread8(addr);
done += 1;
if (done == count)
goto out;
}
if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
goto out;
} else {
*(u16 *)(buf + done) = ioread16(addr + done);
done += 2;
}
}
count32 = (count - done) & ~0x3;
if (count32 > 0) {
memcpy_fromio(buf + done, addr + done, count32);
done += count32;
}
if ((count - done) & 0x2) {
*(u16 *)(buf + done) = ioread16(addr + done);
done += 2;
}
if ((count - done) & 0x1) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
}
out:
retval = count;
spin_unlock(&image->lock);
return retval;
}
static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
void *buf, size_t count, loff_t offset)
{
ssize_t retval;
void *addr = image->kern_base + offset;
unsigned int done = 0;
unsigned int count32;
if (count == 0)
return 0;
spin_lock(&image->lock);
/* Here we apply the same strategy as in master_read in order to
 * assure the D16 cycle when required.
 */
if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
done += 1;
if (done == count)
goto out;
}
if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
goto out;
} else {
iowrite16(*(u16 *)(buf + done), addr + done);
done += 2;
}
}
count32 = (count - done) & ~0x3;
if (count32 > 0) {
memcpy_toio(addr + done, buf + done, count32);
done += count32;
}
if ((count - done) & 0x2) {
iowrite16(*(u16 *)(buf + done), addr + done);
done += 2;
}
if ((count - done) & 0x1) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
}
out:
retval = count;
spin_unlock(&image->lock);
return retval;
}
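/*
 * Summary of the special cycle the next function programs (our reading of
 * the Universe II manual; only the register writes below are taken from
 * this driver): during one locked read-modify-write, the bits selected in
 * SCYC_EN are compared against SCYC_CMP and, where they match, replaced
 * by the corresponding SCYC_SWP bits before the data is written back.
 */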
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
u32 result;
uintptr_t pci_addr;
int i;
struct ca91cx42_driver *bridge;
struct device *dev;
bridge = image->parent->driver_priv;
dev = image->parent->parent;
/* Find the PCI address that maps to the desired VME address */
i = image->number;
/* Locking as we can only do one of these at a time */
mutex_lock(&bridge->vme_rmw);
/* Lock image */
spin_lock(&image->lock);
pci_addr = (uintptr_t)image->kern_base + offset;
/* Address must be 4-byte aligned */
if (pci_addr & 0x3) {
dev_err(dev, "RMW Address not 4-byte aligned\n");
result = -EINVAL;
goto out;
}
/* Ensure RMW Disabled whilst configuring */
iowrite32(0, bridge->base + SCYC_CTL);
/* Configure registers */
iowrite32(mask, bridge->base + SCYC_EN);
iowrite32(compare, bridge->base + SCYC_CMP);
iowrite32(swap, bridge->base + SCYC_SWP);
iowrite32(pci_addr, bridge->base + SCYC_ADDR);
/* Enable RMW */
iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
/* Kick process off with a read to the required address. */
result = ioread32(image->kern_base + offset);
/* Disable RMW */
iowrite32(0, bridge->base + SCYC_CTL);
out:
spin_unlock(&image->lock);
mutex_unlock(&bridge->vme_rmw);
return result;
}
static int ca91cx42_dma_list_add(struct vme_dma_list *list,
struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
struct ca91cx42_dma_entry *entry, *prev;
struct vme_dma_pci *pci_attr;
struct vme_dma_vme *vme_attr;
dma_addr_t desc_ptr;
int retval = 0;
struct device *dev;
dev = list->parent->parent->parent;
/* XXX descriptor must be aligned on 64-bit boundaries */
entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
if (entry == NULL) {
dev_err(dev, "Failed to allocate memory for dma resource "
"structure\n");
retval = -ENOMEM;
goto err_mem;
}
/* Test descriptor alignment */
if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
"required: %p\n", &entry->descriptor);
retval = -EINVAL;
goto err_align;
}
memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
vme_attr = dest->private;
pci_attr = src->private;
} else {
vme_attr = src->private;
pci_attr = dest->private;
}
/* Check that we can fulfill the required attributes */
if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
VME_USER2)) != 0) {
dev_err(dev, "Unsupported address space\n");
retval = -EINVAL;
goto err_aspace;
}
if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
VME_PROG | VME_DATA)) != 0) {
dev_err(dev, "Unsupported cycle type\n");
retval = -EINVAL;
goto err_cycle;
}
/* Check to see if we can fulfill source and destination */
if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
dev_err(dev, "Cannot perform transfer with this "
"source-destination combination\n");
retval = -EINVAL;
goto err_direct;
}
/* Setup cycle types */
if (vme_attr->cycle & VME_BLT)
entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
/* Setup data width */
switch (vme_attr->dwidth) {
case VME_D8:
entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
break;
case VME_D16:
entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
break;
case VME_D32:
entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
break;
case VME_D64:
entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
break;
default:
dev_err(dev, "Invalid data width\n");
/* Free the already-allocated entry rather than leaking it */
retval = -EINVAL;
goto err_align;
}
/* Setup address space */
switch (vme_attr->aspace) {
case VME_A16:
entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
break;
case VME_A24:
entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
break;
case VME_A32:
entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
break;
case VME_USER1:
entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
break;
case VME_USER2:
entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
break;
default:
dev_err(dev, "Invalid address space\n");
/* Free the already-allocated entry rather than leaking it */
retval = -EINVAL;
goto err_align;
}
if (vme_attr->cycle & VME_SUPER)
entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
if (vme_attr->cycle & VME_PROG)
entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
entry->descriptor.dtbc = count;
entry->descriptor.dla = pci_attr->address;
entry->descriptor.dva = vme_attr->address;
entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
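/* Mark this descriptor as the end of the chain; a subsequent
 * ca91cx42_dma_list_add() call relinks it to the next descriptor. */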
/* Add to list */
list_add_tail(&entry->list, &list->entries);
/* Fill out previous descriptors "Next Address" */
if (entry->list.prev != &list->entries) {
prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
list);
/* We need the bus address for the pointer */
desc_ptr = virt_to_bus(&entry->descriptor);
prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
}
return 0;
err_cycle:
err_aspace:
err_direct:
err_align:
kfree(entry);
err_mem:
return retval;
}
static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
u32 tmp;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
tmp = ioread32(bridge->base + DGCS);
if (tmp & CA91CX42_DGCS_ACT)
return 0;
else
return 1;
}
static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
struct vme_dma_resource *ctrlr;
struct ca91cx42_dma_entry *entry;
int retval = 0;
dma_addr_t bus_addr;
u32 val;
struct device *dev;
struct ca91cx42_driver *bridge;
ctrlr = list->parent;
bridge = ctrlr->parent->driver_priv;
dev = ctrlr->parent->parent;
mutex_lock(&ctrlr->mtx);
if (!(list_empty(&ctrlr->running))) {
/*
* XXX We have an active DMA transfer and currently haven't
* sorted out the mechanism for "pending" DMA transfers.
* Return busy.
*/
/* Need to add to pending here */
mutex_unlock(&ctrlr->mtx);
return -EBUSY;
} else {
list_add(&list->list, &ctrlr->running);
}
/* Get first bus address and write into registers */
entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
list);
bus_addr = virt_to_bus(&entry->descriptor);
mutex_unlock(&ctrlr->mtx);
iowrite32(0, bridge->base + DTBC);
iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
/* Start the operation */
val = ioread32(bridge->base + DGCS);
/* XXX Could set VMEbus On and Off Counters here */
val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
CA91CX42_DGCS_PERR);
iowrite32(val, bridge->base + DGCS);
val |= CA91CX42_DGCS_GO;
iowrite32(val, bridge->base + DGCS);
wait_event_interruptible(bridge->dma_queue,
ca91cx42_dma_busy(ctrlr->parent));
/*
* Read status register, this register is valid until we kick off a
* new transfer.
*/
val = ioread32(bridge->base + DGCS);
if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
CA91CX42_DGCS_PERR)) {
dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
val = ioread32(bridge->base + DCTL);
}
/* Remove list from running list */
mutex_lock(&ctrlr->mtx);
list_del(&list->list);
mutex_unlock(&ctrlr->mtx);
return retval;
}
static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
struct list_head *pos, *temp;
struct ca91cx42_dma_entry *entry;
/* detach and free each entry */
list_for_each_safe(pos, temp, &list->entries) {
list_del(pos);
entry = list_entry(pos, struct ca91cx42_dma_entry, list);
kfree(entry);
}
return 0;
}
/*
* All 4 location monitors reside at the same base - this is therefore a
* system wide configuration.
*
* This does not enable the LM monitor - that should be done when the first
* callback is attached and disabled when the last callback is removed.
*/
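/*
 * Minimal consumer sketch (hypothetical; names come from the staging VME
 * API, not from this file). A slave driver would request a location
 * monitor, program its 64 KB-aligned base and then attach per-monitor
 * callbacks, which lands in ca91cx42_lm_set()/ca91cx42_lm_attach() below:
 *
 *	res = vme_lm_request(vdev);
 *	vme_lm_set(res, 0x60000, VME_A24, VME_USER | VME_DATA);
 *	vme_lm_attach(res, 0, my_callback);
 */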
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
unsigned long long lm_base, u32 aspace, u32 cycle)
{
u32 temp_base, lm_ctl = 0;
int i;
struct ca91cx42_driver *bridge;
struct device *dev;
bridge = lm->parent->driver_priv;
dev = lm->parent->parent;
/* Check the alignment of the location monitor */
temp_base = (u32)lm_base;
if (temp_base & 0xffff) {
dev_err(dev, "Location monitor must be aligned to 64KB "
"boundary");
return -EINVAL;
}
mutex_lock(&lm->mtx);
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor callback attached, "
"can't reset\n");
return -EBUSY;
}
}
switch (aspace) {
case VME_A16:
lm_ctl |= CA91CX42_LM_CTL_AS_A16;
break;
case VME_A24:
lm_ctl |= CA91CX42_LM_CTL_AS_A24;
break;
case VME_A32:
lm_ctl |= CA91CX42_LM_CTL_AS_A32;
break;
default:
mutex_unlock(&lm->mtx);
dev_err(dev, "Invalid address space\n");
return -EINVAL;
break;
}
if (cycle & VME_SUPER)
lm_ctl |= CA91CX42_LM_CTL_SUPR;
if (cycle & VME_USER)
lm_ctl |= CA91CX42_LM_CTL_NPRIV;
if (cycle & VME_PROG)
lm_ctl |= CA91CX42_LM_CTL_PGM;
if (cycle & VME_DATA)
lm_ctl |= CA91CX42_LM_CTL_DATA;
iowrite32(lm_base, bridge->base + LM_BS);
iowrite32(lm_ctl, bridge->base + LM_CTL);
mutex_unlock(&lm->mtx);
return 0;
}
/* Get the configuration of the location monitor and return whether it is
 * enabled or disabled.
 */
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_ctl, enabled = 0;
struct ca91cx42_driver *bridge;
bridge = lm->parent->driver_priv;
mutex_lock(&lm->mtx);
*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
lm_ctl = ioread32(bridge->base + LM_CTL);
if (lm_ctl & CA91CX42_LM_CTL_EN)
enabled = 1;
if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
*aspace = VME_A16;
if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
*aspace = VME_A24;
if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
*aspace = VME_A32;
*cycle = 0;
if (lm_ctl & CA91CX42_LM_CTL_SUPR)
*cycle |= VME_SUPER;
if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
*cycle |= VME_USER;
if (lm_ctl & CA91CX42_LM_CTL_PGM)
*cycle |= VME_PROG;
if (lm_ctl & CA91CX42_LM_CTL_DATA)
*cycle |= VME_DATA;
mutex_unlock(&lm->mtx);
return enabled;
}
/*
* Attach a callback to a specific location monitor.
*
* Callback will be passed the monitor triggered.
*/
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
void (*callback)(int))
{
u32 lm_ctl, tmp;
struct ca91cx42_driver *bridge;
struct device *dev;
bridge = lm->parent->driver_priv;
dev = lm->parent->parent;
mutex_lock(&lm->mtx);
/* Ensure that the location monitor is configured - need PGM or DATA */
lm_ctl = ioread32(bridge->base + LM_CTL);
if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor not properly configured\n");
return -EINVAL;
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Existing callback attached\n");
return -EBUSY;
}
/* Attach callback */
bridge->lm_callback[monitor] = callback;
/* Enable Location Monitor interrupt */
tmp = ioread32(bridge->base + LINT_EN);
tmp |= CA91CX42_LINT_LM[monitor];
iowrite32(tmp, bridge->base + LINT_EN);
/* Ensure that global Location Monitor Enable set */
if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
lm_ctl |= CA91CX42_LM_CTL_EN;
iowrite32(lm_ctl, bridge->base + LM_CTL);
}
mutex_unlock(&lm->mtx);
return 0;
}
/*
* Detach a callback function from a specific location monitor.
*/
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
u32 tmp;
struct ca91cx42_driver *bridge;
bridge = lm->parent->driver_priv;
mutex_lock(&lm->mtx);
/* Disable Location Monitor and ensure previous interrupts are clear */
tmp = ioread32(bridge->base + LINT_EN);
tmp &= ~CA91CX42_LINT_LM[monitor];
iowrite32(tmp, bridge->base + LINT_EN);
iowrite32(CA91CX42_LINT_LM[monitor],
bridge->base + LINT_STAT);
/* Detach callback */
bridge->lm_callback[monitor] = NULL;
/* If all location monitors disabled, disable global Location Monitor */
if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
CA91CX42_LINT_LM3)) == 0) {
tmp = ioread32(bridge->base + LM_CTL);
tmp &= ~CA91CX42_LM_CTL_EN;
iowrite32(tmp, bridge->base + LM_CTL);
}
mutex_unlock(&lm->mtx);
return 0;
}
static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
u32 slot = 0;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
if (!geoid) {
slot = ioread32(bridge->base + VCSR_BS);
slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
} else
slot = geoid;
return (int)slot;
}
void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
dma_addr_t *dma)
{
struct pci_dev *pdev;
/* Find pci_dev container of dev */
pdev = container_of(parent, struct pci_dev, dev);
return pci_alloc_consistent(pdev, size, dma);
}
void ca91cx42_free_consistent(struct device *parent, size_t size, void *vaddr,
dma_addr_t dma)
{
struct pci_dev *pdev;
/* Find pci_dev container of dev */
pdev = container_of(parent, struct pci_dev, dev);
pci_free_consistent(pdev, size, vaddr, dma);
}
static int __init ca91cx42_init(void)
{
return pci_register_driver(&ca91cx42_driver);
}
/*
* Configure CR/CSR space
*
* Access to the CR/CSR can be configured at power-up. The location of the
* CR/CSR registers in the CR/CSR address space is determined by the board's
* Auto-ID or geographic address. This function ensures that the window is
* enabled at an offset consistent with the board's geographic address.
*/
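/*
 * Worked example: each slot owns a 512 kB region of CR/CSR space, so a
 * board in slot 3 is served at offset 3 * 512 kB = 0x180000, the
 * crcsr_addr computed below.
 */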
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
struct pci_dev *pdev)
{
unsigned int crcsr_addr;
int tmp, slot;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
slot = ca91cx42_slot_get(ca91cx42_bridge);
/* Write CSR Base Address if slot ID is supplied as a module param */
if (geoid)
iowrite32(geoid << 27, bridge->base + VCSR_BS);
dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
if (slot == 0) {
dev_err(&pdev->dev, "Slot number is unset, not configuring "
"CR/CSR space\n");
return -EINVAL;
}
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
}
memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
crcsr_addr = slot * (512 * 1024);
iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
tmp = ioread32(bridge->base + VCSR_CTL);
tmp |= CA91CX42_VCSR_CTL_EN;
iowrite32(tmp, bridge->base + VCSR_CTL);
return 0;
}
static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
struct pci_dev *pdev)
{
u32 tmp;
struct ca91cx42_driver *bridge;
bridge = ca91cx42_bridge->driver_priv;
/* Turn off CR/CSR space */
tmp = ioread32(bridge->base + VCSR_CTL);
tmp &= ~CA91CX42_VCSR_CTL_EN;
iowrite32(tmp, bridge->base + VCSR_CTL);
/* Free image */
iowrite32(0, bridge->base + VCSR_TO);
pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
bridge->crcsr_bus);
}
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int retval, i;
u32 data;
struct list_head *pos = NULL;
struct vme_bridge *ca91cx42_bridge;
struct ca91cx42_driver *ca91cx42_device;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
struct vme_lm_resource *lm;
/* We want to support more than one of each bridge so we need to
* dynamically allocate the bridge structure
*/
ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (ca91cx42_bridge == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
retval = -ENOMEM;
goto err_struct;
}
ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
if (ca91cx42_device == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
retval = -ENOMEM;
goto err_driver;
}
ca91cx42_bridge->driver_priv = ca91cx42_device;
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto err_enable;
}
/* Map Registers */
retval = pci_request_regions(pdev, driver_name);
if (retval) {
dev_err(&pdev->dev, "Unable to reserve resources\n");
goto err_resource;
}
/* map registers in BAR 0 */
ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
4096);
if (!ca91cx42_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Check to see if the mapping worked out */
data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
if (data != PCI_VENDOR_ID_TUNDRA) {
dev_err(&pdev->dev, "PCI_ID check failed\n");
retval = -EIO;
goto err_test;
}
/* Initialize wait queues & mutual exclusion flags */
init_waitqueue_head(&ca91cx42_device->dma_queue);
init_waitqueue_head(&ca91cx42_device->iack_queue);
mutex_init(&ca91cx42_device->vme_int);
mutex_init(&ca91cx42_device->vme_rmw);
ca91cx42_bridge->parent = &pdev->dev;
strcpy(ca91cx42_bridge->name, driver_name);
/* Setup IRQ */
retval = ca91cx42_irq_init(ca91cx42_bridge);
if (retval != 0) {
dev_err(&pdev->dev, "Chip Initialization failed.\n");
goto err_irq;
}
/* Add master windows to list */
INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
if (master_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"master resource structure\n");
retval = -ENOMEM;
goto err_master;
}
master_image->parent = ca91cx42_bridge;
spin_lock_init(&master_image->lock);
master_image->locked = 0;
master_image->number = i;
master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
VME_CRCSR | VME_USER1 | VME_USER2;
master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
memset(&master_image->bus_resource, 0,
sizeof(struct resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&ca91cx42_bridge->master_resources);
}
/* Add slave windows to list */
INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
if (slave_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"slave resource structure\n");
retval = -ENOMEM;
goto err_slave;
}
slave_image->parent = ca91cx42_bridge;
mutex_init(&slave_image->mtx);
slave_image->locked = 0;
slave_image->number = i;
slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
VME_USER2;
/* Only windows 0 and 4 support A16 */
if (i == 0 || i == 4)
slave_image->address_attr |= VME_A16;
slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
list_add_tail(&slave_image->list,
&ca91cx42_bridge->slave_resources);
}
/* Add dma engines to list */
INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
for (i = 0; i < CA91C142_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
if (dma_ctrlr == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"dma resource structure\n");
retval = -ENOMEM;
goto err_dma;
}
dma_ctrlr->parent = ca91cx42_bridge;
mutex_init(&dma_ctrlr->mtx);
dma_ctrlr->locked = 0;
dma_ctrlr->number = i;
dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
VME_DMA_MEM_TO_VME;
INIT_LIST_HEAD(&dma_ctrlr->pending);
INIT_LIST_HEAD(&dma_ctrlr->running);
list_add_tail(&dma_ctrlr->list,
&ca91cx42_bridge->dma_resources);
}
/* Add location monitor to list */
INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"location monitor resource structure\n");
retval = -ENOMEM;
goto err_lm;
}
lm->parent = ca91cx42_bridge;
mutex_init(&lm->mtx);
lm->locked = 0;
lm->number = 1;
lm->monitors = 4;
list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
ca91cx42_bridge->slave_get = ca91cx42_slave_get;
ca91cx42_bridge->slave_set = ca91cx42_slave_set;
ca91cx42_bridge->master_get = ca91cx42_master_get;
ca91cx42_bridge->master_set = ca91cx42_master_set;
ca91cx42_bridge->master_read = ca91cx42_master_read;
ca91cx42_bridge->master_write = ca91cx42_master_write;
ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
ca91cx42_bridge->irq_set = ca91cx42_irq_set;
ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
ca91cx42_bridge->lm_set = ca91cx42_lm_set;
ca91cx42_bridge->lm_get = ca91cx42_lm_get;
ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
ca91cx42_bridge->slot_get = ca91cx42_slot_get;
ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
data = ioread32(ca91cx42_device->base + MISC_CTL);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
dev_info(&pdev->dev, "Slot ID is %d\n",
ca91cx42_slot_get(ca91cx42_bridge));
if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
/* Need to save ca91cx42_bridge pointer locally in link list for use in
* ca91cx42_remove()
*/
retval = vme_register_bridge(ca91cx42_bridge);
if (retval != 0) {
dev_err(&pdev->dev, "Chip Registration failed.\n");
goto err_reg;
}
pci_set_drvdata(pdev, ca91cx42_bridge);
return 0;
err_reg:
ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->lm_resources) {
lm = list_entry(pos, struct vme_lm_resource, list);
list_del(pos);
kfree(lm);
}
err_dma:
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->dma_resources) {
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
err_slave:
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->slave_resources) {
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
err_master:
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
kfree(master_image);
}
ca91cx42_irq_exit(ca91cx42_bridge, pdev);
err_irq:
err_test:
iounmap(ca91cx42_device->base);
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err_enable:
kfree(ca91cx42_device);
err_driver:
kfree(ca91cx42_bridge);
err_struct:
return retval;
}
static void ca91cx42_remove(struct pci_dev *pdev)
{
struct list_head *pos = NULL;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
struct vme_lm_resource *lm;
struct ca91cx42_driver *bridge;
struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
bridge = ca91cx42_bridge->driver_priv;
/* Turn off Ints */
iowrite32(0, bridge->base + LINT_EN);
/* Turn off the windows */
iowrite32(0x00800000, bridge->base + LSI0_CTL);
iowrite32(0x00800000, bridge->base + LSI1_CTL);
iowrite32(0x00800000, bridge->base + LSI2_CTL);
iowrite32(0x00800000, bridge->base + LSI3_CTL);
iowrite32(0x00800000, bridge->base + LSI4_CTL);
iowrite32(0x00800000, bridge->base + LSI5_CTL);
iowrite32(0x00800000, bridge->base + LSI6_CTL);
iowrite32(0x00800000, bridge->base + LSI7_CTL);
iowrite32(0x00F00000, bridge->base + VSI0_CTL);
iowrite32(0x00F00000, bridge->base + VSI1_CTL);
iowrite32(0x00F00000, bridge->base + VSI2_CTL);
iowrite32(0x00F00000, bridge->base + VSI3_CTL);
iowrite32(0x00F00000, bridge->base + VSI4_CTL);
iowrite32(0x00F00000, bridge->base + VSI5_CTL);
iowrite32(0x00F00000, bridge->base + VSI6_CTL);
iowrite32(0x00F00000, bridge->base + VSI7_CTL);
vme_unregister_bridge(ca91cx42_bridge);
ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->lm_resources) {
lm = list_entry(pos, struct vme_lm_resource, list);
list_del(pos);
kfree(lm);
}
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->dma_resources) {
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->slave_resources) {
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
/* resources are stored in link list */
list_for_each(pos, &ca91cx42_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
kfree(master_image);
}
ca91cx42_irq_exit(ca91cx42_bridge, pdev);
iounmap(bridge->base);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(ca91cx42_bridge);
}
static void __exit ca91cx42_exit(void)
{
pci_unregister_driver(&ca91cx42_driver);
}
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);
MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");
module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
| gpl-2.0 |
sbreen94/Zeus_S4_r3 | arch/arm/mach-cns3xxx/pm.c | 5153 | 2878 | /*
* Copyright 2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <mach/cns3xxx.h>
#include <mach/pm.h>
#include "core.h"
void cns3xxx_pwr_clk_en(unsigned int block)
{
u32 reg = __raw_readl(PM_CLK_GATE_REG);
reg |= (block & PM_CLK_GATE_REG_MASK);
__raw_writel(reg, PM_CLK_GATE_REG);
}
EXPORT_SYMBOL(cns3xxx_pwr_clk_en);
void cns3xxx_pwr_clk_dis(unsigned int block)
{
u32 reg = __raw_readl(PM_CLK_GATE_REG);
reg &= ~(block & PM_CLK_GATE_REG_MASK);
__raw_writel(reg, PM_CLK_GATE_REG);
}
EXPORT_SYMBOL(cns3xxx_pwr_clk_dis);
void cns3xxx_pwr_power_up(unsigned int block)
{
u32 reg = __raw_readl(PM_PLL_HM_PD_CTRL_REG);
reg &= ~(block & CNS3XXX_PWR_PLL_ALL);
__raw_writel(reg, PM_PLL_HM_PD_CTRL_REG);
/* Wait 300us for the PLL output clock to lock. */
udelay(300);
}
EXPORT_SYMBOL(cns3xxx_pwr_power_up);
void cns3xxx_pwr_power_down(unsigned int block)
{
u32 reg = __raw_readl(PM_PLL_HM_PD_CTRL_REG);
/* write '1' to power down */
reg |= (block & CNS3XXX_PWR_PLL_ALL);
__raw_writel(reg, PM_PLL_HM_PD_CTRL_REG);
}
EXPORT_SYMBOL(cns3xxx_pwr_power_down);
static void cns3xxx_pwr_soft_rst_force(unsigned int block)
{
u32 reg = __raw_readl(PM_SOFT_RST_REG);
/*
 * Bits 0, 28 and 29 are reset by programming low; all other blocks
 * are reset by programming the bit low and then high again.
 */
if (block & 0x30000001) {
reg &= ~(block & PM_SOFT_RST_REG_MASK);
} else {
reg &= ~(block & PM_SOFT_RST_REG_MASK);
__raw_writel(reg, PM_SOFT_RST_REG);
reg |= (block & PM_SOFT_RST_REG_MASK);
}
__raw_writel(reg, PM_SOFT_RST_REG);
}
EXPORT_SYMBOL(cns3xxx_pwr_soft_rst_force);
void cns3xxx_pwr_soft_rst(unsigned int block)
{
static unsigned int soft_reset;
if (soft_reset & block) {
/* SPI/I2C/GPIO use the same block, reset once. */
return;
} else {
soft_reset |= block;
}
cns3xxx_pwr_soft_rst_force(block);
}
EXPORT_SYMBOL(cns3xxx_pwr_soft_rst);
void cns3xxx_restart(char mode, const char *cmd)
{
/*
 * To reset, trigger the global software reset through the power
 * management block's soft reset register.
 */
cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(GLOBAL));
}
/*
* cns3xxx_cpu_clock - return CPU/L2 clock
* aclk: cpu clock/2
* hclk: cpu clock/4
* pclk: cpu clock/8
*/
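/*
 * Worked example: cpu_sel = 6 and div_sel = 0 give
 * (300 + (6 / 3) * 100 + (6 % 3) * 33) >> 0 = 500 MHz, hence
 * aclk = 250 MHz, hclk = 125 MHz and pclk = 62.5 MHz.
 */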
int cns3xxx_cpu_clock(void)
{
u32 reg = __raw_readl(PM_CLK_CTRL_REG);
int cpu;
int cpu_sel;
int div_sel;
cpu_sel = (reg >> PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL) & 0xf;
div_sel = (reg >> PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV) & 0x3;
cpu = (300 + ((cpu_sel / 3) * 100) + ((cpu_sel % 3) * 33)) >> div_sel;
return cpu;
}
EXPORT_SYMBOL(cns3xxx_cpu_clock);
atomic_t usb_pwr_ref = ATOMIC_INIT(0);
EXPORT_SYMBOL(usb_pwr_ref);
| gpl-2.0 |
Buckmarble/Elite_M8 | drivers/gpu/drm/i915/dvo_ch7xxx.c | 5665 | 7707 | /**************************************************************************
Copyright © 2006 Dave Airlie
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#define CH7xxx_REG_VID 0x4a
#define CH7xxx_REG_DID 0x4b
#define CH7011_VID 0x83 /* 7010 as well */
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
#define CH7xxx_NUM_REGS 0x4c
#define CH7xxx_CM 0x1c
#define CH7xxx_CM_XCM (1<<0)
#define CH7xxx_CM_MCP (1<<2)
#define CH7xxx_INPUT_CLOCK 0x1d
#define CH7xxx_GPIO 0x1e
#define CH7xxx_GPIO_HPIR (1<<3)
#define CH7xxx_IDF 0x1f
#define CH7xxx_IDF_HSP (1<<3)
#define CH7xxx_IDF_VSP (1<<4)
#define CH7xxx_CONNECTION_DETECT 0x20
#define CH7xxx_CDET_DVI (1<<5)
#define CH7301_DAC_CNTL 0x21
#define CH7301_HOTPLUG 0x23
#define CH7xxx_TCTL 0x31
#define CH7xxx_TVCO 0x32
#define CH7xxx_TPCP 0x33
#define CH7xxx_TPD 0x34
#define CH7xxx_TPVT 0x35
#define CH7xxx_TLPF 0x36
#define CH7xxx_TCT 0x37
#define CH7301_TEST_PATTERN 0x48
#define CH7xxx_PM 0x49
#define CH7xxx_PM_FPD (1<<0)
#define CH7301_PM_DACPD0 (1<<1)
#define CH7301_PM_DACPD1 (1<<2)
#define CH7301_PM_DACPD2 (1<<3)
#define CH7xxx_PM_DVIL (1<<6)
#define CH7xxx_PM_DVIP (1<<7)
#define CH7301_SYNC_POLARITY 0x56
#define CH7301_SYNC_RGB_YUV (1<<0)
#define CH7301_SYNC_POL_DVI (1<<5)
/** @file
* driver for the Chrontel 7xxx DVI chip over DVO.
*/
static struct ch7xxx_id_struct {
uint8_t vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
struct ch7xxx_priv {
bool quiet;
};
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
if (ch7xxx_ids[i].vid == vid)
return ch7xxx_ids[i].name;
}
return NULL;
}
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
}
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
/** Writes an 8 bit register */
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
char *name;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
goto out;
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
if (device != CH7xxx_DID) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
ch7xxx->quiet = false;
DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
name, vendor, device);
return true;
out:
kfree(ch7xxx);
return false;
}
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
uint8_t cdet, orig_pm, pm;
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
pm = orig_pm;
pm &= ~CH7xxx_PM_FPD;
pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
ch7xxx_writeb(dvo, CH7xxx_PM, pm);
ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
if (cdet & CH7xxx_CDET_DVI)
return connector_status_connected;
return connector_status_disconnected;
}
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint8_t tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
tpcp = 0x08;
tpd = 0x16;
tlpf = 0x60;
} else {
tvco = 0x2d;
tpcp = 0x06;
tpd = 0x26;
tlpf = 0xa0;
}
ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
/* set the CH7xxx power state */
static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
{
if (mode == DRM_MODE_DPMS_ON)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
if ((i % 8) == 0)
DRM_LOG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
DRM_LOG_KMS("%02X ", val);
}
}
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
if (ch7xxx) {
kfree(ch7xxx);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ch7xxx_ops = {
.init = ch7xxx_init,
.detect = ch7xxx_detect,
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};
| gpl-2.0 |
psachin/apc-rock-II-kernel | arch/sparc/kernel/viohs.c | 7457 | 18129 | /* viohs.c: LDOM Virtual I/O handshake helper layer.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/ldc.h>
#include <asm/vio.h>
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
int err, limit = 1000;
err = -EINVAL;
while (limit-- > 0) {
err = ldc_write(vio->lp, data, len);
if (!err || (err != -EAGAIN))
break;
udelay(1);
}
return err;
}
EXPORT_SYMBOL(vio_ldc_send);
static int send_ctrl(struct vio_driver_state *vio,
struct vio_msg_tag *tag, int len)
{
tag->sid = vio_send_sid(vio);
return vio_ldc_send(vio, tag, len);
}
static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
tag->type = type;
tag->stype = stype;
tag->stype_env = stype_env;
}
static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
struct vio_ver_info pkt;
vio->_local_sid = (u32) sched_clock();
memset(&pkt, 0, sizeof(pkt));
init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
pkt.major = major;
pkt.minor = minor;
pkt.dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
major, minor, vio->dev_class);
return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}
static int start_handshake(struct vio_driver_state *vio)
{
int err;
viodbg(HS, "START HANDSHAKE\n");
vio->hs_state = VIO_HS_INVALID;
err = send_version(vio,
vio->ver_table[0].major,
vio->ver_table[0].minor);
if (err < 0)
return err;
return 0;
}
static void flush_rx_dring(struct vio_driver_state *vio)
{
struct vio_dring_state *dr;
u64 ident;
BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));
dr = &vio->drings[VIO_DRIVER_RX_RING];
ident = dr->ident;
BUG_ON(!vio->desc_buf);
kfree(vio->desc_buf);
vio->desc_buf = NULL;
memset(dr, 0, sizeof(*dr));
dr->ident = ident;
}
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
if (event == LDC_EVENT_UP) {
vio->hs_state = VIO_HS_INVALID;
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
vio->dr_state = (VIO_DR_STATE_TXREQ |
VIO_DR_STATE_RXREQ);
break;
case VDEV_DISK:
vio->dr_state = VIO_DR_STATE_TXREQ;
break;
case VDEV_DISK_SERVER:
vio->dr_state = VIO_DR_STATE_RXREQ;
break;
}
start_handshake(vio);
} else if (event == LDC_EVENT_RESET) {
vio->hs_state = VIO_HS_INVALID;
if (vio->dr_state & VIO_DR_STATE_RXREG)
flush_rx_dring(vio);
vio->dr_state = 0x00;
memset(&vio->ver, 0, sizeof(vio->ver));
ldc_disconnect(vio->lp);
}
}
EXPORT_SYMBOL(vio_link_state_change);
static int handshake_failure(struct vio_driver_state *vio)
{
struct vio_dring_state *dr;
/* XXX Put policy here... Perhaps start a timer to fire
* XXX in 100 ms, which will bring the link up and retry
* XXX the handshake.
*/
viodbg(HS, "HANDSHAKE FAILURE\n");
vio->dr_state &= ~(VIO_DR_STATE_TXREG |
VIO_DR_STATE_RXREG);
dr = &vio->drings[VIO_DRIVER_RX_RING];
memset(dr, 0, sizeof(*dr));
kfree(vio->desc_buf);
vio->desc_buf = NULL;
vio->desc_buf_len = 0;
vio->hs_state = VIO_HS_INVALID;
return -ECONNRESET;
}
static int process_unknown(struct vio_driver_state *vio, void *arg)
{
struct vio_msg_tag *pkt = arg;
viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
vio->vdev->channel_id);
ldc_disconnect(vio->lp);
return -ECONNRESET;
}
static int send_dreg(struct vio_driver_state *vio)
{
struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
union {
struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
dr->ncookies)];
} u;
int i;
memset(&u, 0, sizeof(u));
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries;
u.pkt.descr_size = dr->entry_size;
u.pkt.options = VIO_TX_DRING;
u.pkt.num_cookies = dr->ncookies;
viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
"ncookies[%u]\n",
u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
u.pkt.num_cookies);
for (i = 0; i < dr->ncookies; i++) {
u.pkt.cookies[i] = dr->cookies[i];
viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
i,
(unsigned long long) u.pkt.cookies[i].cookie_addr,
(unsigned long long) u.pkt.cookies[i].cookie_size);
}
return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}
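/*
 * Illustrative note, not in the original file: the union above uses a
 * GNU C variable-length array so that sizeof(u), passed as the send
 * length, covers the fixed struct vio_dring_register header plus
 * dr->ncookies trailing struct ldc_trans_cookie slots, i.e. exactly
 * the size of the cookie-bearing DRING_REG packet being built.
 */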
static int send_rdx(struct vio_driver_state *vio)
{
struct vio_rdx pkt;
memset(&pkt, 0, sizeof(pkt));
init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);
viodbg(HS, "SEND RDX INFO\n");
return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}
static int send_attr(struct vio_driver_state *vio)
{
return vio->ops->send_attr(vio);
}
static struct vio_version *find_by_major(struct vio_driver_state *vio,
u16 major)
{
struct vio_version *ret = NULL;
int i;
for (i = 0; i < vio->ver_table_entries; i++) {
struct vio_version *v = &vio->ver_table[i];
if (v->major <= major) {
ret = v;
break;
}
}
return ret;
}
static int process_ver_info(struct vio_driver_state *vio,
struct vio_ver_info *pkt)
{
struct vio_version *vap;
int err;
viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
pkt->major, pkt->minor, pkt->dev_class);
if (vio->hs_state != VIO_HS_INVALID) {
/* XXX Perhaps invoke start_handshake? XXX */
memset(&vio->ver, 0, sizeof(vio->ver));
vio->hs_state = VIO_HS_INVALID;
}
vap = find_by_major(vio, pkt->major);
vio->_peer_sid = pkt->tag.sid;
if (!vap) {
pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt->major = 0;
pkt->minor = 0;
viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
} else if (vap->major != pkt->major) {
pkt->tag.stype = VIO_SUBTYPE_NACK;
pkt->major = vap->major;
pkt->minor = vap->minor;
viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
} else {
struct vio_version ver = {
.major = pkt->major,
.minor = pkt->minor,
};
if (ver.minor > vap->minor)
ver.minor = vap->minor;
pkt->minor = ver.minor;
pkt->tag.stype = VIO_SUBTYPE_ACK;
viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
if (err > 0) {
vio->ver = ver;
vio->hs_state = VIO_HS_GOTVERS;
}
}
if (err < 0)
return handshake_failure(vio);
return 0;
}
static int process_ver_ack(struct vio_driver_state *vio,
struct vio_ver_info *pkt)
{
viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
pkt->major, pkt->minor, pkt->dev_class);
if (vio->hs_state & VIO_HS_GOTVERS) {
if (vio->ver.major != pkt->major ||
vio->ver.minor != pkt->minor) {
pkt->tag.stype = VIO_SUBTYPE_NACK;
(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
return handshake_failure(vio);
}
} else {
vio->ver.major = pkt->major;
vio->ver.minor = pkt->minor;
vio->hs_state = VIO_HS_GOTVERS;
}
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_DISK:
if (send_attr(vio) < 0)
return handshake_failure(vio);
break;
default:
break;
}
return 0;
}
static int process_ver_nack(struct vio_driver_state *vio,
struct vio_ver_info *pkt)
{
struct vio_version *nver;
viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
pkt->major, pkt->minor, pkt->dev_class);
if (pkt->major == 0 && pkt->minor == 0)
return handshake_failure(vio);
nver = find_by_major(vio, pkt->major);
if (!nver)
return handshake_failure(vio);
if (send_version(vio, nver->major, nver->minor) < 0)
return handshake_failure(vio);
return 0;
}
static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
switch (pkt->tag.stype) {
case VIO_SUBTYPE_INFO:
return process_ver_info(vio, pkt);
case VIO_SUBTYPE_ACK:
return process_ver_ack(vio, pkt);
case VIO_SUBTYPE_NACK:
return process_ver_nack(vio, pkt);
default:
return handshake_failure(vio);
}
}
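/*
 * Illustrative negotiation trace with hypothetical numbers, not from
 * the original file: if our ver_table is { {1,6}, {1,0} } and the
 * peer sends VER INFO maj[2] min[0], find_by_major() picks {1,6} and
 * we NACK with maj[1] min[6]; when the peer retries with major 1 and
 * some minor x, the majors match and we ACK with min(x, 6), so both
 * ends settle on the lower of the two minors.
 */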
static int process_attr(struct vio_driver_state *vio, void *pkt)
{
int err;
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
err = vio->ops->handle_attr(vio, pkt);
if (err < 0) {
return handshake_failure(vio);
} else {
vio->hs_state |= VIO_HS_GOT_ATTR;
if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
!(vio->hs_state & VIO_HS_SENT_DREG)) {
if (send_dreg(vio) < 0)
return handshake_failure(vio);
vio->hs_state |= VIO_HS_SENT_DREG;
}
}
return 0;
}
static int all_drings_registered(struct vio_driver_state *vio)
{
int need_rx, need_tx;
need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);
if (need_rx &&
!(vio->dr_state & VIO_DR_STATE_RXREG))
return 0;
if (need_tx &&
!(vio->dr_state & VIO_DR_STATE_TXREG))
return 0;
return 1;
}
static int process_dreg_info(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
struct vio_dring_state *dr;
int i, len;
viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
goto send_nack;
if (vio->dr_state & VIO_DR_STATE_RXREG)
goto send_nack;
BUG_ON(vio->desc_buf);
vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
if (!vio->desc_buf)
goto send_nack;
vio->desc_buf_len = pkt->descr_size;
dr = &vio->drings[VIO_DRIVER_RX_RING];
dr->num_entries = pkt->num_descr;
dr->entry_size = pkt->descr_size;
dr->ncookies = pkt->num_cookies;
for (i = 0; i < dr->ncookies; i++) {
dr->cookies[i] = pkt->cookies[i];
viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
i,
(unsigned long long)
pkt->cookies[i].cookie_addr,
(unsigned long long)
pkt->cookies[i].cookie_size);
}
pkt->tag.stype = VIO_SUBTYPE_ACK;
pkt->dring_ident = ++dr->ident;
viodbg(HS, "SEND DRING_REG ACK ident[%llx]\n",
(unsigned long long) pkt->dring_ident);
len = (sizeof(*pkt) +
(dr->ncookies * sizeof(struct ldc_trans_cookie)));
if (send_ctrl(vio, &pkt->tag, len) < 0)
goto send_nack;
vio->dr_state |= VIO_DR_STATE_RXREG;
return 0;
send_nack:
pkt->tag.stype = VIO_SUBTYPE_NACK;
viodbg(HS, "SEND DRING_REG NACK\n");
(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
return handshake_failure(vio);
}
static int process_dreg_ack(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
struct vio_dring_state *dr;
viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
dr = &vio->drings[VIO_DRIVER_TX_RING];
if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
return handshake_failure(vio);
dr->ident = pkt->dring_ident;
vio->dr_state |= VIO_DR_STATE_TXREG;
if (all_drings_registered(vio)) {
if (send_rdx(vio) < 0)
return handshake_failure(vio);
vio->hs_state = VIO_HS_SENT_RDX;
}
return 0;
}
static int process_dreg_nack(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
(unsigned long long) pkt->dring_ident,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
return handshake_failure(vio);
}
static int process_dreg(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
switch (pkt->tag.stype) {
case VIO_SUBTYPE_INFO:
return process_dreg_info(vio, pkt);
case VIO_SUBTYPE_ACK:
return process_dreg_ack(vio, pkt);
case VIO_SUBTYPE_NACK:
return process_dreg_nack(vio, pkt);
default:
return handshake_failure(vio);
}
}
static int process_dunreg(struct vio_driver_state *vio,
struct vio_dring_unregister *pkt)
{
struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];
viodbg(HS, "GOT DRING_UNREG\n");
if (pkt->dring_ident != dr->ident)
return 0;
vio->dr_state &= ~VIO_DR_STATE_RXREG;
memset(dr, 0, sizeof(*dr));
kfree(vio->desc_buf);
vio->desc_buf = NULL;
vio->desc_buf_len = 0;
return 0;
}
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
viodbg(HS, "GOT RDX INFO\n");
pkt->tag.stype = VIO_SUBTYPE_ACK;
viodbg(HS, "SEND RDX ACK\n");
if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
return handshake_failure(vio);
vio->hs_state |= VIO_HS_SENT_RDX_ACK;
return 0;
}
static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
viodbg(HS, "GOT RDX ACK\n");
if (!(vio->hs_state & VIO_HS_SENT_RDX))
return handshake_failure(vio);
vio->hs_state |= VIO_HS_GOT_RDX_ACK;
return 0;
}
static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
viodbg(HS, "GOT RDX NACK\n");
return handshake_failure(vio);
}
static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
if (!all_drings_registered(vio))
		return handshake_failure(vio);
switch (pkt->tag.stype) {
case VIO_SUBTYPE_INFO:
return process_rdx_info(vio, pkt);
case VIO_SUBTYPE_ACK:
return process_rdx_ack(vio, pkt);
case VIO_SUBTYPE_NACK:
return process_rdx_nack(vio, pkt);
default:
return handshake_failure(vio);
}
}
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
struct vio_msg_tag *tag = pkt;
u8 prev_state = vio->hs_state;
int err;
switch (tag->stype_env) {
case VIO_VER_INFO:
err = process_ver(vio, pkt);
break;
case VIO_ATTR_INFO:
err = process_attr(vio, pkt);
break;
case VIO_DRING_REG:
err = process_dreg(vio, pkt);
break;
case VIO_DRING_UNREG:
err = process_dunreg(vio, pkt);
break;
case VIO_RDX:
err = process_rdx(vio, pkt);
break;
default:
err = process_unknown(vio, pkt);
break;
}
if (!err &&
vio->hs_state != prev_state &&
(vio->hs_state & VIO_HS_COMPLETE))
vio->ops->handshake_complete(vio);
return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);
/* The issue is that the Solaris virtual disk server just mirrors the
* SID values it gets from the client peer. So we work around that
* here in vio_{validate,send}_sid() so that the drivers don't need
* to be aware of this crap.
*/
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
u32 sid;
/* Always let VERSION+INFO packets through unchecked, they
* define the new SID.
*/
if (tp->type == VIO_TYPE_CTRL &&
tp->stype == VIO_SUBTYPE_INFO &&
tp->stype_env == VIO_VER_INFO)
return 0;
/* Ok, now figure out which SID to use. */
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
case VDEV_DISK_SERVER:
default:
sid = vio->_peer_sid;
break;
case VDEV_DISK:
sid = vio->_local_sid;
break;
}
if (sid == tp->sid)
return 0;
viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
tp->sid, vio->_peer_sid, vio->_local_sid);
return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);
u32 vio_send_sid(struct vio_driver_state *vio)
{
switch (vio->dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
case VDEV_DISK:
default:
return vio->_local_sid;
case VDEV_DISK_SERVER:
return vio->_peer_sid;
}
}
EXPORT_SYMBOL(vio_send_sid);
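/*
 * Illustrative example, not in the original file: for a VDEV_DISK
 * client talking to a Solaris disk server, the server mirrors our SID
 * back, so both vio_validate_sid() and vio_send_sid() use _local_sid;
 * the network classes instead validate incoming tags against
 * _peer_sid while still stamping outgoing tags with _local_sid, and
 * a VDEV_DISK_SERVER mirrors by sending _peer_sid.
 */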
int vio_ldc_alloc(struct vio_driver_state *vio,
struct ldc_channel_config *base_cfg,
void *event_arg)
{
struct ldc_channel_config cfg = *base_cfg;
struct ldc_channel *lp;
cfg.tx_irq = vio->vdev->tx_irq;
cfg.rx_irq = vio->vdev->rx_irq;
lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
if (IS_ERR(lp))
return PTR_ERR(lp);
vio->lp = lp;
return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);
void vio_ldc_free(struct vio_driver_state *vio)
{
ldc_free(vio->lp);
vio->lp = NULL;
kfree(vio->desc_buf);
vio->desc_buf = NULL;
vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);
void vio_port_up(struct vio_driver_state *vio)
{
unsigned long flags;
int err, state;
spin_lock_irqsave(&vio->lock, flags);
state = ldc_state(vio->lp);
err = 0;
if (state == LDC_STATE_INIT) {
err = ldc_bind(vio->lp, vio->name);
if (err)
printk(KERN_WARNING "%s: Port %lu bind failed, "
"err=%d\n",
vio->name, vio->vdev->channel_id, err);
}
if (!err) {
err = ldc_connect(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu connect failed, "
"err=%d\n",
vio->name, vio->vdev->channel_id, err);
}
if (err) {
unsigned long expires = jiffies + HZ;
expires = round_jiffies(expires);
mod_timer(&vio->timer, expires);
}
spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);
static void vio_port_timer(unsigned long _arg)
{
struct vio_driver_state *vio = (struct vio_driver_state *) _arg;
vio_port_up(vio);
}
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
u8 dev_class, struct vio_version *ver_table,
int ver_table_size, struct vio_driver_ops *ops,
char *name)
{
switch (dev_class) {
case VDEV_NETWORK:
case VDEV_NETWORK_SWITCH:
case VDEV_DISK:
case VDEV_DISK_SERVER:
break;
default:
return -EINVAL;
}
if (!ops->send_attr ||
!ops->handle_attr ||
!ops->handshake_complete)
return -EINVAL;
if (!ver_table || ver_table_size < 0)
return -EINVAL;
if (!name)
return -EINVAL;
spin_lock_init(&vio->lock);
vio->name = name;
vio->dev_class = dev_class;
vio->vdev = vdev;
vio->ver_table = ver_table;
vio->ver_table_entries = ver_table_size;
vio->ops = ops;
setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);
return 0;
}
EXPORT_SYMBOL(vio_driver_init);
| gpl-2.0 |
joryb/android_kernel_samsung_jf | sound/soc/pxa/spitz.c | 7713 | 9626 | /*
* spitz.c -- SoC audio for Sharp SL-Cxx00 models Spitz, Borzoi and Akita
*
* Copyright 2005 Wolfson Microelectronics PLC.
* Copyright 2005 Openedhand Ltd.
*
* Authors: Liam Girdwood <lrg@slimlogic.co.uk>
* Richard Purdie <richard@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <mach/spitz.h>
#include "../codecs/wm8750.h"
#include "pxa2xx-i2s.h"
#define SPITZ_HP 0
#define SPITZ_MIC 1
#define SPITZ_LINE 2
#define SPITZ_HEADSET 3
#define SPITZ_HP_OFF 4
#define SPITZ_SPK_ON 0
#define SPITZ_SPK_OFF 1
/* audio clock in Hz - rounded from 12.235MHz */
#define SPITZ_AUDIO_CLOCK 12288000
static int spitz_jack_func;
static int spitz_spk_func;
static int spitz_mic_gpio;
static void spitz_ext_control(struct snd_soc_dapm_context *dapm)
{
if (spitz_spk_func == SPITZ_SPK_ON)
snd_soc_dapm_enable_pin(dapm, "Ext Spk");
else
snd_soc_dapm_disable_pin(dapm, "Ext Spk");
/* set up jack connection */
switch (spitz_jack_func) {
case SPITZ_HP:
/* enable and unmute hp jack, disable mic bias */
snd_soc_dapm_disable_pin(dapm, "Headset Jack");
snd_soc_dapm_disable_pin(dapm, "Mic Jack");
snd_soc_dapm_disable_pin(dapm, "Line Jack");
snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
gpio_set_value(SPITZ_GPIO_MUTE_L, 1);
gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
break;
case SPITZ_MIC:
/* enable mic jack and bias, mute hp */
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
snd_soc_dapm_disable_pin(dapm, "Headset Jack");
snd_soc_dapm_disable_pin(dapm, "Line Jack");
snd_soc_dapm_enable_pin(dapm, "Mic Jack");
gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
break;
case SPITZ_LINE:
/* enable line jack, disable mic bias and mute hp */
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
snd_soc_dapm_disable_pin(dapm, "Headset Jack");
snd_soc_dapm_disable_pin(dapm, "Mic Jack");
snd_soc_dapm_enable_pin(dapm, "Line Jack");
gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
break;
case SPITZ_HEADSET:
/* enable and unmute headset jack enable mic bias, mute L hp */
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
snd_soc_dapm_enable_pin(dapm, "Mic Jack");
snd_soc_dapm_disable_pin(dapm, "Line Jack");
snd_soc_dapm_enable_pin(dapm, "Headset Jack");
gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
break;
case SPITZ_HP_OFF:
/* jack removed, everything off */
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
snd_soc_dapm_disable_pin(dapm, "Headset Jack");
snd_soc_dapm_disable_pin(dapm, "Mic Jack");
snd_soc_dapm_disable_pin(dapm, "Line Jack");
gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
break;
}
snd_soc_dapm_sync(dapm);
}
static int spitz_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
mutex_lock(&codec->mutex);
/* check the jack status at stream startup */
spitz_ext_control(&codec->dapm);
mutex_unlock(&codec->mutex);
return 0;
}
static int spitz_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int clk = 0;
int ret = 0;
switch (params_rate(params)) {
case 8000:
case 16000:
case 48000:
case 96000:
clk = 12288000;
break;
case 11025:
case 22050:
case 44100:
clk = 11289600;
break;
}
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
/* set the I2S system clock as input (unused) */
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
return 0;
}
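/*
 * Worked check, illustrative and not in the original file: both MCLK
 * values above are 256 * fs for their rate family, e.g.
 * 48000 * 256 = 12288000 and 44100 * 256 = 11289600; the 8 kHz and
 * 96 kHz cases reuse 12.288 MHz at 1536fs and 128fs respectively.
 */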
static struct snd_soc_ops spitz_ops = {
.startup = spitz_startup,
.hw_params = spitz_hw_params,
};
static int spitz_get_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = spitz_jack_func;
return 0;
}
static int spitz_set_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (spitz_jack_func == ucontrol->value.integer.value[0])
return 0;
spitz_jack_func = ucontrol->value.integer.value[0];
spitz_ext_control(&card->dapm);
return 1;
}
static int spitz_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = spitz_spk_func;
return 0;
}
static int spitz_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (spitz_spk_func == ucontrol->value.integer.value[0])
return 0;
spitz_spk_func = ucontrol->value.integer.value[0];
spitz_ext_control(&card->dapm);
return 1;
}
static int spitz_mic_bias(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value_cansleep(spitz_mic_gpio, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
/* spitz machine dapm widgets */
static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_LINE("Line Jack", NULL),
/* headset is a mic and mono headphone */
SND_SOC_DAPM_HP("Headset Jack", NULL),
};
/* Spitz machine audio_map */
static const struct snd_soc_dapm_route spitz_audio_map[] = {
/* headphone connected to LOUT1, ROUT1 */
{"Headphone Jack", NULL, "LOUT1"},
{"Headphone Jack", NULL, "ROUT1"},
/* headset connected to ROUT1 and LINPUT1 with bias (def below) */
{"Headset Jack", NULL, "ROUT1"},
/* ext speaker connected to LOUT2, ROUT2 */
{"Ext Spk", NULL , "ROUT2"},
{"Ext Spk", NULL , "LOUT2"},
/* mic is connected to input 1 - with bias */
{"LINPUT1", NULL, "Mic Bias"},
{"Mic Bias", NULL, "Mic Jack"},
/* line is connected to input 1 - no bias */
{"LINPUT1", NULL, "Line Jack"},
};
static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
"Off"};
static const char *spk_function[] = {"On", "Off"};
static const struct soc_enum spitz_enum[] = {
SOC_ENUM_SINGLE_EXT(5, jack_function),
SOC_ENUM_SINGLE_EXT(2, spk_function),
};
static const struct snd_kcontrol_new wm8750_spitz_controls[] = {
SOC_ENUM_EXT("Jack Function", spitz_enum[0], spitz_get_jack,
spitz_set_jack),
SOC_ENUM_EXT("Speaker Function", spitz_enum[1], spitz_get_spk,
spitz_set_spk),
};
/*
* Logic for a wm8750 as connected on a Sharp SL-Cxx00 Device
*/
static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
/* NC codec pins */
snd_soc_dapm_nc_pin(dapm, "RINPUT1");
snd_soc_dapm_nc_pin(dapm, "LINPUT2");
snd_soc_dapm_nc_pin(dapm, "RINPUT2");
snd_soc_dapm_nc_pin(dapm, "LINPUT3");
snd_soc_dapm_nc_pin(dapm, "RINPUT3");
snd_soc_dapm_nc_pin(dapm, "OUT3");
snd_soc_dapm_nc_pin(dapm, "MONO1");
return 0;
}
/* spitz digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link spitz_dai = {
.name = "wm8750",
.stream_name = "WM8750",
.cpu_dai_name = "pxa2xx-i2s",
.codec_dai_name = "wm8750-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm8750.0-001b",
.init = spitz_wm8750_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &spitz_ops,
};
/* spitz audio machine driver */
static struct snd_soc_card snd_soc_spitz = {
.name = "Spitz",
.owner = THIS_MODULE,
.dai_link = &spitz_dai,
.num_links = 1,
.controls = wm8750_spitz_controls,
.num_controls = ARRAY_SIZE(wm8750_spitz_controls),
.dapm_widgets = wm8750_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = spitz_audio_map,
.num_dapm_routes = ARRAY_SIZE(spitz_audio_map),
};
static struct platform_device *spitz_snd_device;
static int __init spitz_init(void)
{
int ret;
if (!(machine_is_spitz() || machine_is_borzoi() || machine_is_akita()))
return -ENODEV;
if (machine_is_borzoi() || machine_is_spitz())
spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS;
else
spitz_mic_gpio = AKITA_GPIO_MIC_BIAS;
ret = gpio_request(spitz_mic_gpio, "MIC GPIO");
if (ret)
goto err1;
ret = gpio_direction_output(spitz_mic_gpio, 0);
if (ret)
goto err2;
spitz_snd_device = platform_device_alloc("soc-audio", -1);
if (!spitz_snd_device) {
ret = -ENOMEM;
goto err2;
}
platform_set_drvdata(spitz_snd_device, &snd_soc_spitz);
ret = platform_device_add(spitz_snd_device);
if (ret)
goto err3;
return 0;
err3:
platform_device_put(spitz_snd_device);
err2:
gpio_free(spitz_mic_gpio);
err1:
return ret;
}
static void __exit spitz_exit(void)
{
platform_device_unregister(spitz_snd_device);
gpio_free(spitz_mic_gpio);
}
module_init(spitz_init);
module_exit(spitz_exit);
MODULE_AUTHOR("Richard Purdie");
MODULE_DESCRIPTION("ALSA SoC Spitz");
MODULE_LICENSE("GPL");
| gpl-2.0 |
garwedgess/LuPuS_honami_stock | arch/parisc/lib/bitops.c | 8737 | 1819 | /*
* bitops.c: atomic operations which got too long to be inlined all over
* the place.
*
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2000 Grant Grundler (grundler@cup.hp.com)
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
unsigned long temp, flags;
_atomic_spin_lock_irqsave(ptr, flags);
temp = *ptr;
*ptr = x;
_atomic_spin_unlock_irqrestore(ptr, flags);
return temp;
}
#endif
unsigned long __xchg32(int x, int *ptr)
{
unsigned long flags;
long temp;
_atomic_spin_lock_irqsave(ptr, flags);
temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)temp;
}
unsigned long __xchg8(char x, char *ptr)
{
unsigned long flags;
long temp;
_atomic_spin_lock_irqsave(ptr, flags);
temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)temp;
}
#ifdef CONFIG_64BIT
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
unsigned long flags;
unsigned long prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}
#endif
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
unsigned long flags;
unsigned int prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
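/*
 * Illustrative sketch, not part of the original file: example_add_u32
 * is an invented helper showing the usual retry loop callers build on
 * top of __cmpxchg_u32; the store only takes effect if no other CPU
 * changed the word between the load and the compare-and-swap.
 */
static inline unsigned int example_add_u32(volatile unsigned int *p, unsigned int n)
{
	unsigned int old;

	do {
		old = *p;
	} while (__cmpxchg_u32(p, old, old + n) != old);
	return old + n;	/* value after the update */
}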
| gpl-2.0 |
ystk/sched-deadline | block/partitions/sun.c | 13089 | 3827 | /*
* fs/partitions/sun.c
*
* Code extracted from drivers/block/genhd.c
*
* Copyright (C) 1991-1998 Linus Torvalds
* Re-organised Feb 1998 Russell King
*/
#include "check.h"
#include "sun.h"
int sun_partition(struct parsed_partitions *state)
{
int i;
__be16 csum;
int slot = 1;
__be16 *ush;
Sector sect;
struct sun_disklabel {
unsigned char info[128]; /* Informative text string */
struct sun_vtoc {
__be32 version; /* Layout version */
char volume[8]; /* Volume name */
__be16 nparts; /* Number of partitions */
struct sun_info { /* Partition hdrs, sec 2 */
__be16 id;
__be16 flags;
} infos[8];
__be16 padding; /* Alignment padding */
__be32 bootinfo[3]; /* Info needed by mboot */
__be32 sanity; /* To verify vtoc sanity */
__be32 reserved[10]; /* Free space */
__be32 timestamp[8]; /* Partition timestamp */
} vtoc;
__be32 write_reinstruct; /* sectors to skip, writes */
__be32 read_reinstruct; /* sectors to skip, reads */
unsigned char spare[148]; /* Padding */
__be16 rspeed; /* Disk rotational speed */
__be16 pcylcount; /* Physical cylinder count */
__be16 sparecyl; /* extra sects per cylinder */
__be16 obs1; /* gap1 */
__be16 obs2; /* gap2 */
__be16 ilfact; /* Interleave factor */
__be16 ncyl; /* Data cylinder count */
__be16 nacyl; /* Alt. cylinder count */
__be16 ntrks; /* Tracks per cylinder */
__be16 nsect; /* Sectors per track */
__be16 obs3; /* bhead - Label head offset */
__be16 obs4; /* ppart - Physical Partition */
struct sun_partition {
__be32 start_cylinder;
__be32 num_sectors;
} partitions[8];
__be16 magic; /* Magic number */
__be16 csum; /* Label xor'd checksum */
} * label;
struct sun_partition *p;
unsigned long spc;
char b[BDEVNAME_SIZE];
int use_vtoc;
int nparts;
label = read_part_sector(state, 0, §);
if (!label)
return -1;
p = label->partitions;
if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
/* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
bdevname(bdev, b), be16_to_cpu(label->magic)); */
put_dev_sector(sect);
return 0;
}
/* Look at the checksum */
ush = ((__be16 *) (label+1)) - 1;
for (csum = 0; ush >= ((__be16 *) label);)
csum ^= *ush--;
if (csum) {
printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
bdevname(state->bdev, b));
put_dev_sector(sect);
return 0;
}
/* Check to see if we can use the VTOC table */
use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) &&
(be32_to_cpu(label->vtoc.version) == 1) &&
(be16_to_cpu(label->vtoc.nparts) <= 8));
/* Use 8 partition entries if not specified in validated VTOC */
nparts = (use_vtoc) ? be16_to_cpu(label->vtoc.nparts) : 8;
/*
* So that old Linux-Sun partitions continue to work,
	 * allow the VTOC to be used under the additional condition ...
*/
use_vtoc = use_vtoc || !(label->vtoc.sanity ||
label->vtoc.version || label->vtoc.nparts);
spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
for (i = 0; i < nparts; i++, p++) {
unsigned long st_sector;
unsigned int num_sectors;
st_sector = be32_to_cpu(p->start_cylinder) * spc;
num_sectors = be32_to_cpu(p->num_sectors);
if (num_sectors) {
put_partition(state, slot, st_sector, num_sectors);
state->parts[slot].flags = 0;
if (use_vtoc) {
if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION)
state->parts[slot].flags |= ADDPART_FLAG_RAID;
else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK)
state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
}
}
slot++;
}
strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
}
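/*
 * Illustrative sketch, not part of the original file: the label
 * checksum is the XOR of all 256 big-endian 16-bit words of the
 * 512-byte label, stored so that XOR-ing every word including the
 * stored csum yields zero; example_sun_label_valid is an invented
 * helper restating the verification loop above.
 */
static inline int example_sun_label_valid(const __be16 *words)
{
	__be16 csum = 0;
	int i;

	for (i = 0; i < 256; i++)
		csum ^= words[i];
	return csum == 0;	/* non-zero means a corrupt label */
}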
| gpl-2.0 |
JCROM-Android/jcrom_kernel_omap | arch/arm/mach-w90x900/clock.c | 13601 | 1846 | /*
* linux/arch/arm/mach-w90x900/clock.c
*
* Copyright (c) 2008 Nuvoton technology corporation
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include "clock.h"
#define SUBCLK 0x24
static DEFINE_SPINLOCK(clocks_lock);
int clk_enable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
if (clk->enabled++ == 0)
(clk->enable)(clk, 1);
spin_unlock_irqrestore(&clocks_lock, flags);
return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
unsigned long flags;
WARN_ON(clk->enabled == 0);
spin_lock_irqsave(&clocks_lock, flags);
if (--clk->enabled == 0)
(clk->enable)(clk, 0);
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
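/*
 * Illustrative usage note, not in the original file: enable/disable
 * calls must balance, since only the 0->1 transition of the refcount
 * gates the clock on and only the 1->0 transition gates it off, e.g.:
 *
 *	clk_enable(clk);	first user, hardware ungated
 *	clk_enable(clk);	nested user, refcount only
 *	clk_disable(clk);	still running, count back to 1
 *	clk_disable(clk);	last user, hardware gated again
 */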
unsigned long clk_get_rate(struct clk *clk)
{
return 15000000;
}
EXPORT_SYMBOL(clk_get_rate);
void nuc900_clk_enable(struct clk *clk, int enable)
{
unsigned int clocks = clk->cken;
unsigned long clken;
clken = __raw_readl(W90X900_VA_CLKPWR);
if (enable)
clken |= clocks;
else
clken &= ~clocks;
__raw_writel(clken, W90X900_VA_CLKPWR);
}
void nuc900_subclk_enable(struct clk *clk, int enable)
{
unsigned int clocks = clk->cken;
unsigned long clken;
clken = __raw_readl(W90X900_VA_CLKPWR + SUBCLK);
if (enable)
clken |= clocks;
else
clken &= ~clocks;
__raw_writel(clken, W90X900_VA_CLKPWR + SUBCLK);
}
| gpl-2.0 |
flar2/m8-GPE | drivers/media/video/ivtv/ivtv-yuv.c | 34 | 32597 | /*
yuv support
Copyright (C) 2007 Ian Armstrong <ian@iarmst.demon.co.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ivtv-driver.h"
#include "ivtv-udma.h"
#include "ivtv-yuv.h"
const u32 yuv_offset[IVTV_YUV_BUFFERS] = {
0x001a8600,
0x00240400,
0x002d8200,
0x00370000,
0x00029000,
0x000C0E00,
0x006B0400,
0x00748200
};
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
struct ivtv_dma_frame *args)
{
struct ivtv_dma_page_info y_dma;
struct ivtv_dma_page_info uv_dma;
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
struct yuv_frame_info *f = &yi->new_frame_info[frame];
int i;
int y_pages, uv_pages;
unsigned long y_buffer_offset, uv_buffer_offset;
int y_decode_height, uv_decode_height, y_size;
y_buffer_offset = IVTV_DECODER_OFFSET + yuv_offset[frame];
uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;
y_decode_height = uv_decode_height = f->src_h + f->src_y;
if (f->offset_y)
y_buffer_offset += 720 * 16;
if (y_decode_height & 15)
y_decode_height = (y_decode_height + 16) & ~15;
if (uv_decode_height & 31)
uv_decode_height = (uv_decode_height + 32) & ~31;
y_size = 720 * y_decode_height;
if (dma->SG_length || dma->page_count) {
IVTV_DEBUG_WARN
("prep_user_dma: SG_length %d page_count %d still full?\n",
dma->SG_length, dma->page_count);
return -EBUSY;
}
ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
down_read(¤t->mm->mmap_sem);
y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
uv_pages = 0;
if (y_pages == y_dma.page_count) {
uv_pages = get_user_pages(current, current->mm,
uv_dma.uaddr, uv_dma.page_count, 0, 1,
&dma->map[y_pages], NULL);
}
up_read(¤t->mm->mmap_sem);
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
int rc = -EFAULT;
if (y_pages == y_dma.page_count) {
IVTV_DEBUG_WARN
("failed to map uv user pages, returned %d "
"expecting %d\n", uv_pages, uv_dma.page_count);
if (uv_pages >= 0) {
for (i = 0; i < uv_pages; i++)
put_page(dma->map[y_pages + i]);
rc = -EFAULT;
} else {
rc = uv_pages;
}
} else {
IVTV_DEBUG_WARN
("failed to map y user pages, returned %d "
"expecting %d\n", y_pages, y_dma.page_count);
}
if (y_pages >= 0) {
for (i = 0; i < y_pages; i++)
put_page(dma->map[i]);
} else {
rc = y_pages;
}
return rc;
}
dma->page_count = y_pages + uv_pages;
if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
for (i = 0; i < dma->page_count; i++) {
put_page(dma->map[i]);
}
dma->page_count = 0;
return -ENOMEM;
}
dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
if (f->offset_y && yi->blanking_dmaptr) {
dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
dma->SG_length++;
}
dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);
ivtv_udma_sync_for_device(itv);
return 0;
}
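/*
 * Illustrative note, not in the original file: the height round-ups
 * above are the usual power-of-two alignment idiom; because each is
 * guarded by "if (h & 15)" (or & 31 for chroma), the (h + 16) & ~15
 * form is equivalent to the unconditional (h + 15) & ~15, e.g. 481
 * becomes 496 while 480 is left alone.
 */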
int ivtv_yuv_filter_check(struct ivtv *itv)
{
int i, y, uv;
for (i = 0, y = 16, uv = 4; i < 16; i++, y += 24, uv += 12) {
if ((read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + y) != i << 16) ||
(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + uv) != i << 16)) {
IVTV_WARN ("YUV filter table not found in firmware.\n");
return -1;
}
}
return 0;
}
static void ivtv_yuv_filter(struct ivtv *itv, int h_filter, int v_filter_1, int v_filter_2)
{
u32 i, line;
if (h_filter > -1) {
if (h_filter > 4)
h_filter = 4;
i = IVTV_YUV_HORIZONTAL_FILTER_OFFSET + (h_filter * 384);
for (line = 0; line < 16; line++) {
write_reg(read_dec(i), 0x02804);
write_reg(read_dec(i), 0x0281c);
i += 4;
write_reg(read_dec(i), 0x02808);
write_reg(read_dec(i), 0x02820);
i += 4;
write_reg(read_dec(i), 0x0280c);
write_reg(read_dec(i), 0x02824);
i += 4;
write_reg(read_dec(i), 0x02810);
write_reg(read_dec(i), 0x02828);
i += 4;
write_reg(read_dec(i), 0x02814);
write_reg(read_dec(i), 0x0282c);
i += 8;
write_reg(0, 0x02818);
write_reg(0, 0x02830);
}
IVTV_DEBUG_YUV("h_filter -> %d\n", h_filter);
}
if (v_filter_1 > -1) {
if (v_filter_1 > 4)
v_filter_1 = 4;
i = IVTV_YUV_VERTICAL_FILTER_OFFSET + (v_filter_1 * 192);
for (line = 0; line < 16; line++) {
write_reg(read_dec(i), 0x02900);
i += 4;
write_reg(read_dec(i), 0x02904);
i += 8;
write_reg(0, 0x02908);
}
IVTV_DEBUG_YUV("v_filter_1 -> %d\n", v_filter_1);
}
if (v_filter_2 > -1) {
if (v_filter_2 > 4)
v_filter_2 = 4;
i = IVTV_YUV_VERTICAL_FILTER_OFFSET + (v_filter_2 * 192);
for (line = 0; line < 16; line++) {
write_reg(read_dec(i), 0x0290c);
i += 4;
write_reg(read_dec(i), 0x02910);
i += 8;
write_reg(0, 0x02914);
}
IVTV_DEBUG_YUV("v_filter_2 -> %d\n", v_filter_2);
}
}
static void ivtv_yuv_handle_horizontal(struct ivtv *itv, struct yuv_frame_info *f)
{
struct yuv_playback_info *yi = &itv->yuv_info;
u32 reg_2834, reg_2838, reg_283c;
u32 reg_2844, reg_2854, reg_285c;
u32 reg_2864, reg_2874, reg_2890;
u32 reg_2870, reg_2870_base, reg_2870_offset;
int x_cutoff;
int h_filter;
u32 master_width;
IVTV_DEBUG_WARN
("Adjust to width %d src_w %d dst_w %d src_x %d dst_x %d\n",
f->tru_w, f->src_w, f->dst_w, f->src_x, f->dst_x);
x_cutoff = f->src_w + f->src_x;
reg_2834 = f->dst_w;
reg_2838 = reg_2834;
reg_2890 = f->dst_x;
reg_2870 = 0;
if (f->vis_w == 720) {
if ((f->tru_x - f->pan_x > -1) && (f->tru_x - f->pan_x <= 40) && (f->dst_w >= 680))
reg_2870 = 10 - (f->tru_x - f->pan_x) / 4;
else if ((f->tru_x - f->pan_x < 0) && (f->tru_x - f->pan_x >= -20) && (f->dst_w >= 660))
reg_2870 = (10 + (f->tru_x - f->pan_x) / 2);
if (f->dst_w >= f->src_w)
reg_2870 = reg_2870 << 16 | reg_2870;
else
reg_2870 = ((reg_2870 & ~1) << 15) | (reg_2870 & ~1);
}
if (f->dst_w < f->src_w)
reg_2870 = 0x000d000e - reg_2870;
else
reg_2870 = 0x0012000e - reg_2870;
reg_2870_offset = (f->src_x * ((f->dst_w << 21) / f->src_w)) >> 19;
if (f->dst_w >= f->src_w) {
x_cutoff &= ~1;
master_width = (f->src_w * 0x00200000) / (f->dst_w);
if (master_width * f->dst_w != f->src_w * 0x00200000)
master_width++;
reg_2834 = (reg_2834 << 16) | x_cutoff;
reg_2838 = (reg_2838 << 16) | x_cutoff;
reg_283c = master_width >> 2;
reg_2844 = master_width >> 2;
reg_2854 = master_width;
reg_285c = master_width >> 1;
reg_2864 = master_width >> 1;
if (f->dst_w > f->src_w)
reg_2870_base = ((f->dst_w - f->src_w)<<16) / (f->src_w <<14);
else
reg_2870_base = 0;
reg_2870 += (((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 2) + (reg_2870_base << 17 | reg_2870_base);
reg_2874 = 0;
} else if (f->dst_w < f->src_w / 2) {
master_width = (f->src_w * 0x00080000) / f->dst_w;
if (master_width * f->dst_w != f->src_w * 0x00080000)
master_width++;
reg_2834 = (reg_2834 << 16) | x_cutoff;
reg_2838 = (reg_2838 << 16) | x_cutoff;
reg_283c = master_width >> 2;
reg_2844 = master_width >> 1;
reg_2854 = master_width;
reg_285c = master_width >> 1;
reg_2864 = master_width >> 1;
reg_2870 += ((reg_2870_offset << 15) & 0xFFFF0000) | reg_2870_offset;
reg_2870 += (5 - (((f->src_w + f->src_w / 2) - 1) / f->dst_w)) << 16;
reg_2874 = 0x00000012;
} else {
master_width = (f->src_w * 0x00100000) / f->dst_w;
if (master_width * f->dst_w != f->src_w * 0x00100000)
master_width++;
reg_2834 = (reg_2834 << 16) | x_cutoff;
reg_2838 = (reg_2838 << 16) | x_cutoff;
reg_283c = master_width >> 2;
reg_2844 = master_width >> 1;
reg_2854 = master_width;
reg_285c = master_width >> 1;
reg_2864 = master_width >> 1;
reg_2870 += ((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 1;
reg_2870 += (5 - (((f->src_w * 3) - 1) / f->dst_w)) << 16;
reg_2874 = 0x00000001;
}
if (f->src_w == f->dst_w) {
h_filter = 0;
} else {
h_filter = ((f->src_w << 16) / f->dst_w) >> 15;
h_filter = (h_filter >> 1) + (h_filter & 1);
h_filter += !h_filter;
}
write_reg(reg_2834, 0x02834);
write_reg(reg_2838, 0x02838);
IVTV_DEBUG_YUV("Update reg 0x2834 %08x->%08x 0x2838 %08x->%08x\n",
yi->reg_2834, reg_2834, yi->reg_2838, reg_2838);
write_reg(reg_283c, 0x0283c);
write_reg(reg_2844, 0x02844);
IVTV_DEBUG_YUV("Update reg 0x283c %08x->%08x 0x2844 %08x->%08x\n",
yi->reg_283c, reg_283c, yi->reg_2844, reg_2844);
write_reg(0x00080514, 0x02840);
write_reg(0x00100514, 0x02848);
IVTV_DEBUG_YUV("Update reg 0x2840 %08x->%08x 0x2848 %08x->%08x\n",
yi->reg_2840, 0x00080514, yi->reg_2848, 0x00100514);
write_reg(reg_2854, 0x02854);
IVTV_DEBUG_YUV("Update reg 0x2854 %08x->%08x \n",
yi->reg_2854, reg_2854);
write_reg(reg_285c, 0x0285c);
write_reg(reg_2864, 0x02864);
IVTV_DEBUG_YUV("Update reg 0x285c %08x->%08x 0x2864 %08x->%08x\n",
yi->reg_285c, reg_285c, yi->reg_2864, reg_2864);
write_reg(reg_2874, 0x02874);
IVTV_DEBUG_YUV("Update reg 0x2874 %08x->%08x\n",
yi->reg_2874, reg_2874);
write_reg(reg_2870, 0x02870);
IVTV_DEBUG_YUV("Update reg 0x2870 %08x->%08x\n",
yi->reg_2870, reg_2870);
write_reg(reg_2890, 0x02890);
IVTV_DEBUG_YUV("Update reg 0x2890 %08x->%08x\n",
yi->reg_2890, reg_2890);
if (h_filter != yi->h_filter) {
ivtv_yuv_filter(itv, h_filter, -1, -1);
yi->h_filter = h_filter;
}
}
static void ivtv_yuv_handle_vertical(struct ivtv *itv, struct yuv_frame_info *f)
{
struct yuv_playback_info *yi = &itv->yuv_info;
u32 master_height;
u32 reg_2918, reg_291c, reg_2920, reg_2928;
u32 reg_2930, reg_2934, reg_293c;
u32 reg_2940, reg_2944, reg_294c;
u32 reg_2950, reg_2954, reg_2958, reg_295c;
u32 reg_2960, reg_2964, reg_2968, reg_296c;
u32 reg_289c;
u32 src_major_y, src_minor_y;
u32 src_major_uv, src_minor_uv;
u32 reg_2964_base, reg_2968_base;
int v_filter_1, v_filter_2;
IVTV_DEBUG_WARN
("Adjust to height %d src_h %d dst_h %d src_y %d dst_y %d\n",
f->tru_h, f->src_h, f->dst_h, f->src_y, f->dst_y);
IVTV_DEBUG_YUV("Scaling mode Y: %s\n",
f->interlaced_y ? "Interlaced" : "Progressive");
IVTV_DEBUG_YUV("Scaling mode UV: %s\n",
f->interlaced_uv ? "Interlaced" : "Progressive");
IVTV_DEBUG_WARN("Source video: %s\n",
f->interlaced ? "Interlaced" : "Progressive");
if (f->src_y < 8) {
src_minor_uv = f->src_y;
src_major_uv = 0;
} else {
src_minor_uv = 8;
src_major_uv = f->src_y - 8;
}
src_minor_y = src_minor_uv;
src_major_y = src_major_uv;
if (f->offset_y)
src_minor_y += 16;
if (f->interlaced_y)
reg_2918 = (f->dst_h << 16) | (f->src_h + src_minor_y);
else
reg_2918 = (f->dst_h << 16) | ((f->src_h + src_minor_y) << 1);
if (f->interlaced_uv)
reg_291c = (f->dst_h << 16) | ((f->src_h + src_minor_uv) >> 1);
else
reg_291c = (f->dst_h << 16) | (f->src_h + src_minor_uv);
reg_2964_base = (src_minor_y * ((f->dst_h << 16) / f->src_h)) >> 14;
reg_2968_base = (src_minor_uv * ((f->dst_h << 16) / f->src_h)) >> 14;
if (f->dst_h / 2 >= f->src_h && !f->interlaced_y) {
master_height = (f->src_h * 0x00400000) / f->dst_h;
if ((f->src_h * 0x00400000) - (master_height * f->dst_h) >= f->dst_h / 2)
master_height++;
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 3;
reg_2930 = master_height;
reg_2940 = master_height >> 1;
reg_2964_base >>= 3;
reg_2968_base >>= 3;
reg_296c = 0x00000000;
} else if (f->dst_h >= f->src_h) {
master_height = (f->src_h * 0x00400000) / f->dst_h;
master_height = (master_height >> 1) + (master_height & 1);
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 2;
reg_2930 = master_height;
reg_2940 = master_height >> 1;
reg_296c = 0x00000000;
if (f->interlaced_y) {
reg_2964_base >>= 3;
} else {
reg_296c++;
reg_2964_base >>= 2;
}
if (f->interlaced_uv)
reg_2928 >>= 1;
reg_2968_base >>= 3;
} else if (f->dst_h >= f->src_h / 2) {
master_height = (f->src_h * 0x00200000) / f->dst_h;
master_height = (master_height >> 1) + (master_height & 1);
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 2;
reg_2930 = master_height;
reg_2940 = master_height;
reg_296c = 0x00000101;
if (f->interlaced_y) {
reg_2964_base >>= 2;
} else {
reg_296c++;
reg_2964_base >>= 1;
}
if (f->interlaced_uv)
reg_2928 >>= 1;
reg_2968_base >>= 2;
} else {
master_height = (f->src_h * 0x00100000) / f->dst_h;
master_height = (master_height >> 1) + (master_height & 1);
reg_2920 = master_height >> 2;
reg_2928 = master_height >> 2;
reg_2930 = master_height;
reg_2940 = master_height;
reg_2964_base >>= 1;
reg_2968_base >>= 2;
reg_296c = 0x00000102;
}
if (f->src_h == f->dst_h) {
reg_2934 = 0x00020000;
reg_293c = 0x00100000;
reg_2944 = 0x00040000;
reg_294c = 0x000b0000;
} else {
reg_2934 = 0x00000FF0;
reg_293c = 0x00000FF0;
reg_2944 = 0x00000FF0;
reg_294c = 0x00000FF0;
}
reg_2950 = 0x00010000 + src_major_y;
if (f->interlaced_y)
reg_2950 += 0x00010000;
reg_2954 = reg_2950 + 1;
reg_2958 = 0x00010000 + (src_major_y >> 1);
if (f->interlaced_uv)
reg_2958 += 0x00010000;
reg_295c = reg_2958 + 1;
if (yi->decode_height == 480)
reg_289c = 0x011e0017;
else
reg_289c = 0x01500017;
if (f->dst_y < 0)
reg_289c = (reg_289c - ((f->dst_y & ~1)<<15))-(f->dst_y >>1);
else
reg_289c = (reg_289c + ((f->dst_y & ~1)<<15))+(f->dst_y >>1);
reg_2960 = ((src_minor_y + f->src_h + src_major_y) - 1) |
(((src_minor_uv + f->src_h + src_major_uv - 1) & ~1) << 15);
if (f->src_h == f->dst_h) {
reg_2964 = 1;
} else {
reg_2964 = 2 + ((f->dst_h << 1) / f->src_h);
reg_2964 = (reg_2964 >> 1) + (reg_2964 & 1);
}
reg_2968 = (reg_2964 << 16) + reg_2964 + (reg_2964 >> 1);
reg_2964 = (reg_2964 << 16) + reg_2964 + (reg_2964 * 46 / 94);
reg_2964 = 0x00010001 + ((reg_2964 & 0x0000FFFF) - (reg_2964 >> 16));
reg_2968 = 0x00010001 + ((reg_2968 & 0x0000FFFF) - (reg_2968 >> 16));
if ((reg_2964 != 0x00010001) && (f->dst_h / 2 <= f->src_h))
reg_2964 = (reg_2964 & 0xFFFF0000) + ((reg_2964 & 0x0000FFFF) / 2);
if (!f->interlaced_y)
reg_2964 -= 0x00010001;
if (!f->interlaced_uv)
reg_2968 -= 0x00010001;
reg_2964 += ((reg_2964_base << 16) | reg_2964_base);
reg_2968 += ((reg_2968_base << 16) | reg_2968_base);
if (f->src_h == f->dst_h) {
v_filter_1 = 0;
v_filter_2 = 1;
} else {
v_filter_1 = ((f->src_h << 16) / f->dst_h) >> 15;
v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1);
v_filter_1 += !v_filter_1;
v_filter_2 = v_filter_1;
}
write_reg(reg_2934, 0x02934);
write_reg(reg_293c, 0x0293c);
IVTV_DEBUG_YUV("Update reg 0x2934 %08x->%08x 0x293c %08x->%08x\n",
yi->reg_2934, reg_2934, yi->reg_293c, reg_293c);
write_reg(reg_2944, 0x02944);
write_reg(reg_294c, 0x0294c);
IVTV_DEBUG_YUV("Update reg 0x2944 %08x->%08x 0x294c %08x->%08x\n",
yi->reg_2944, reg_2944, yi->reg_294c, reg_294c);
write_reg(reg_2930, 0x02938);
write_reg(reg_2930, 0x02930);
IVTV_DEBUG_YUV("Update reg 0x2930 %08x->%08x 0x2938 %08x->%08x\n",
yi->reg_2930, reg_2930, yi->reg_2938, reg_2930);
write_reg(reg_2928, 0x02928);
write_reg(reg_2928 + 0x514, 0x0292C);
IVTV_DEBUG_YUV("Update reg 0x2928 %08x->%08x 0x292c %08x->%08x\n",
yi->reg_2928, reg_2928, yi->reg_292c, reg_2928 + 0x514);
write_reg(reg_2920, 0x02920);
write_reg(reg_2920 + 0x514, 0x02924);
IVTV_DEBUG_YUV("Update reg 0x2920 %08x->%08x 0x2924 %08x->%08x\n",
yi->reg_2920, reg_2920, yi->reg_2924, reg_2920 + 0x514);
write_reg(reg_2918, 0x02918);
write_reg(reg_291c, 0x0291C);
IVTV_DEBUG_YUV("Update reg 0x2918 %08x->%08x 0x291C %08x->%08x\n",
yi->reg_2918, reg_2918, yi->reg_291c, reg_291c);
write_reg(reg_296c, 0x0296c);
IVTV_DEBUG_YUV("Update reg 0x296c %08x->%08x\n",
yi->reg_296c, reg_296c);
write_reg(reg_2940, 0x02948);
write_reg(reg_2940, 0x02940);
IVTV_DEBUG_YUV("Update reg 0x2940 %08x->%08x 0x2948 %08x->%08x\n",
yi->reg_2940, reg_2940, yi->reg_2948, reg_2940);
write_reg(reg_2950, 0x02950);
write_reg(reg_2954, 0x02954);
IVTV_DEBUG_YUV("Update reg 0x2950 %08x->%08x 0x2954 %08x->%08x\n",
yi->reg_2950, reg_2950, yi->reg_2954, reg_2954);
write_reg(reg_2958, 0x02958);
write_reg(reg_295c, 0x0295C);
IVTV_DEBUG_YUV("Update reg 0x2958 %08x->%08x 0x295C %08x->%08x\n",
yi->reg_2958, reg_2958, yi->reg_295c, reg_295c);
write_reg(reg_2960, 0x02960);
IVTV_DEBUG_YUV("Update reg 0x2960 %08x->%08x \n",
yi->reg_2960, reg_2960);
write_reg(reg_2964, 0x02964);
write_reg(reg_2968, 0x02968);
IVTV_DEBUG_YUV("Update reg 0x2964 %08x->%08x 0x2968 %08x->%08x\n",
yi->reg_2964, reg_2964, yi->reg_2968, reg_2968);
write_reg(reg_289c, 0x0289c);
IVTV_DEBUG_YUV("Update reg 0x289c %08x->%08x\n",
yi->reg_289c, reg_289c);
if (v_filter_1 != yi->v_filter_1) {
ivtv_yuv_filter(itv, -1, v_filter_1, -1);
yi->v_filter_1 = v_filter_1;
}
if (v_filter_2 != yi->v_filter_2) {
ivtv_yuv_filter(itv, -1, -1, v_filter_2);
yi->v_filter_2 = v_filter_2;
}
}
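/*
 * Editor's sketch (illustrative, not driver code): the vertical scaler
 * above works in 12.20 fixed point -- (src_h << 20) / dst_h -- and then
 * halves the ratio with round-up, presumably for per-field addressing
 * (that reading is an assumption, not documented here).
 * E.g. src_h = 480, dst_h = 240 -> 0x00200000, halved to 0x00100000.
 */
static inline u32 yuv_v_ratio_example(u32 src_h, u32 dst_h)
{
	u32 ratio = (src_h * 0x00100000) / dst_h;	/* 12.20 fixed point */
	return (ratio >> 1) + (ratio & 1);		/* halve, rounding up */
}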
static u32 ivtv_yuv_window_setup(struct ivtv *itv, struct yuv_frame_info *f)
{
struct yuv_frame_info *of = &itv->yuv_info.old_frame_info;
int osd_crop;
u32 osd_scale;
u32 yuv_update = 0;
if (f->src_x < 0)
f->src_x = 0;
if (f->src_y < 0)
f->src_y = 0;
if ((osd_crop = f->src_w - 4 * f->dst_w) > 0) {
f->src_x += osd_crop / 2;
f->src_w = (f->src_w - osd_crop) & ~3;
f->dst_w = f->src_w / 4;
f->dst_w += f->dst_w & 1;
}
if (f->src_h / f->dst_h >= 2) {
f->interlaced_y = 1;
if ((osd_crop = f->src_h - 4 * f->dst_h) > 0) {
f->src_y += osd_crop / 2;
f->src_h = (f->src_h - osd_crop) & ~3;
f->dst_h = f->src_h / 4;
f->dst_h += f->dst_h & 1;
}
}
if ((int)f->dst_w <= 2 || (int)f->dst_h <= 2 ||
(int)f->src_w <= 2 || (int)f->src_h <= 2) {
return IVTV_YUV_UPDATE_INVALID;
}
osd_scale = (f->src_h << 16) / f->dst_h;
if ((osd_crop = f->pan_y - f->dst_y) > 0) {
f->src_y += (osd_scale * osd_crop) >> 16;
f->src_h -= (osd_scale * osd_crop) >> 16;
f->dst_h -= osd_crop;
f->dst_y = 0;
} else {
f->dst_y -= f->pan_y;
}
if ((osd_crop = f->dst_h + f->dst_y - f->vis_h) > 0) {
f->dst_h -= osd_crop;
f->src_h -= (osd_scale * osd_crop) >> 16;
}
osd_scale = (f->src_w << 16) / f->dst_w;
if ((osd_crop = f->pan_x - f->dst_x) > 0) {
f->src_x += (osd_scale * osd_crop) >> 16;
f->src_w -= (osd_scale * osd_crop) >> 16;
f->dst_w -= osd_crop;
f->dst_x = 0;
} else {
f->dst_x -= f->pan_x;
}
if ((osd_crop = f->dst_w + f->dst_x - f->vis_w) > 0) {
f->dst_w -= osd_crop;
f->src_w -= (osd_scale * osd_crop) >> 16;
}
if (itv->yuv_info.track_osd) {
f->dst_x += itv->yuv_info.osd_x_offset;
f->dst_y += itv->yuv_info.osd_y_offset;
}
f->dst_w &= ~1;
f->dst_x &= ~1;
f->src_w += f->src_x & 1;
f->src_x &= ~1;
f->src_w &= ~1;
f->dst_w &= ~1;
f->dst_h &= ~1;
f->dst_y &= ~1;
f->src_h += f->src_y & 1;
f->src_y &= ~1;
f->src_h &= ~1;
f->dst_h &= ~1;
if (f->dst_w < f->src_w / 4) {
f->src_w &= ~3;
f->dst_w = f->src_w / 4;
f->dst_w += f->dst_w & 1;
}
if (f->dst_h < f->src_h / 4) {
f->src_h &= ~3;
f->dst_h = f->src_h / 4;
f->dst_h += f->dst_h & 1;
}
if ((int)f->dst_w <= 2 || (int)f->dst_h <= 2 ||
(int)f->src_w <= 2 || (int)f->src_h <= 2) {
return IVTV_YUV_UPDATE_INVALID;
}
if ((of->dst_w != f->dst_w) || (of->src_w != f->src_w) ||
(of->dst_x != f->dst_x) || (of->src_x != f->src_x) ||
(of->pan_x != f->pan_x) || (of->vis_w != f->vis_w)) {
yuv_update |= IVTV_YUV_UPDATE_HORIZONTAL;
}
if ((of->src_h != f->src_h) || (of->dst_h != f->dst_h) ||
(of->dst_y != f->dst_y) || (of->src_y != f->src_y) ||
(of->pan_y != f->pan_y) || (of->vis_h != f->vis_h) ||
(of->lace_mode != f->lace_mode) ||
(of->interlaced_y != f->interlaced_y) ||
(of->interlaced_uv != f->interlaced_uv)) {
yuv_update |= IVTV_YUV_UPDATE_VERTICAL;
}
return yuv_update;
}
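/*
 * Editor's sketch (not part of the driver): the pan/crop passes above use
 * a 16.16 fixed-point ratio, osd_scale = (src << 16) / dst, to map display
 * lines back into source lines. With src_h = 480 and dst_h = 240 the ratio
 * is 0x00020000, so cropping 10 display lines drops
 * (0x20000 * 10) >> 16 = 20 source lines.
 */
static inline u32 crop_to_src_example(u32 src, u32 dst, u32 crop)
{
	u32 osd_scale = (src << 16) / dst;	/* 16.16 source/display ratio */
	return (osd_scale * crop) >> 16;	/* display crop -> source pixels */
}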
void ivtv_yuv_work_handler(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct yuv_frame_info f;
int frame = yi->update_frame;
u32 yuv_update;
IVTV_DEBUG_YUV("Update yuv registers for frame %d\n", frame);
f = yi->new_frame_info[frame];
if (yi->track_osd) {
f.pan_x = yi->osd_x_pan;
f.pan_y = yi->osd_y_pan;
f.vis_w = yi->osd_vis_w;
f.vis_h = yi->osd_vis_h;
} else {
f.pan_x = 0;
f.pan_y = 0;
f.vis_w = 720;
f.vis_h = yi->decode_height;
}
if (!(yuv_update = ivtv_yuv_window_setup(itv, &f)))
return;
if (yuv_update & IVTV_YUV_UPDATE_INVALID) {
write_reg(0x01008080, 0x2898);
} else if (yuv_update) {
write_reg(0x00108080, 0x2898);
if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL)
ivtv_yuv_handle_horizontal(itv, &f);
if (yuv_update & IVTV_YUV_UPDATE_VERTICAL)
ivtv_yuv_handle_vertical(itv, &f);
}
yi->old_frame_info = f;
}
static void ivtv_yuv_init(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
IVTV_DEBUG_YUV("ivtv_yuv_init\n");
yi->reg_2834 = read_reg(0x02834);
yi->reg_2838 = read_reg(0x02838);
yi->reg_283c = read_reg(0x0283c);
yi->reg_2840 = read_reg(0x02840);
yi->reg_2844 = read_reg(0x02844);
yi->reg_2848 = read_reg(0x02848);
yi->reg_2854 = read_reg(0x02854);
yi->reg_285c = read_reg(0x0285c);
yi->reg_2864 = read_reg(0x02864);
yi->reg_2870 = read_reg(0x02870);
yi->reg_2874 = read_reg(0x02874);
yi->reg_2898 = read_reg(0x02898);
yi->reg_2890 = read_reg(0x02890);
yi->reg_289c = read_reg(0x0289c);
yi->reg_2918 = read_reg(0x02918);
yi->reg_291c = read_reg(0x0291c);
yi->reg_2920 = read_reg(0x02920);
yi->reg_2924 = read_reg(0x02924);
yi->reg_2928 = read_reg(0x02928);
yi->reg_292c = read_reg(0x0292c);
yi->reg_2930 = read_reg(0x02930);
yi->reg_2934 = read_reg(0x02934);
yi->reg_2938 = read_reg(0x02938);
yi->reg_293c = read_reg(0x0293c);
yi->reg_2940 = read_reg(0x02940);
yi->reg_2944 = read_reg(0x02944);
yi->reg_2948 = read_reg(0x02948);
yi->reg_294c = read_reg(0x0294c);
yi->reg_2950 = read_reg(0x02950);
yi->reg_2954 = read_reg(0x02954);
yi->reg_2958 = read_reg(0x02958);
yi->reg_295c = read_reg(0x0295c);
yi->reg_2960 = read_reg(0x02960);
yi->reg_2964 = read_reg(0x02964);
yi->reg_2968 = read_reg(0x02968);
yi->reg_296c = read_reg(0x0296c);
yi->reg_2970 = read_reg(0x02970);
yi->v_filter_1 = -1;
yi->v_filter_2 = -1;
yi->h_filter = -1;
yi->osd_x_offset = read_reg(0x02a04) & 0x00000FFF;
yi->osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF;
if (read_reg(0x2878) & 4)
yi->decode_height = 576;
else
yi->decode_height = 480;
if (!itv->osd_info) {
yi->osd_vis_w = 720 - yi->osd_x_offset;
yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
} else {
if (!yi->osd_vis_w)
yi->osd_vis_w = 720 - yi->osd_x_offset;
if (!yi->osd_vis_h) {
yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
} else if (yi->osd_vis_h + yi->osd_y_offset > yi->decode_height) {
IVTV_DEBUG_WARN("Clipping yuv output - fb size (%d) exceeds video standard limit (%d)\n",
yi->osd_vis_h + yi->osd_y_offset,
yi->decode_height);
yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
}
}
yi->blanking_ptr = kzalloc(720 * 16, GFP_KERNEL|__GFP_NOWARN);
if (yi->blanking_ptr) {
yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
}
write_reg_sync(0x01, IVTV_REG_VDM);
set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
atomic_set(&yi->next_dma_frame, 0);
}
static void ivtv_yuv_next_free(struct ivtv *itv)
{
int draw, display;
struct yuv_playback_info *yi = &itv->yuv_info;
if (atomic_read(&yi->next_dma_frame) == -1)
ivtv_yuv_init(itv);
draw = atomic_read(&yi->next_fill_frame);
display = atomic_read(&yi->next_dma_frame);
if (display > draw)
display -= IVTV_YUV_BUFFERS;
if (draw - display >= yi->max_frames_buffered)
draw = (u8)(draw - 1) % IVTV_YUV_BUFFERS;
else
yi->new_frame_info[draw].update = 0;
yi->draw_frame = draw;
}
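/*
 * Editor's sketch: the "back up one frame" step above leans on u8
 * wrap-around before the modulo. Assuming IVTV_YUV_BUFFERS == 8, draw = 0
 * backs up to (u8)(0 - 1) % 8 = 255 % 8 = 7, the last buffer in the ring.
 */
static inline int ring_prev_example(int draw, int nbufs)
{
	return (u8)(draw - 1) % nbufs;
}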
static void ivtv_yuv_setup_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
struct yuv_playback_info *yi = &itv->yuv_info;
u8 frame = yi->draw_frame;
u8 last_frame = (u8)(frame - 1) % IVTV_YUV_BUFFERS;
struct yuv_frame_info *nf = &yi->new_frame_info[frame];
struct yuv_frame_info *of = &yi->new_frame_info[last_frame];
int lace_threshold = yi->lace_threshold;
int update = nf->update;
nf->src_x = args->src.left;
nf->src_y = args->src.top;
nf->src_w = args->src.width;
nf->src_h = args->src.height;
nf->dst_x = args->dst.left;
nf->dst_y = args->dst.top;
nf->dst_w = args->dst.width;
nf->dst_h = args->dst.height;
nf->tru_x = args->dst.left;
nf->tru_w = args->src_width;
nf->tru_h = args->src_height;
nf->offset_y = (nf->tru_h + nf->src_x < 512 - 16) ? 1 : 0;
nf->update = 0;
nf->interlaced_y = 0;
nf->interlaced_uv = 0;
nf->delay = 0;
nf->sync_field = 0;
nf->lace_mode = yi->lace_mode & IVTV_YUV_MODE_MASK;
if (lace_threshold < 0)
lace_threshold = yi->decode_height - 1;
switch (nf->lace_mode) {
case IVTV_YUV_MODE_PROGRESSIVE:
nf->interlaced = 0;
if (nf->tru_h < 512 || (nf->tru_h > 576 && nf->tru_h < 1021))
nf->interlaced_y = 0;
else
nf->interlaced_y = 1;
if (nf->tru_h < 1021 && (nf->dst_h >= nf->src_h / 2))
nf->interlaced_uv = 0;
else
nf->interlaced_uv = 1;
break;
case IVTV_YUV_MODE_AUTO:
if (nf->tru_h <= lace_threshold || nf->tru_h > 576 || nf->tru_w > 720) {
nf->interlaced = 0;
if ((nf->tru_h < 512) ||
(nf->tru_h > 576 && nf->tru_h < 1021) ||
(nf->tru_w > 720 && nf->tru_h < 1021))
nf->interlaced_y = 0;
else
nf->interlaced_y = 1;
if (nf->tru_h < 1021 && (nf->dst_h >= nf->src_h / 2))
nf->interlaced_uv = 0;
else
nf->interlaced_uv = 1;
} else {
nf->interlaced = 1;
nf->interlaced_y = 1;
nf->interlaced_uv = 1;
}
break;
case IVTV_YUV_MODE_INTERLACED:
default:
nf->interlaced = 1;
nf->interlaced_y = 1;
nf->interlaced_uv = 1;
break;
}
if (memcmp(&yi->old_frame_info_args, nf, sizeof(*nf))) {
yi->old_frame_info_args = *nf;
nf->update = 1;
IVTV_DEBUG_YUV("Requesting reg update for frame %d\n", frame);
}
nf->update |= update;
nf->sync_field = yi->lace_sync_field;
nf->delay = nf->sync_field != of->sync_field;
}
void ivtv_yuv_frame_complete(struct ivtv *itv)
{
atomic_set(&itv->yuv_info.next_fill_frame,
(itv->yuv_info.draw_frame + 1) % IVTV_YUV_BUFFERS);
}
static int ivtv_yuv_udma_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
DEFINE_WAIT(wait);
int rc = 0;
int got_sig = 0;
mutex_lock(&itv->udma.lock);
if ((rc = ivtv_yuv_prep_user_dma(itv, &itv->udma, args)) != 0) {
mutex_unlock(&itv->udma.lock);
return rc;
}
ivtv_udma_prepare(itv);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags) ||
test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
got_sig = signal_pending(current);
if (got_sig && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
break;
got_sig = 0;
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
ivtv_udma_unmap(itv);
if (got_sig) {
IVTV_DEBUG_INFO("User stopped YUV UDMA\n");
mutex_unlock(&itv->udma.lock);
return -EINTR;
}
ivtv_yuv_frame_complete(itv);
mutex_unlock(&itv->udma.lock);
return rc;
}
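/*
 * Editor's note: the loop above is the standard prepare_to_wait() /
 * schedule() idiom -- re-check the wake condition after every schedule()
 * and bail out when a signal is pending, so userspace can abort a YUV DMA
 * that never completes.
 */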
void ivtv_yuv_setup_stream_frame(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
ivtv_yuv_next_free(itv);
dma_args.y_source = NULL;
dma_args.uv_source = NULL;
dma_args.src.left = 0;
dma_args.src.top = 0;
dma_args.src.width = yi->v4l2_src_w;
dma_args.src.height = yi->v4l2_src_h;
dma_args.dst = yi->main_rect;
dma_args.src_width = yi->v4l2_src_w;
dma_args.src_height = yi->v4l2_src_h;
ivtv_yuv_setup_frame(itv, &dma_args);
if (!itv->dma_data_req_offset)
itv->dma_data_req_offset = yuv_offset[yi->draw_frame];
}
int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
int res;
ivtv_yuv_setup_stream_frame(itv);
dma_args.y_source = src;
dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
mutex_unlock(&itv->serialize_lock);
res = ivtv_yuv_udma_frame(itv, &dma_args);
mutex_lock(&itv->serialize_lock);
return res;
}
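/*
 * Editor's sketch: the UV offset above assumes the luma plane occupies
 * 720-byte lines padded up to a multiple of 32 lines; the rounding is the
 * usual mask trick, e.g. h = 480 -> 480, h = 488 -> 512.
 */
static inline u32 align32_example(u32 h)
{
	return (h + 31) & ~31u;	/* round up to the next multiple of 32 */
}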
int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
int res;
ivtv_yuv_next_free(itv);
ivtv_yuv_setup_frame(itv, args);
mutex_unlock(&itv->serialize_lock);
res = ivtv_yuv_udma_frame(itv, args);
mutex_lock(&itv->serialize_lock);
return res;
}
void ivtv_yuv_close(struct ivtv *itv)
{
struct yuv_playback_info *yi = &itv->yuv_info;
int h_filter, v_filter_1, v_filter_2;
IVTV_DEBUG_YUV("ivtv_yuv_close\n");
mutex_unlock(&itv->serialize_lock);
ivtv_waitq(&itv->vsync_waitq);
mutex_lock(&itv->serialize_lock);
yi->running = 0;
atomic_set(&yi->next_dma_frame, -1);
atomic_set(&yi->next_fill_frame, 0);
write_reg(yi->reg_2898 | 0x01000000, 0x2898);
write_reg(yi->reg_2834, 0x02834);
write_reg(yi->reg_2838, 0x02838);
write_reg(yi->reg_283c, 0x0283c);
write_reg(yi->reg_2840, 0x02840);
write_reg(yi->reg_2844, 0x02844);
write_reg(yi->reg_2848, 0x02848);
write_reg(yi->reg_2854, 0x02854);
write_reg(yi->reg_285c, 0x0285c);
write_reg(yi->reg_2864, 0x02864);
write_reg(yi->reg_2870, 0x02870);
write_reg(yi->reg_2874, 0x02874);
write_reg(yi->reg_2890, 0x02890);
write_reg(yi->reg_289c, 0x0289c);
write_reg(yi->reg_2918, 0x02918);
write_reg(yi->reg_291c, 0x0291c);
write_reg(yi->reg_2920, 0x02920);
write_reg(yi->reg_2924, 0x02924);
write_reg(yi->reg_2928, 0x02928);
write_reg(yi->reg_292c, 0x0292c);
write_reg(yi->reg_2930, 0x02930);
write_reg(yi->reg_2934, 0x02934);
write_reg(yi->reg_2938, 0x02938);
write_reg(yi->reg_293c, 0x0293c);
write_reg(yi->reg_2940, 0x02940);
write_reg(yi->reg_2944, 0x02944);
write_reg(yi->reg_2948, 0x02948);
write_reg(yi->reg_294c, 0x0294c);
write_reg(yi->reg_2950, 0x02950);
write_reg(yi->reg_2954, 0x02954);
write_reg(yi->reg_2958, 0x02958);
write_reg(yi->reg_295c, 0x0295c);
write_reg(yi->reg_2960, 0x02960);
write_reg(yi->reg_2964, 0x02964);
write_reg(yi->reg_2968, 0x02968);
write_reg(yi->reg_296c, 0x0296c);
write_reg(yi->reg_2970, 0x02970);
if ((yi->reg_2834 & 0x0000FFFF) == (yi->reg_2834 >> 16)) {
h_filter = 0;
} else {
h_filter = ((yi->reg_2834 << 16) / (yi->reg_2834 >> 16)) >> 15;
h_filter = (h_filter >> 1) + (h_filter & 1);
h_filter += !h_filter;
}
if ((yi->reg_2918 & 0x0000FFFF) == (yi->reg_2918 >> 16)) {
v_filter_1 = 0;
v_filter_2 = 1;
} else {
v_filter_1 = ((yi->reg_2918 << 16) / (yi->reg_2918 >> 16)) >> 15;
v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1);
v_filter_1 += !v_filter_1;
v_filter_2 = v_filter_1;
}
ivtv_yuv_filter(itv, h_filter, v_filter_1, v_filter_2);
write_reg(0, 0x02814);
write_reg(0, 0x0282c);
write_reg(0, 0x02904);
write_reg(0, 0x02910);
if (yi->blanking_ptr) {
kfree(yi->blanking_ptr);
yi->blanking_ptr = NULL;
pci_unmap_single(itv->pdev, yi->blanking_dmaptr, 720*16, PCI_DMA_TODEVICE);
}
yi->old_frame_info.src_w = 0;
yi->old_frame_info.src_h = 0;
yi->old_frame_info_args.src_w = 0;
yi->old_frame_info_args.src_h = 0;
clear_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
}
| gpl-2.0 |
techomancer/kernel-galaxytab | drivers/usb/host/ehci-hub.c | 34 | 28326 | /*
* Copyright (C) 2001-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI Root Hub ... the nonsharable stuff
*
* Registers don't need cpu_to_le32, that happens transparently
*/
/*-------------------------------------------------------------------------*/
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
#ifdef CONFIG_PM
static int ehci_hub_control(
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
);
/* After a power loss, ports that were owned by the companion must be
* reset so that the companion can still own them.
*/
static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
{
u32 __iomem *reg;
u32 status;
int port;
__le32 buf;
struct usb_hcd *hcd = ehci_to_hcd(ehci);
if (!ehci->owned_ports)
return;
/* Give the connections some time to appear */
msleep(20);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
reg = &ehci->regs->port_status[port];
status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
/* Port already owned by companion? */
if (status & PORT_OWNER)
clear_bit(port, &ehci->owned_ports);
else if (test_bit(port, &ehci->companion_ports))
ehci_writel(ehci, status & ~PORT_PE, reg);
else
ehci_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_RESET, port + 1,
NULL, 0);
}
}
if (!ehci->owned_ports)
return;
msleep(90); /* Wait for resets to complete */
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
ehci_hub_control(hcd, GetPortStatus,
0, port + 1,
(char *) &buf, sizeof(buf));
/* The companion should now own the port,
* but if something went wrong the port must not
* remain enabled.
*/
reg = &ehci->regs->port_status[port];
status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
if (status & PORT_OWNER)
ehci_writel(ehci, status | PORT_CSC, reg);
else {
ehci_dbg(ehci, "failed handover port %d: %x\n",
port + 1, status);
ehci_writel(ehci, status & ~PORT_PE, reg);
}
}
}
ehci->owned_ports = 0;
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
u32 __iomem *hostpc_reg = NULL;
ehci_dbg(ehci, "suspend root hub\n");
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
del_timer_sync(&ehci->watchdog);
del_timer_sync(&ehci->iaa_watchdog);
spin_lock_irq (&ehci->lock);
/* Once the controller is stopped, port resumes that are already
* in progress won't complete. Hence if remote wakeup is enabled
* for the root hub and any ports are in the middle of a resume or
* remote wakeup, we must fail the suspend.
*/
if (hcd->self.root_hub->do_remote_wakeup) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (ehci->reset_done[port] != 0) {
spin_unlock_irq(&ehci->lock);
ehci_dbg(ehci, "suspend failed because "
"port %d is resuming\n",
port + 1);
return -EBUSY;
}
}
}
/* stop schedules, clean any completed work */
if (HC_IS_RUNNING(hcd->state)) {
ehci_quiesce (ehci);
hcd->state = HC_STATE_QUIESCING;
}
ehci->command = ehci_readl(ehci, &ehci->regs->command);
ehci_work(ehci);
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
* then manually resume them in the bus_resume() routine.
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
if (ehci->has_hostpc)
hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
+ HOSTPC0 + 4 * (port & 0xff));
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
t2 |= PORT_SUSPEND;
set_bit(port, &ehci->bus_suspended);
}
/* enable remote wakeup on all ports */
if (hcd->self.root_hub->do_remote_wakeup) {
/* only enable the appropriate wake bits, otherwise the
 * hardware cannot enter PHY low-power mode. If a race
 * occurs here (a connection change while the bits are
 * being set), port change detection will fix it up later.
 */
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
} else {
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
} else
t2 &= ~PORT_WAKE_BITS;
if (t1 != t2) {
ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
if (hostpc_reg) {
u32 t3;
msleep(5);/* 5ms for HCD enter low pwr mode */
t3 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
t3 = ehci_readl(ehci, hostpc_reg);
ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
}
}
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
udelay(150);
/* turn off now-idle HC */
ehci_halt (ehci);
hcd->state = HC_STATE_SUSPENDED;
if (ehci->reclaim)
end_unlink_async(ehci);
/* allow remote wakeup */
mask = INTR_MASK;
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irq (&ehci->lock);
/* ehci_work() may have re-enabled the watchdog timer, which we do not
* want, and so we must delete any pending watchdog timer events.
*/
del_timer_sync(&ehci->watchdog);
return 0;
}
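/*
 * Editor's sketch of the wake-bit policy used above: a connected port is
 * armed to wake on overcurrent or disconnect, an empty port on overcurrent
 * or connect. (Illustrative helper, not part of the original file.)
 */
static inline u32 wake_bits_for_example(u32 portsc)
{
	return (portsc & PORT_CONNECT) ?
		(PORT_WKOC_E | PORT_WKDISC_E) :	/* device present */
		(PORT_WKOC_E | PORT_WKCONN_E);	/* empty port */
}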
/* caller has locked the root hub, and should reset/reinit on error */
static int ehci_bus_resume (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 power_okay;
int i;
u8 resume_needed = 0;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
spin_unlock_irq(&ehci->lock);
return -ESHUTDOWN;
}
if (unlikely(ehci->debug)) {
if (ehci->debug && !dbgp_reset_prep())
ehci->debug = NULL;
else
dbgp_external_startup();
}
/* Ideally we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
* the last user of the controller, not reset/pm hardware keeping
* state we gave to it.
*/
power_okay = ehci_readl(ehci, &ehci->regs->intr_enable);
ehci_dbg(ehci, "resume root hub%s\n",
power_okay ? "" : " after power loss");
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
*/
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
/* re-init operational registers */
ehci_writel(ehci, 0, &ehci->regs->segment);
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
/* restore CMD_RUN, framelist size, and irq threshold */
ehci_writel(ehci, ehci->command, &ehci->regs->command);
/* Some controller/firmware combinations need a delay during which
* they set up the port statuses. See Bugzilla #8190. */
spin_unlock_irq(&ehci->lock);
msleep(8);
spin_lock_irq(&ehci->lock);
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
/* clear phy low power mode before resume */
if (ehci->has_hostpc) {
u32 __iomem *hostpc_reg =
(u32 __iomem *)((u8 *)ehci->regs
+ HOSTPC0 + 4 * (i & 0xff));
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD,
hostpc_reg);
mdelay(5);
}
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
if (test_bit(i, &ehci->bus_suspended) &&
(temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
resume_needed = 1;
}
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
/* msleep for 20ms only if code is trying to resume port */
if (resume_needed) {
spin_unlock_irq(&ehci->lock);
msleep(20);
spin_lock_irq(&ehci->lock);
}
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
if (test_bit(i, &ehci->bus_suspended) &&
(temp & PORT_SUSPEND)) {
temp &= ~(PORT_RWC_BITS | PORT_RESUME);
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
ehci_vdbg (ehci, "resumed port %d\n", i + 1);
}
}
(void) ehci_readl(ehci, &ehci->regs->command);
/* maybe re-activate the schedule(s) */
temp = 0;
if (ehci->async->qh_next.qh)
temp |= CMD_ASE;
if (ehci->periodic_sched)
temp |= CMD_PSE;
if (temp) {
ehci->command |= temp;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
}
ehci->next_statechange = jiffies + msecs_to_jiffies(5);
hcd->state = HC_STATE_RUNNING;
/* Now we can safely re-enable irqs */
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
spin_unlock_irq (&ehci->lock);
ehci_handover_companion_ports(ehci);
return 0;
}
#else
#define ehci_bus_suspend NULL
#define ehci_bus_resume NULL
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/* Display the ports dedicated to the companion controller */
static ssize_t show_companion(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ehci_hcd *ehci;
int nports, index, n;
int count = PAGE_SIZE;
char *ptr = buf;
ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
nports = HCS_N_PORTS(ehci->hcs_params);
for (index = 0; index < nports; ++index) {
if (test_bit(index, &ehci->companion_ports)) {
n = scnprintf(ptr, count, "%d\n", index + 1);
ptr += n;
count -= n;
}
}
return ptr - buf;
}
/*
* Sets the owner of a port
*/
static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
{
u32 __iomem *status_reg;
u32 port_status;
int try;
status_reg = &ehci->regs->port_status[portnum];
/*
* The controller won't set the OWNER bit if the port is
* enabled, so this loop will sometimes require at least two
* iterations: one to disable the port and one to set OWNER.
*/
for (try = 4; try > 0; --try) {
spin_lock_irq(&ehci->lock);
port_status = ehci_readl(ehci, status_reg);
if ((port_status & PORT_OWNER) == new_owner
|| (port_status & (PORT_OWNER | PORT_CONNECT))
== 0)
try = 0;
else {
port_status ^= PORT_OWNER;
port_status &= ~(PORT_PE | PORT_RWC_BITS);
ehci_writel(ehci, port_status, status_reg);
}
spin_unlock_irq(&ehci->lock);
if (try > 1)
msleep(5);
}
}
/*
* Dedicate or undedicate a port to the companion controller.
* Syntax is "[-]portnum", where a leading '-' sign means
* return control of the port to the EHCI controller.
*/
static ssize_t store_companion(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ehci_hcd *ehci;
int portnum, new_owner;
ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
new_owner = PORT_OWNER; /* Owned by companion */
if (sscanf(buf, "%d", &portnum) != 1)
return -EINVAL;
if (portnum < 0) {
portnum = -portnum;
new_owner = 0; /* Owned by EHCI */
}
if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
return -ENOENT;
portnum--;
if (new_owner)
set_bit(portnum, &ehci->companion_ports);
else
clear_bit(portnum, &ehci->companion_ports);
set_owner(ehci, portnum, new_owner);
return count;
}
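/*
 * Editor's note -- example usage of the attribute below (sysfs path is
 * illustrative):
 *   echo 3  > .../companion    # dedicate port 3 to the companion HC
 *   echo -3 > .../companion    # return port 3 to EHCI
 */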
static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
static inline void create_companion_file(struct ehci_hcd *ehci)
{
int i;
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
i = device_create_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
}
static inline void remove_companion_file(struct ehci_hcd *ehci)
{
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
device_remove_file(ehci_to_hcd(ehci)->self.controller,
&dev_attr_companion);
}
/*-------------------------------------------------------------------------*/
static int check_reset_complete (
struct ehci_hcd *ehci,
int index,
u32 __iomem *status_reg,
int port_status
) {
if (!(port_status & PORT_CONNECT))
return port_status;
/* if reset finished and it's still not enabled -- handoff */
if (!(port_status & PORT_PE)) {
/* with integrated TT, there's nobody to hand it to! */
if (ehci_is_TDI(ehci)) {
ehci_dbg (ehci,
"Failed to enable port %d on root hub TT\n",
index+1);
return port_status;
}
ehci_dbg (ehci, "port %d full speed --> companion\n",
index + 1);
// what happens if HCS_N_CC(params) == 0 ?
port_status |= PORT_OWNER;
port_status &= ~PORT_RWC_BITS;
ehci_writel(ehci, port_status, status_reg);
/* ensure 440EPX ohci controller state is operational */
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 1);
} else {
ehci_dbg (ehci, "port %d high speed\n", index + 1);
/* ensure 440EPx ohci controller state is suspended */
if (ehci->has_amcc_usb23)
set_ohci_hcfs(ehci, 0);
}
return port_status;
}
/*-------------------------------------------------------------------------*/
/* build "status change" packet (one or two bytes) from HC registers */
static int
ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp, status = 0;
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
/* if !USB_SUSPEND, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
/* init status to no-changes */
buf [0] = 0;
ports = HCS_N_PORTS (ehci->hcs_params);
if (ports > 7) {
buf [1] = 0;
retval++;
}
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
if (!ignore_oc)
mask = PORT_CSC | PORT_PEC | PORT_OCC;
else
mask = PORT_CSC | PORT_PEC;
// PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
/* no hub change reports (bit 0) for now (power, ...) */
/* port N changes (bit N)? */
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < ports; i++) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
/*
* Return status information even for ports with OWNER set.
* Otherwise khubd wouldn't see the disconnect event when a
* high-speed device is switched over to the companion
* controller by the user.
*/
if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
|| (ehci->reset_done[i] && time_after_eq(
jiffies, ehci->reset_done[i]))) {
if (i < 7)
buf [0] |= 1 << (i + 1);
else
buf [1] |= 1 << (i - 7);
status = STS_PCD;
}
}
/* FIXME autosuspend idle root hubs */
spin_unlock_irqrestore (&ehci->lock, flags);
return status ? retval : 0;
}
/*-------------------------------------------------------------------------*/
static void
ehci_hub_descriptor (
struct ehci_hcd *ehci,
struct usb_hub_descriptor *desc
) {
int ports = HCS_N_PORTS (ehci->hcs_params);
u16 temp;
desc->bDescriptorType = 0x29;
desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
/* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset (&desc->bitmap [0], 0, temp);
memset (&desc->bitmap [temp], 0xff, temp);
temp = 0x0008; /* per-port overcurrent reporting */
if (HCS_PPC (ehci->hcs_params))
temp |= 0x0001; /* per-port power control */
else
temp |= 0x0002; /* no power switching */
#if 0
// re-enable when we support USB_PORT_FEAT_INDICATOR below.
if (HCS_INDICATOR (ehci->hcs_params))
temp |= 0x0080; /* per-port indicators (LEDs) */
#endif
desc->wHubCharacteristics = cpu_to_le16(temp);
}
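/*
 * Editor's sketch: an N-port hub descriptor carries two bitmaps of
 * 1 + N/8 bytes each (DeviceRemovable plus the legacy PortPwrCtrlMask),
 * hence bDescLength = 7 + 2 * (1 + N/8) above; e.g. 6 ports -> 9 bytes,
 * 15 ports -> 11 bytes.
 */
static inline unsigned int hub_desc_len_example(unsigned int ports)
{
	return 7 + 2 * (1 + ports / 8);
}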
/*-------------------------------------------------------------------------*/
static int ehci_hub_control (
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 __iomem *status_reg = &ehci->regs->port_status[
(wIndex & 0xff) - 1];
u32 __iomem *hostpc_reg = NULL;
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
* power, "this is the one", etc. EHCI spec supports this.
*/
if (ehci->has_hostpc)
hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
+ HOSTPC0 + 4 * ((wIndex & 0xff) - 1));
spin_lock_irqsave (&ehci->lock, flags);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = ehci_readl(ehci, status_reg);
/*
* Even if OWNER is set, meaning the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
* USB_PORT_FEAT_C_CONNECTION).
*/
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
ehci_writel(ehci, temp & ~PORT_PE, status_reg);
break;
case USB_PORT_FEAT_C_ENABLE:
ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_PEC,
status_reg);
break;
case USB_PORT_FEAT_SUSPEND:
if (temp & PORT_RESET)
goto error;
if (ehci->no_selective_suspend)
break;
if (temp & PORT_SUSPEND) {
if ((temp & PORT_PE) == 0)
goto error;
/* clear phy low power mode before resume */
if (hostpc_reg) {
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
hostpc_reg);
mdelay(5);
}
/* resume signaling for 20 msec */
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
ehci_writel(ehci, temp | PORT_RESUME,
status_reg);
ehci->reset_done [wIndex] = jiffies
+ msecs_to_jiffies (20);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (ehci->hcs_params))
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
break;
case USB_PORT_FEAT_C_CONNECTION:
ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
status_reg);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_OCC,
status_reg);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
break;
default:
goto error;
}
ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
break;
case GetHubDescriptor:
ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
memset (buf, 0, 4);
//cpu_to_le32s ((u32 *) buf);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
temp = ehci_readl(ehci, status_reg);
// wPortChange bits
if (temp & PORT_CSC)
status |= 1 << USB_PORT_FEAT_C_CONNECTION;
if (temp & PORT_PEC)
status |= 1 << USB_PORT_FEAT_C_ENABLE;
if ((temp & PORT_OCC) && !ignore_oc){
status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
/*
* Hubs should disable port power on over-current.
* However, not all EHCI implementations do this
* automatically, even if they _do_ support per-port
* power switching; they're allowed to just limit the
* current. khubd will turn the power back on.
*/
if (HCS_PPC (ehci->hcs_params)){
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
}
}
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
/* Remote Wakeup received? */
if (!ehci->reset_done[wIndex]) {
/* resume signaling for 20 msec */
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
}
/* resume completed? */
else if (time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
clear_bit(wIndex, &ehci->suspended_ports);
set_bit(wIndex, &ehci->port_c_suspend);
ehci->reset_done[wIndex] = 0;
/* stop resume signaling */
temp = ehci_readl(ehci, status_reg);
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_RESUME),
status_reg);
retval = handshake(ehci, status_reg,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
ehci_err(ehci,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
}
}
/* whoever resets must GetPortStatus to complete it!! */
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
status |= 1 << USB_PORT_FEAT_C_RESET;
ehci->reset_done [wIndex] = 0;
/* force reset to complete */
ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
status_reg);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
retval = handshake(ehci, status_reg,
PORT_RESET, 0, 750);
if (retval != 0) {
ehci_err (ehci, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
temp = check_reset_complete (ehci, wIndex, status_reg,
ehci_readl(ehci, status_reg));
}
if (!(temp & (PORT_RESUME|PORT_RESET)))
ehci->reset_done[wIndex] = 0;
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &ehci->companion_ports)) {
temp &= ~PORT_RWC_BITS;
temp |= PORT_OWNER;
ehci_writel(ehci, temp, status_reg);
ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1);
temp = ehci_readl(ehci, status_reg);
}
/*
* Even if OWNER is set, there's no harm letting khubd
* see the wPortStatus values (they should all be 0 except
* for PORT_POWER anyway).
*/
if (temp & PORT_CONNECT) {
status |= 1 << USB_PORT_FEAT_CONNECTION;
// status may be from integrated TT
if (ehci->has_hostpc) {
temp1 = ehci_readl(ehci, hostpc_reg);
status |= ehci_port_speed(ehci, temp1);
} else
status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
status |= 1 << USB_PORT_FEAT_ENABLE;
/* maybe the port was unsuspended without our knowledge */
if (temp & (PORT_SUSPEND|PORT_RESUME)) {
status |= 1 << USB_PORT_FEAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
ehci->reset_done[wIndex] = 0;
if (temp & PORT_PE)
set_bit(wIndex, &ehci->port_c_suspend);
}
if (temp & PORT_OC)
status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
if (temp & PORT_RESET)
status |= 1 << USB_PORT_FEAT_RESET;
if (temp & PORT_POWER)
status |= 1 << USB_PORT_FEAT_POWER;
if (test_bit(wIndex, &ehci->port_c_suspend))
status |= 1 << USB_PORT_FEAT_C_SUSPEND;
#ifndef VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
#endif
dbg_port (ehci, "GetStatus", wIndex + 1, temp);
put_unaligned_le32(status, buf);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
if (unlikely(ehci->debug)) {
/* If the debug port is active any port
* feature requests should get denied */
if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
(readl(&ehci->debug->control) & DBGP_ENABLED)) {
retval = -ENODEV;
goto error_exit;
}
}
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
temp = ehci_readl(ehci, status_reg);
if (temp & PORT_OWNER)
break;
temp &= ~PORT_RWC_BITS;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (ehci->no_selective_suspend)
break;
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
/* After the above check the port must be connected.
 * Set the appropriate wake bits so the PHY can be put into
 * low-power mode if the controller has the HOSTPC feature.
 */
if (hostpc_reg) {
temp &= ~PORT_WKCONN_E;
temp |= (PORT_WKDISC_E | PORT_WKOC_E);
ehci_writel(ehci, temp | PORT_SUSPEND,
status_reg);
msleep(5);/* 5ms for HCD enter low pwr mode */
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 | HOSTPC_PHCD,
hostpc_reg);
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
wIndex, (temp1 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
if (HCS_PPC (ehci->hcs_params))
ehci_writel(ehci, temp | PORT_POWER,
status_reg);
break;
case USB_PORT_FEAT_RESET:
if (temp & PORT_RESUME)
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
&& !ehci_is_TDI(ehci)
&& PORT_USB11 (temp)) {
ehci_dbg (ehci,
"port %d low speed --> companion\n",
wIndex + 1);
temp |= PORT_OWNER;
} else {
ehci_vdbg (ehci, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
/*
* caller must wait, then call GetPortStatus
* usb 2.0 spec says 50 ms resets on root
*/
ehci->reset_done [wIndex] = jiffies
+ msecs_to_jiffies (50);
}
ehci_writel(ehci, temp, status_reg);
break;
/* For downstream facing ports (these): one hub port is put
* into test mode according to USB2 11.24.2.13, then the hub
* must be reset (which for root hub now means rmmod+modprobe,
* or else system reboot). See EHCI 2.3.9 and 4.14 for info
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
if (!selector || selector > 5)
goto error;
ehci_quiesce(ehci);
ehci_halt(ehci);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
break;
default:
goto error;
}
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}
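/*
 * Editor's note: the 20 ms resume and 50 ms reset windows used above follow
 * USB 2.0 root-port timing; completion is deliberately deferred to the next
 * GetPortStatus, as the in-line comments point out.
 */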
static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
if (ehci_is_TDI(ehci))
return;
set_owner(ehci, --portnum, PORT_OWNER);
}
static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 __iomem *reg;
if (ehci_is_TDI(ehci))
return 0;
reg = &ehci->regs->port_status[portnum - 1];
return ehci_readl(ehci, reg) & PORT_OWNER;
}
| gpl-2.0 |
standak3/ElementalX_4.4.2 | arch/parisc/math-emu/driver.c | 34 | 3011 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* linux/arch/math-emu/driver.c.c
*
* decodes and dispatches unimplemented FPU instructions
*
* Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2001 Hewlett-Packard <bame@debian.org>
*/
#include <linux/sched.h>
#include "float.h"
#include "math-emu.h"
#define fptpos 31
#define fpr1pos 10
#define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1))
#define FPUDEBUG 0
struct exc_reg {
unsigned int exception : 6;
unsigned int ei : 26;
};
#define FP0CE_UID(i) (((i) >> 6) & 3)
#define FP0CE_CLASS(i) (((i) >> 9) & 3)
#define FP0CE_SUBOP(i) (((i) >> 13) & 7)
#define FP0CE_SUBOP1(i) (((i) >> 15) & 7)
#define FP0C_FORMAT(i) (((i) >> 11) & 3)
#define FP0E_FORMAT(i) (((i) >> 11) & 1)
#define FPPM_SUBOP(i) (((i) >> 9) & 0x1f)
#define FP2E_SUBOP(i) (((i) >> 5) & 1)
#define FP2E_FORMAT(i) (((i) >> 11) & 1)
#define FPx6_FORMAT(i) ((i) & 0x1f)
#define FPSW_FLAGS(w) ((w) >> 27)
#define FPSW_ENABLE(w) ((w) & 0x1f)
#define FPSW_V (1<<4)
#define FPSW_Z (1<<3)
#define FPSW_O (1<<2)
#define FPSW_U (1<<1)
#define FPSW_I (1<<0)
int
handle_fpe(struct pt_regs *regs)
{
extern void printbinary(unsigned long x, int nbits);
struct siginfo si;
unsigned int orig_sw, sw;
int signalcode;
__u64 frcopy[36];
memcpy(frcopy, regs->fr, sizeof regs->fr);
frcopy[32] = 0;
memcpy(&orig_sw, frcopy, sizeof(orig_sw));
if (FPUDEBUG) {
printk(KERN_DEBUG "FP VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI ->\n ");
printbinary(orig_sw, 32);
printk(KERN_DEBUG "\n");
}
signalcode = decode_fpu(frcopy, 0x666);
memcpy(&sw, frcopy, sizeof(sw));
if (FPUDEBUG) {
printk(KERN_DEBUG "VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI decode_fpu returns %d|0x%x\n",
signalcode >> 24, signalcode & 0xffffff);
printbinary(sw, 32);
printk(KERN_DEBUG "\n");
}
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
si.si_signo = signalcode >> 24;
si.si_errno = 0;
si.si_code = signalcode & 0xffffff;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(si.si_signo, &si, current);
return -1;
}
return 0;	/* signalcode is necessarily 0 on this path */
}
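/*
 * Editor's sketch: decode_fpu() packs the signal number into the top byte
 * of its return value and the si_code into the low 24 bits, which is how
 * handle_fpe() unpacks it above. (Illustrative helper only.)
 */
static inline int fpe_pack_signal_example(int signo, int code)
{
	return (signo << 24) | (code & 0xffffff);
}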
| gpl-2.0 |
maxrdlf95/htc_m8_maxkernel | drivers/input/touchscreen/intel-mid-touch.c | 34 | 12579 | /*
* Intel MID Resistive Touch Screen Driver
*
* Copyright (C) 2008 Intel Corp
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* Questions/Comments/Bug fixes to Sreedhara (sreedhara.ds@intel.com)
* Ramesh Agarwal (ramesh.agarwal@intel.com)
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* TODO:
* review conversion of r/m/w sequences
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/intel_scu_ipc.h>
#define PMIC_REG_ID1 0x00
#define PMIC_REG_INT 0x04
#define PMIC_REG_MINT 0x05
#define PMIC_REG_ADCINT 0x5F
#define PMIC_REG_MADCINT 0x60
#define PMIC_REG_ADCCNTL1 0x61
#define PMICADDR0 0xA4
#define END_OF_CHANNEL 0x1F
#define PMIC_REG_ADCSNS0H 0x64
#define MRST_TS_CHAN10 0xA
#define MRST_TS_CHAN11 0xB
#define MRST_TS_CHAN12 0xC
#define MRST_TS_CHAN13 0xD
#define MRST_XBIAS 0x20
#define MRST_YBIAS 0x40
#define MRST_ZBIAS 0x80
#define MRST_X_MIN 10
#define MRST_X_MAX 1024
#define MRST_X_FUZZ 5
#define MRST_Y_MIN 10
#define MRST_Y_MAX 1024
#define MRST_Y_FUZZ 5
#define MRST_PRESSURE_MIN 0
#define MRST_PRESSURE_NOMINAL 50
#define MRST_PRESSURE_MAX 100
#define WAIT_ADC_COMPLETION 10
#define ADC_LOOP_DELAY0 0x0
#define ADC_LOOP_DELAY1 0x1
#define PMIC_VENDOR_FS 0
#define PMIC_VENDOR_MAXIM 1
#define PMIC_VENDOR_NEC 2
#define MRSTOUCH_MAX_CHANNELS 32
struct mrstouch_dev {
struct device *dev;
struct input_dev *input;
char phys[32];
u16 asr;
int irq;
unsigned int vendor;
unsigned int rev;
int (*read_prepare)(struct mrstouch_dev *tsdev);
int (*read)(struct mrstouch_dev *tsdev, u16 *x, u16 *y, u16 *z);
int (*read_finish)(struct mrstouch_dev *tsdev);
};
static int mrstouch_nec_adc_read_prepare(struct mrstouch_dev *tsdev)
{
return intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0, 0x20);
}
static int mrstouch_nec_adc_read_finish(struct mrstouch_dev *tsdev)
{
int err;
err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x20, 0x20);
if (!err)
err = intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, 0, 0x05);
return err;
}
static int mrstouch_ts_chan_read(u16 offset, u16 chan, u16 *vp, u16 *vm)
{
int err;
u16 result;
u32 res;
result = PMIC_REG_ADCSNS0H + offset;
if (chan == MRST_TS_CHAN12)
result += 4;
err = intel_scu_ipc_ioread32(result, &res);
if (err)
return err;
*vp = (res & 0xFF) << 3;
*vp |= (res >> 8) & 0x07;
*vp &= 0x3FF;
res >>= 16;
*vm = (res & 0xFF) << 3;
*vm |= (res >> 8) & 0x07;
*vm &= 0x3FF;
return 0;
}
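/*
 * Editor's sketch: each 10-bit ADC sample arrives split across two result
 * bytes -- the 8 MSBs first, then the 3 LSBs -- and is reassembled above as
 * (hi << 3) | (lo & 0x07), masked to 10 bits. E.g. hi = 0xFF, lo = 0x07
 * gives 0x3FF, full scale.
 */
static inline u16 adc10_example(u8 hi, u8 lo)
{
	return (((u16)hi << 3) | (lo & 0x07)) & 0x3FF;
}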
static int mrstouch_ts_bias_set(uint offset, uint bias)
{
int count;
u16 chan, start;
u16 reg[4];
u8 data[4];
chan = PMICADDR0 + offset;
start = MRST_TS_CHAN10;
for (count = 0; count <= 3; count++) {
reg[count] = chan++;
data[count] = bias | (start + count);
}
return intel_scu_ipc_writev(reg, data, 4);
}
static int mrstouch_nec_adc_read(struct mrstouch_dev *tsdev,
u16 *x, u16 *y, u16 *z)
{
int err;
u16 xm, ym, zm;
err = mrstouch_ts_bias_set(tsdev->asr, MRST_YBIAS);
if (err)
goto ipc_error;
msleep(WAIT_ADC_COMPLETION);
err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, x, &xm);
if (err)
goto ipc_error;
err = mrstouch_ts_bias_set(tsdev->asr, MRST_XBIAS);
if (err)
goto ipc_error;
msleep(WAIT_ADC_COMPLETION);
err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN12, y, &ym);
if (err)
goto ipc_error;
err = mrstouch_ts_bias_set(tsdev->asr, MRST_ZBIAS);
if (err)
goto ipc_error;
msleep(WAIT_ADC_COMPLETION);
err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, z, &zm);
if (err)
goto ipc_error;
return 0;
ipc_error:
dev_err(tsdev->dev, "ipc error during adc read\n");
return err;
}
static int mrstouch_fs_adc_read_prepare(struct mrstouch_dev *tsdev)
{
int err, count;
u16 chan;
u16 reg[5];
u8 data[5];
err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x00, 0x02);
if (err)
goto ipc_error;
chan = PMICADDR0 + tsdev->asr;
for (count = 0; count <= 3; count++) {
reg[count] = chan++;
data[count] = 0x2A;
}
reg[count] = chan++;
data[count] = 0;
err = intel_scu_ipc_writev(reg, data, 5);
if (err)
goto ipc_error;
msleep(WAIT_ADC_COMPLETION);
for (count = 0; count <= 3; count++) {
reg[count] = chan++;
data[count] = 0x4A;
}
reg[count] = chan++;
data[count] = 0;
err = intel_scu_ipc_writev(reg, data, 5);
if (err)
goto ipc_error;
msleep(WAIT_ADC_COMPLETION);
err = intel_scu_ipc_iowrite32(chan + 2, 0x8A8A8A8A);
if (err)
goto ipc_error;
msleep(WAIT_ADC_COMPLETION);
return 0;
ipc_error:
dev_err(tsdev->dev, "ipc error during %s\n", __func__);
return err;
}
static int mrstouch_fs_adc_read(struct mrstouch_dev *tsdev,
u16 *x, u16 *y, u16 *z)
{
int err;
u16 result;
u16 reg[4];
u8 data[4];
result = PMIC_REG_ADCSNS0H + tsdev->asr;
reg[0] = result + 4;
reg[1] = result + 5;
reg[2] = result + 16;
reg[3] = result + 17;
err = intel_scu_ipc_readv(reg, data, 4);
if (err)
goto ipc_error;
*x = data[0] << 3;
*x |= data[1] & 0x7;
*x &= 0x3FF;
*y = data[2] << 3;
*y |= data[3] & 0x7;
*y &= 0x3FF;
reg[0] = result + 28;
reg[1] = result + 29;
err = intel_scu_ipc_readv(reg, data, 4);
if (err)
goto ipc_error;
*z = data[0] << 3;
*z |= data[1] & 0x7;
*z &= 0x3FF;
return 0;
ipc_error:
dev_err(tsdev->dev, "ipc error during %s\n", __func__);
return err;
}
static int mrstouch_fs_adc_read_finish(struct mrstouch_dev *tsdev)
{
int err, count;
u16 chan;
u16 reg[5];
u8 data[5];
chan = PMICADDR0 + tsdev->asr;
for (count = 0; count <= 4; count++) {
reg[count] = chan++;
data[count] = 0;
}
err = intel_scu_ipc_writev(reg, data, 5);
if (err)
goto ipc_error;
for (count = 0; count <= 4; count++) {
reg[count] = chan++;
data[count] = 0;
}
err = intel_scu_ipc_writev(reg, data, 5);
if (err)
goto ipc_error;
err = intel_scu_ipc_iowrite32(chan + 2, 0x00000000);
if (err)
goto ipc_error;
err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, 0x02, 0x02);
if (err)
goto ipc_error;
return 0;
ipc_error:
dev_err(tsdev->dev, "ipc error during %s\n", __func__);
return err;
}
static void mrstouch_report_event(struct input_dev *input,
unsigned int x, unsigned int y, unsigned int z)
{
if (z > MRST_PRESSURE_NOMINAL) {
input_report_key(input, BTN_TOUCH, 1);
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
} else {
input_report_key(input, BTN_TOUCH, 0);
}
input_report_abs(input, ABS_PRESSURE, z);
input_sync(input);
}
static irqreturn_t mrstouch_pendet_irq(int irq, void *dev_id)
{
struct mrstouch_dev *tsdev = dev_id;
u16 x, y, z;
if (tsdev->read_prepare(tsdev))
goto out;
do {
if (tsdev->read(tsdev, &x, &y, &z))
break;
mrstouch_report_event(tsdev->input, x, y, z);
} while (z > MRST_PRESSURE_NOMINAL);
tsdev->read_finish(tsdev);
out:
return IRQ_HANDLED;
}
static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
{
int err;
u8 r;
err = intel_scu_ipc_ioread8(PMIC_REG_ID1, &r);
if (err)
return err;
*vendor = r & 0x7;
*rev = (r >> 3) & 0x7;
return 0;
}
static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
{
int found = 0;
int err, i;
u8 r8;
for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8);
if (err)
return err;
if (r8 == END_OF_CHANNEL) {
found = i;
break;
}
}
if (tsdev->vendor == PMIC_VENDOR_FS) {
if (found > MRSTOUCH_MAX_CHANNELS - 18)
return -ENOSPC;
} else {
if (found > MRSTOUCH_MAX_CHANNELS - 4)
return -ENOSPC;
}
return found;
}
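/*
 * Editor's note: the checks above demand at least 18 free ADC slots for the
 * Freescale PMIC but only 4 for NEC/Maxim parts -- presumably matching the
 * longer FS channel-programming sequence used in its read_prepare path.
 */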
static int __devinit mrstouch_ts_chan_set(uint offset)
{
u16 chan;
int ret, count;
chan = PMICADDR0 + offset;
for (count = 0; count <= 3; count++) {
ret = intel_scu_ipc_iowrite8(chan++, MRST_TS_CHAN10 + count);
if (ret)
return ret;
}
return intel_scu_ipc_iowrite8(chan++, END_OF_CHANNEL);
}
static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
{
int err, start;
u8 ra, rm;
err = mrstouch_read_pmic_id(&tsdev->vendor, &tsdev->rev);
if (err) {
dev_err(tsdev->dev, "Unable to read PMIC id\n");
return err;
}
switch (tsdev->vendor) {
case PMIC_VENDOR_NEC:
case PMIC_VENDOR_MAXIM:
tsdev->read_prepare = mrstouch_nec_adc_read_prepare;
tsdev->read = mrstouch_nec_adc_read;
tsdev->read_finish = mrstouch_nec_adc_read_finish;
break;
case PMIC_VENDOR_FS:
tsdev->read_prepare = mrstouch_fs_adc_read_prepare;
tsdev->read = mrstouch_fs_adc_read;
tsdev->read_finish = mrstouch_fs_adc_read_finish;
break;
default:
dev_err(tsdev->dev,
"Unsupported touchscreen: %d\n", tsdev->vendor);
return -ENXIO;
}
start = mrstouch_chan_parse(tsdev);
if (start < 0) {
dev_err(tsdev->dev, "Unable to parse channels\n");
return start;
}
tsdev->asr = start;
if (tsdev->vendor == PMIC_VENDOR_FS) {
ra = 0xE0 | ADC_LOOP_DELAY0;
rm = 0x5;
} else {
ra = 0xE0 | ADC_LOOP_DELAY1;
rm = 0x0;
err = mrstouch_ts_chan_set(tsdev->asr);
if (err)
return err;
}
err = intel_scu_ipc_update_register(PMIC_REG_ADCCNTL1, ra, 0xE7);
if (err)
return err;
err = intel_scu_ipc_update_register(PMIC_REG_MADCINT, rm, 0x03);
if (err)
return err;
return 0;
}
static int __devinit mrstouch_probe(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev;
struct input_dev *input;
int err;
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no interrupt assigned\n");
return -EINVAL;
}
tsdev = kzalloc(sizeof(struct mrstouch_dev), GFP_KERNEL);
input = input_allocate_device();
if (!tsdev || !input) {
dev_err(&pdev->dev, "unable to allocate memory\n");
err = -ENOMEM;
goto err_free_mem;
}
tsdev->dev = &pdev->dev;
tsdev->input = input;
tsdev->irq = irq;
snprintf(tsdev->phys, sizeof(tsdev->phys),
"%s/input0", dev_name(tsdev->dev));
err = mrstouch_adc_init(tsdev);
if (err) {
dev_err(&pdev->dev, "ADC initialization failed\n");
goto err_free_mem;
}
input->name = "mrst_touchscreen";
input->phys = tsdev->phys;
input->dev.parent = tsdev->dev;
input->id.vendor = tsdev->vendor;
input->id.version = tsdev->rev;
input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(tsdev->input, ABS_X,
MRST_X_MIN, MRST_X_MAX, MRST_X_FUZZ, 0);
input_set_abs_params(tsdev->input, ABS_Y,
MRST_Y_MIN, MRST_Y_MAX, MRST_Y_FUZZ, 0);
input_set_abs_params(tsdev->input, ABS_PRESSURE,
MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
0, "mrstouch", tsdev);
if (err) {
dev_err(tsdev->dev, "unable to allocate irq\n");
goto err_free_mem;
}
err = input_register_device(tsdev->input);
if (err) {
dev_err(tsdev->dev, "unable to register input device\n");
goto err_free_irq;
}
platform_set_drvdata(pdev, tsdev);
return 0;
err_free_irq:
free_irq(tsdev->irq, tsdev);
err_free_mem:
input_free_device(input);
kfree(tsdev);
return err;
}
static int __devexit mrstouch_remove(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev = platform_get_drvdata(pdev);
free_irq(tsdev->irq, tsdev);
input_unregister_device(tsdev->input);
kfree(tsdev);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver mrstouch_driver = {
.driver = {
.name = "pmic_touch",
.owner = THIS_MODULE,
},
.probe = mrstouch_probe,
.remove = __devexit_p(mrstouch_remove),
};
module_platform_driver(mrstouch_driver);
MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
invisiblek/android_kernel_htc_m8 | drivers/net/wireless/p54/txrx.c | 34 | 22597 | /*
* Common code for mac80211 Prism54 drivers
*
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* Based on:
* - the islsm (softmac prism54) driver, which is:
* Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
* - stlc45xx driver
* Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>
#include <net/mac80211.h>
#include "p54.h"
#include "lmac.h"
#ifdef P54_MM_DEBUG
static void p54_dump_tx_queue(struct p54_common *priv)
{
unsigned long flags;
struct ieee80211_tx_info *info;
struct p54_tx_info *range;
struct sk_buff *skb;
struct p54_hdr *hdr;
unsigned int i = 0;
u32 prev_addr;
u32 largest_hole = 0, free;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n",
skb_queue_len(&priv->tx_queue));
prev_addr = priv->rx_start;
skb_queue_walk(&priv->tx_queue, skb) {
info = IEEE80211_SKB_CB(skb);
range = (void *) info->rate_driver_data;
hdr = (void *) skb->data;
free = range->start_addr - prev_addr;
wiphy_debug(priv->hw->wiphy,
"| [%02d] => [skb:%p skb_len:0x%04x "
"hdr:{flags:%02x len:%04x req_id:%04x type:%02x} "
"mem:{start:%04x end:%04x, free:%d}]\n",
i++, skb, skb->len,
le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len),
le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type),
range->start_addr, range->end_addr, free);
prev_addr = range->end_addr;
largest_hole = max(largest_hole, free);
}
free = priv->rx_end - prev_addr;
largest_hole = max(largest_hole, free);
wiphy_debug(priv->hw->wiphy,
"\\ --- [free: %d], largest free block: %d ---\n",
free, largest_hole);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
}
#endif
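/*
 * Reserve a slot in the device's transmit memory window for this
 * frame: round the required size (headroom + payload + tailroom +
 * extra_len) up to a 4-byte boundary, walk the address-sorted
 * tx_queue looking for the first hole large enough, and link the skb
 * into the queue at that position. Returns -EBUSY when the queue is
 * full (32 entries) and -ENOSPC when no suitable hole exists.
 */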
static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
{
struct sk_buff *entry, *target_skb = NULL;
struct ieee80211_tx_info *info;
struct p54_tx_info *range;
struct p54_hdr *data = (void *) skb->data;
unsigned long flags;
u32 last_addr = priv->rx_start;
u32 target_addr = priv->rx_start;
u16 len = priv->headroom + skb->len + priv->tailroom + 3;
info = IEEE80211_SKB_CB(skb);
range = (void *) info->rate_driver_data;
len = (range->extra_len + len) & ~0x3;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) {
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return -EBUSY;
}
skb_queue_walk(&priv->tx_queue, entry) {
u32 hole_size;
info = IEEE80211_SKB_CB(entry);
range = (void *) info->rate_driver_data;
hole_size = range->start_addr - last_addr;
if (!target_skb && hole_size >= len) {
target_skb = entry->prev;
hole_size -= len;
target_addr = last_addr;
break;
}
last_addr = range->end_addr;
}
if (unlikely(!target_skb)) {
if (priv->rx_end - last_addr >= len) {
target_skb = priv->tx_queue.prev;
if (!skb_queue_empty(&priv->tx_queue)) {
info = IEEE80211_SKB_CB(target_skb);
range = (void *)info->rate_driver_data;
target_addr = range->end_addr;
}
} else {
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return -ENOSPC;
}
}
info = IEEE80211_SKB_CB(skb);
range = (void *) info->rate_driver_data;
range->start_addr = target_addr;
range->end_addr = target_addr + len;
data->req_id = cpu_to_le32(target_addr + priv->headroom);
if (IS_DATA_FRAME(skb) &&
unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON))
priv->beacon_req_id = data->req_id;
__skb_queue_after(&priv->tx_queue, target_skb, skb);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return 0;
}
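/*
 * Try to hand the next pending frame to the hardware; if no device
 * memory is available yet, put it back at the head of tx_pending so
 * frame ordering is preserved.
 */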
static void p54_tx_pending(struct p54_common *priv)
{
struct sk_buff *skb;
int ret;
skb = skb_dequeue(&priv->tx_pending);
if (unlikely(!skb))
		return;
ret = p54_assign_address(priv, skb);
if (unlikely(ret))
skb_queue_head(&priv->tx_pending, skb);
else
priv->tx(priv->hw, skb);
}
static void p54_wake_queues(struct p54_common *priv)
{
unsigned long flags;
unsigned int i;
if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
		return;
p54_tx_pending(priv);
spin_lock_irqsave(&priv->tx_stats_lock, flags);
for (i = 0; i < priv->hw->queues; i++) {
if (priv->tx_stats[i + P54_QUEUE_DATA].len <
priv->tx_stats[i + P54_QUEUE_DATA].limit)
ieee80211_wake_queue(priv->hw, i);
}
spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
}
static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
struct sk_buff *skb,
const u16 p54_queue)
{
struct p54_tx_queue_stats *queue;
unsigned long flags;
if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
return -EINVAL;
queue = &priv->tx_stats[p54_queue];
spin_lock_irqsave(&priv->tx_stats_lock, flags);
if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) {
spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
return -ENOSPC;
}
queue->len++;
queue->count++;
if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) {
u16 ac_queue = p54_queue - P54_QUEUE_DATA;
ieee80211_stop_queue(priv->hw, ac_queue);
}
spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
return 0;
}
static void p54_tx_qos_accounting_free(struct p54_common *priv,
struct sk_buff *skb)
{
if (IS_DATA_FRAME(skb)) {
unsigned long flags;
spin_lock_irqsave(&priv->tx_stats_lock, flags);
priv->tx_stats[GET_HW_QUEUE(skb)].len--;
spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) {
if (priv->beacon_req_id == GET_REQ_ID(skb)) {
priv->beacon_req_id = 0;
}
complete(&priv->beacon_comp);
}
}
p54_wake_queues(priv);
}
void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct p54_common *priv = dev->priv;
if (unlikely(!skb))
		return;
skb_unlink(skb, &priv->tx_queue);
p54_tx_qos_accounting_free(priv, skb);
ieee80211_free_txskb(dev, skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);
static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv,
const __le32 req_id)
{
struct sk_buff *entry;
unsigned long flags;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
skb_queue_walk(&priv->tx_queue, entry) {
struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
if (hdr->req_id == req_id) {
__skb_unlink(entry, &priv->tx_queue);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
p54_tx_qos_accounting_free(priv, entry);
return entry;
}
}
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return NULL;
}
void p54_tx(struct p54_common *priv, struct sk_buff *skb)
{
skb_queue_tail(&priv->tx_pending, skb);
p54_tx_pending(priv);
}
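/*
 * Convert the firmware's raw RSSI reading to dBm: either via the
 * per-device calibration values in cur_rssi, or, for rxhw type 5
 * (the "longbow" variant also special-cased in the tx path below),
 * via a fixed linear mapping.
 */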
static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
{
if (priv->rxhw != 5) {
return ((rssi * priv->cur_rssi->mul) / 64 +
priv->cur_rssi->add) / 4;
} else {
return rssi / 2 - 110;
}
}
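/*
 * Powersave workaround: parse the TIM element of beacons from the
 * associated BSS and override the powersave setting whenever the
 * traffic-indication bit for our AID changes.
 */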
static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *) skb->data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
bool new_psm;
if (!ieee80211_is_beacon(hdr->frame_control))
return;
if (!priv->aid)
return;
if (compare_ether_addr(hdr->addr3, priv->bssid))
return;
tim = p54_find_ie(skb, WLAN_EID_TIM);
if (!tim)
return;
tim_len = tim[1];
tim_ie = (struct ieee80211_tim_ie *) &tim[2];
new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid);
if (new_psm != priv->powersave_override) {
priv->powersave_override = new_psm;
p54_set_ps(priv);
}
}
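/*
 * Process one received data frame: drop frames with a bad FCS, fill
 * in mac80211's rx_status (signal, rate, frequency, and a 64-bit
 * mactime extended from the 32-bit hardware TSF), strip the firmware
 * header and hand the frame to mac80211.
 */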
static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
{
struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
u16 freq = le16_to_cpu(hdr->freq);
size_t header_len = sizeof(*hdr);
u32 tsf32;
u8 rate = hdr->rate & 0xf;
if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
return 0;
if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD)))
return 0;
if (hdr->decrypt_status == P54_DECRYPT_OK)
rx_status->flag |= RX_FLAG_DECRYPTED;
if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) ||
(hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP))
rx_status->flag |= RX_FLAG_MMIC_ERROR;
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
if (hdr->rate & 0x10)
rx_status->flag |= RX_FLAG_SHORTPRE;
if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
else
rx_status->rate_idx = rate;
rx_status->freq = freq;
rx_status->band = priv->hw->conf.channel->band;
rx_status->antenna = hdr->antenna;
tsf32 = le32_to_cpu(hdr->tsf32);
if (tsf32 < priv->tsf_low32)
priv->tsf_high32++;
rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
priv->tsf_low32 = tsf32;
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
header_len += hdr->align[0];
skb_pull(skb, header_len);
skb_trim(skb, le16_to_cpu(hdr->len));
if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS))
p54_pspoll_workaround(priv, skb);
ieee80211_rx_irqsafe(priv->hw, skb);
ieee80211_queue_delayed_work(priv->hw, &priv->work,
msecs_to_jiffies(P54_STATISTICS_UPDATE));
return -1;
}
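/*
 * TX status handler: match the firmware's completion report against
 * the still-queued skb by req_id, rebuild the per-rate retry counts
 * from the reported number of tries, undo the crypto header/trailer
 * padding and report the final status back to mac80211.
 */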
static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
struct ieee80211_tx_info *info;
struct p54_hdr *entry_hdr;
struct p54_tx_data *entry_data;
struct sk_buff *entry;
unsigned int pad = 0, frame_len;
int count, idx;
entry = p54_find_and_unlink_skb(priv, hdr->req_id);
if (unlikely(!entry))
		return;
frame_len = entry->len;
info = IEEE80211_SKB_CB(entry);
entry_hdr = (struct p54_hdr *) entry->data;
entry_data = (struct p54_tx_data *) entry_hdr->data;
priv->stats.dot11ACKFailureCount += payload->tries - 1;
if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) {
dev_kfree_skb_any(entry);
		return;
}
memset(&info->status.ampdu_ack_len, 0,
sizeof(struct ieee80211_tx_info) -
offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
status.ampdu_ack_len) != 23);
if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
pad = entry_data->align[0];
count = payload->tries;
for (idx = 0; idx < 4; idx++) {
if (count >= info->status.rates[idx].count) {
count -= info->status.rates[idx].count;
} else if (count > 0) {
info->status.rates[idx].count = count;
count = 0;
} else {
info->status.rates[idx].idx = -1;
info->status.rates[idx].count = 0;
}
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
!(payload->status & P54_TX_FAILED))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
info->status.ack_signal = p54_rssi_to_dbm(priv,
(int)payload->ack_rssi);
switch (entry_data->key_type) {
case P54_CRYPTO_TKIPMICHAEL: {
u8 *iv = (u8 *)(entry_data->align + pad +
entry_data->crypt_offset);
iv[2] = iv[0];
iv[0] = iv[1];
iv[1] = (iv[0] | 0x20) & 0x7f;
frame_len -= 12;
break;
}
case P54_CRYPTO_AESCCMP:
frame_len -= 8;
break;
case P54_CRYPTO_WEP:
frame_len -= 4;
break;
}
skb_trim(entry, frame_len);
skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
ieee80211_tx_status_irqsafe(priv->hw, entry);
}
static void p54_rx_eeprom_readback(struct p54_common *priv,
struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
struct sk_buff *tmp;
if (!priv->eeprom)
		return;
if (priv->fw_var >= 0x509) {
memcpy(priv->eeprom, eeprom->v2.data,
le16_to_cpu(eeprom->v2.len));
} else {
memcpy(priv->eeprom, eeprom->v1.data,
le16_to_cpu(eeprom->v1.len));
}
priv->eeprom = NULL;
tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
dev_kfree_skb_any(tmp);
complete(&priv->eeprom_comp);
}
static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
struct sk_buff *tmp;
struct ieee80211_channel *chan;
unsigned int i, rssi, tx, cca, dtime, dtotal, dcca, dtx, drssi, unit;
u32 tsf32;
if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
		return;
tsf32 = le32_to_cpu(stats->tsf32);
if (tsf32 < priv->tsf_low32)
priv->tsf_high32++;
priv->tsf_low32 = tsf32;
priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);
priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise));
dtime = tsf32 - priv->survey_raw.timestamp;
cca = le32_to_cpu(stats->sample_cca);
tx = le32_to_cpu(stats->sample_tx);
rssi = 0;
for (i = 0; i < ARRAY_SIZE(stats->sample_noise); i++)
rssi += le32_to_cpu(stats->sample_noise[i]);
dcca = cca - priv->survey_raw.cached_cca;
drssi = rssi - priv->survey_raw.cached_rssi;
dtx = tx - priv->survey_raw.cached_tx;
dtotal = dcca + drssi + dtx;
if (dtotal && (priv->update_stats || dtime >= USEC_PER_SEC) &&
dtime >= dtotal) {
priv->survey_raw.timestamp = tsf32;
priv->update_stats = false;
unit = dtime / dtotal;
if (dcca) {
priv->survey_raw.cca += dcca * unit;
priv->survey_raw.cached_cca = cca;
}
if (dtx) {
priv->survey_raw.tx += dtx * unit;
priv->survey_raw.cached_tx = tx;
}
if (drssi) {
priv->survey_raw.rssi += drssi * unit;
priv->survey_raw.cached_rssi = rssi;
}
if (!(priv->phy_ps || priv->phy_idle))
priv->survey_raw.active += dtotal * unit;
else
priv->survey_raw.active += (dcca + dtx) * unit;
}
chan = priv->curchan;
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
survey->noise = clamp_t(s8, priv->noise, -128, 127);
survey->channel_time = priv->survey_raw.active;
survey->channel_time_tx = priv->survey_raw.tx;
survey->channel_time_busy = priv->survey_raw.tx +
priv->survey_raw.cca;
do_div(survey->channel_time, 1024);
do_div(survey->channel_time_tx, 1024);
do_div(survey->channel_time_busy, 1024);
}
tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
dev_kfree_skb_any(tmp);
complete(&priv->stat_comp);
}
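/* Handle asynchronous event notifications ("traps") from the firmware. */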
static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_trap *trap = (struct p54_trap *) hdr->data;
u16 event = le16_to_cpu(trap->event);
u16 freq = le16_to_cpu(trap->frequency);
switch (event) {
case P54_TRAP_BEACON_TX:
break;
case P54_TRAP_RADAR:
wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
break;
case P54_TRAP_NO_BEACON:
if (priv->vif)
ieee80211_beacon_loss(priv->vif);
break;
case P54_TRAP_SCAN:
break;
case P54_TRAP_TBTT:
break;
case P54_TRAP_TIMER:
break;
case P54_TRAP_FAA_RADIO_OFF:
wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
break;
case P54_TRAP_FAA_RADIO_ON:
wiphy_rfkill_set_hw_state(priv->hw->wiphy, false);
break;
default:
wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n",
event, freq);
break;
}
}
static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb)
{
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
switch (le16_to_cpu(hdr->type)) {
case P54_CONTROL_TYPE_TXDONE:
p54_rx_frame_sent(priv, skb);
break;
case P54_CONTROL_TYPE_TRAP:
p54_rx_trap(priv, skb);
break;
case P54_CONTROL_TYPE_BBP:
break;
case P54_CONTROL_TYPE_STAT_READBACK:
p54_rx_stats(priv, skb);
break;
case P54_CONTROL_TYPE_EEPROM_READBACK:
p54_rx_eeprom_readback(priv, skb);
break;
default:
wiphy_debug(priv->hw->wiphy,
"not handling 0x%02x type control frame\n",
le16_to_cpu(hdr->type));
break;
}
return 0;
}
int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct p54_common *priv = dev->priv;
u16 type = le16_to_cpu(*((__le16 *)skb->data));
if (type & P54_HDR_FLAG_CONTROL)
return p54_rx_control(priv, skb);
else
return p54_rx_data(priv, skb);
}
EXPORT_SYMBOL_GPL(p54_rx);
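/*
 * Examine an outgoing 802.11 frame and derive the firmware header
 * fields: hardware queue, association ID, flags and any extra tail
 * room needed (e.g. space for the TIM element in beacons).
 */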
static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
struct ieee80211_tx_info *info, u8 *queue,
u32 *extra_len, u16 *flags, u16 *aid,
bool *burst_possible)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
*burst_possible = true;
else
*burst_possible = false;
if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
*queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA;
switch (priv->mode) {
case NL80211_IFTYPE_MONITOR:
*aid = 0;
*flags |= P54_HDR_FLAG_DATA_OUT_PROMISC;
break;
case NL80211_IFTYPE_STATION:
*aid = 1;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
*aid = 0;
*queue = P54_QUEUE_CAB;
return;
}
if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
if (ieee80211_is_probe_resp(hdr->frame_control)) {
*aid = 0;
*flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
P54_HDR_FLAG_DATA_OUT_NOCANCEL;
return;
} else if (ieee80211_is_beacon(hdr->frame_control)) {
*aid = 0;
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
return;
}
*flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
*queue = P54_QUEUE_BEACON;
*extra_len = IEEE80211_MAX_TIM_LEN;
return;
}
}
if (info->control.sta)
*aid = info->control.sta->aid;
break;
}
}
static u8 p54_convert_algo(u32 cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
return P54_CRYPTO_WEP;
case WLAN_CIPHER_SUITE_TKIP:
return P54_CRYPTO_TKIPMICHAEL;
case WLAN_CIPHER_SUITE_CCMP:
return P54_CRYPTO_AESCCMP;
default:
return 0;
}
}
void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct p54_common *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct p54_tx_info *p54info;
struct p54_hdr *hdr;
struct p54_tx_data *txhdr;
unsigned int padding, len, extra_len = 0;
int i, j, ridx;
u16 hdr_flags = 0, aid = 0;
u8 rate, queue = 0, crypt_offset = 0;
u8 cts_rate = 0x20;
u8 rc_flags;
u8 calculated_tries[4];
u8 nrates = 0, nremaining = 8;
bool burst_allowed = false;
p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
ieee80211_free_txskb(dev, skb);
return;
}
padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
len = skb->len;
if (info->control.hw_key) {
crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
u8 *iv = (u8 *)(skb->data + crypt_offset);
iv[1] = iv[0];
iv[0] = iv[2];
iv[2] = 0;
}
}
txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding);
hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr));
if (padding)
hdr_flags |= P54_HDR_FLAG_DATA_ALIGN;
hdr->type = cpu_to_le16(aid);
hdr->rts_tries = info->control.rates[0].count;
cts_rate = info->control.rts_cts_rate_idx;
memset(&txhdr->rateset, 0, sizeof(txhdr->rateset));
for (i = 0; i < dev->max_rates; i++) {
if (info->control.rates[i].idx < 0)
break;
nrates++;
}
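	/*
	 * Distribute the up-to-8 hardware (re)tries across the rates
	 * supplied by the rate-control algorithm: cap each rate's share
	 * first, then hand any remaining tries to the fallback rates
	 * (highest index first).
	 */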
for (i = 0; i < nrates; i++) {
calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1,
info->control.rates[i].count);
nremaining -= calculated_tries[i];
}
for (i = nrates - 1; nremaining > 0 && i >= 0; i--) {
int tmp = info->control.rates[i].count - calculated_tries[i];
if (tmp <= 0)
continue;
tmp = min_t(int, tmp, nremaining);
calculated_tries[i] += tmp;
nremaining -= tmp;
}
ridx = 0;
for (i = 0; i < nrates && ridx < 8; i++) {
rate = info->control.rates[i].idx;
if (info->band == IEEE80211_BAND_5GHZ)
rate += 4;
info->control.rates[i].count = calculated_tries[i];
rc_flags = info->control.rates[i].flags;
if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) {
rate |= 0x10;
cts_rate |= 0x10;
}
if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
burst_allowed = false;
rate |= 0x40;
} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
rate |= 0x20;
burst_allowed = false;
}
for (j = 0; j < calculated_tries[i] && ridx < 8; j++) {
txhdr->rateset[ridx] = rate;
ridx++;
}
}
if (burst_allowed)
hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST;
hdr->flags = cpu_to_le16(hdr_flags);
hdr->tries = ridx;
txhdr->rts_rate_idx = 0;
if (info->control.hw_key) {
txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher);
txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
len += 8;
memcpy(skb_put(skb, 8), &(info->control.hw_key->key
[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8);
}
len += info->control.hw_key->icv_len;
memset(skb_put(skb, info->control.hw_key->icv_len), 0,
info->control.hw_key->icv_len);
} else {
txhdr->key_type = 0;
txhdr->key_len = 0;
}
txhdr->crypt_offset = crypt_offset;
txhdr->hw_queue = queue;
txhdr->backlog = priv->tx_stats[queue].len - 1;
memset(txhdr->durations, 0, sizeof(txhdr->durations));
txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ?
2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
if (priv->rxhw == 5) {
txhdr->longbow.cts_rate = cts_rate;
txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
} else {
txhdr->normal.output_power = priv->output_power;
txhdr->normal.cts_rate = cts_rate;
}
if (padding)
txhdr->align[0] = padding;
hdr->len = cpu_to_le16(len);
p54info = (void *) info->rate_driver_data;
p54info->extra_len = extra_len;
p54_tx(priv, skb);
}
| gpl-2.0 |
Grauniad/valgrind | memcheck/tests/solaris/scalar_tsol_clearance.c | 34 | 1591 | /* Scalar test for new labelsys syscall subcodes TSOL_GETCLEARANCE
and TSOL_SETCLEARANCE available on Solaris 11. */
#include "scalar.h"
#include <sys/syscall.h>
#include <sys/tsol/tsyscall.h>
#include <tsol/label.h>
__attribute__((noinline))
static void sys_labelsys(void)
{
GO(SYS_labelsys, "(TSOL_GETCLEARANCE) 2s 1m");
SY(SYS_labelsys, x0 + TSOL_GETCLEARANCE, x0 + 1); FAIL;
}
__attribute__((noinline))
static void sys_labelsys2(void)
{
m_label_t *label = m_label_alloc(USER_CLEAR);
if (label == NULL) {
perror("m_label_alloc");
return;
}
GO(SYS_labelsys, "(TSOL_GETCLEARANCE) 1s 0m");
SY(SYS_labelsys, x0 + TSOL_GETCLEARANCE, label); SUCC;
m_label_free(label);
}
__attribute__((noinline))
static void sys_labelsys3(void)
{
GO(SYS_labelsys, "(TSOL_SETCLEARANCE) 2s 1m");
SY(SYS_labelsys, x0 + TSOL_SETCLEARANCE, x0 + 1); FAIL;
}
__attribute__((noinline))
static void sys_labelsys4(void)
{
m_label_t *label = m_label_alloc(USER_CLEAR);
if (label == NULL) {
perror("m_label_alloc");
return;
}
int ret = getclearance(label);
if (ret != 0) {
perror("getclearance");
m_label_free(label);
return;
}
GO(SYS_labelsys, "(TSOL_SETCLEARANCE) 1s 0m");
SY(SYS_labelsys, x0 + TSOL_SETCLEARANCE, label); SUCC;
m_label_free(label);
}
int main(void)
{
/* Uninitialised, but we know px[0] is 0x0. */
long *px = malloc(sizeof(long));
x0 = px[0];
/* SYS_labelsys 52 */
sys_labelsys();
sys_labelsys2();
sys_labelsys3();
sys_labelsys4();
return 0;
}
| gpl-2.0 |
jfdsmabalot/kernel_sense_m8 | arch/mips/pci/fixup-tb0287.c | 34 | 1554 | /*
* fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
*
* Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/vr41xx/tb0287.h>
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
unsigned char bus;
int irq = -1;
bus = dev->bus->number;
if (bus == 0) {
switch (slot) {
case 16:
irq = TB0287_SM501_IRQ;
break;
case 17:
irq = TB0287_SIL680A_IRQ;
break;
default:
break;
}
} else if (bus == 1) {
switch (PCI_SLOT(dev->devfn)) {
case 0:
irq = TB0287_PCI_SLOT_IRQ;
break;
case 2:
case 3:
irq = TB0287_RTL8110_IRQ;
break;
default:
break;
}
} else if (bus > 1) {
irq = TB0287_PCI_SLOT_IRQ;
}
return irq;
}
int pcibios_plat_dev_init(struct pci_dev *dev)
{
return 0;
}
| gpl-2.0 |
lonelydra/furnace_kernel_htc_m8 | drivers/net/wireless/ath/ath5k/pci.c | 34 | 6871 | /*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include "../ath.h"
#include "ath5k.h"
#include "debug.h"
#include "base.h"
#include "reg.h"
static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) },
{ PCI_VDEVICE(ATHEROS, 0x0007) },
{ PCI_VDEVICE(ATHEROS, 0x0011) },
{ PCI_VDEVICE(ATHEROS, 0x0012) },
{ PCI_VDEVICE(ATHEROS, 0x0013) },
{ PCI_VDEVICE(3COM_2, 0x0013) },
{ PCI_VDEVICE(3COM, 0x0013) },
{ PCI_VDEVICE(ATHEROS, 0x1014) },
{ PCI_VDEVICE(ATHEROS, 0x0014) },
{ PCI_VDEVICE(ATHEROS, 0x0015) },
{ PCI_VDEVICE(ATHEROS, 0x0016) },
{ PCI_VDEVICE(ATHEROS, 0x0017) },
{ PCI_VDEVICE(ATHEROS, 0x0018) },
{ PCI_VDEVICE(ATHEROS, 0x0019) },
{ PCI_VDEVICE(ATHEROS, 0x001a) },
{ PCI_VDEVICE(ATHEROS, 0x001b) },
{ PCI_VDEVICE(ATHEROS, 0x001c) },
{ PCI_VDEVICE(ATHEROS, 0x001d) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
u8 u8tmp;
pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
*csz = (int)u8tmp;
if (*csz == 0)
*csz = L1_CACHE_BYTES >> 2;
}
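/*
 * Read a 16-bit word from the EEPROM: kick off the transfer through
 * the register interface (which differs on AR5210) and poll the
 * status register until the read completes or times out.
 */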
static bool
ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
u32 status, timeout;
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
} else {
ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
AR5K_EEPROM_CMD_READ);
}
for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
if (status & AR5K_EEPROM_STAT_RDDONE) {
if (status & AR5K_EEPROM_STAT_RDERR)
return false;
*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
0xffff);
return true;
}
usleep_range(15, 20);
}
return false;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
return 0;
}
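/*
 * Extract the MAC address from EEPROM words 0x1f down to 0x1d (each
 * word's high byte comes first); reject the all-zero and all-0xff
 * patterns left by blank EEPROMs.
 */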
static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
u8 mac_d[ETH_ALEN] = {};
u32 total, offset;
u16 data;
int octet;
AR5K_EEPROM_READ(0x20, data);
for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
AR5K_EEPROM_READ(offset, data);
total += data;
mac_d[octet + 1] = data & 0xff;
mac_d[octet] = data >> 8;
octet += 2;
}
if (!total || total == 3 * 0xffff)
return -EINVAL;
memcpy(mac, mac_d, ETH_ALEN);
return 0;
}
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath5k_pci_read_cachesize,
.eeprom_read = ath5k_pci_eeprom_read,
.eeprom_read_mac = ath5k_pci_eeprom_read_mac,
};
static int __devinit
ath5k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *mem;
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
int ret;
u8 csz;
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "can't enable device\n");
goto err;
}
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "32-bit DMA not available\n");
goto err_dis;
}
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
if (csz == 0) {
csz = L1_CACHE_BYTES >> 2;
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
}
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
pci_set_master(pdev);
pci_write_config_byte(pdev, 0x41, 0);
ret = pci_request_region(pdev, 0, "ath5k");
if (ret) {
dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
goto err_dis;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
dev_err(&pdev->dev, "cannot remap PCI memory region\n");
ret = -EIO;
goto err_reg;
}
hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
ret = -ENOMEM;
goto err_map;
}
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
ah = hw->priv;
ah->hw = hw;
ah->pdev = pdev;
ah->dev = &pdev->dev;
ah->irq = pdev->irq;
ah->devid = id->device;
ah->iobase = mem;
ret = ath5k_init_ah(ah, &ath_pci_bus_ops);
if (ret)
goto err_free;
pci_set_drvdata(pdev, hw);
return 0;
err_free:
ieee80211_free_hw(hw);
err_map:
pci_iounmap(pdev, mem);
err_reg:
pci_release_region(pdev, 0);
err_dis:
pci_disable_device(pdev);
err:
return ret;
}
static void __devexit
ath5k_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_deinit_ah(ah);
pci_iounmap(pdev, ah->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
ieee80211_free_hw(hw);
}
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
return 0;
}
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
pci_write_config_byte(pdev, 0x41, 0);
ath5k_led_enable(ah);
return 0;
}
static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
#endif
static struct pci_driver ath5k_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
.remove = __devexit_p(ath5k_pci_remove),
.driver.pm = ATH5K_PM_OPS,
};
static int __init
init_ath5k_pci(void)
{
int ret;
ret = pci_register_driver(&ath5k_pci_driver);
if (ret) {
printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
return ret;
}
return 0;
}
static void __exit
exit_ath5k_pci(void)
{
pci_unregister_driver(&ath5k_pci_driver);
}
module_init(init_ath5k_pci);
module_exit(exit_ath5k_pci);
| gpl-2.0 |
Hybrid-Rom/kernel_htc_msm8974 | fs/cachefiles/bind.c | 34 | 6347 | /* Bind and unbind a cache from the filesystem backing it
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include "internal.h"
static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache);
int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
_enter("{%u,%u,%u,%u,%u,%u},%s",
cache->frun_percent,
cache->fcull_percent,
cache->fstop_percent,
cache->brun_percent,
cache->bcull_percent,
cache->bstop_percent,
args);
ASSERT(cache->fstop_percent >= 0 &&
cache->fstop_percent < cache->fcull_percent &&
cache->fcull_percent < cache->frun_percent &&
cache->frun_percent < 100);
ASSERT(cache->bstop_percent >= 0 &&
cache->bstop_percent < cache->bcull_percent &&
cache->bcull_percent < cache->brun_percent &&
cache->brun_percent < 100);
if (*args) {
kerror("'bind' command doesn't take an argument");
return -EINVAL;
}
if (!cache->rootdirname) {
kerror("No cache directory specified");
return -EINVAL;
}
if (test_bit(CACHEFILES_READY, &cache->flags)) {
kerror("Cache already bound");
return -EBUSY;
}
if (!cache->tag) {
cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
if (!cache->tag)
return -ENOMEM;
}
return cachefiles_daemon_add_cache(cache);
}
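/*
 * Bring a cache online: check that the backing filesystem supports
 * the operations CacheFiles needs, translate the configured percent
 * thresholds into absolute file and block counts, create the "cache"
 * and "graveyard" directories and register the cache with FS-Cache.
 */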
static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
{
struct cachefiles_object *fsdef;
struct path path;
struct kstatfs stats;
struct dentry *graveyard, *cachedir, *root;
const struct cred *saved_cred;
int ret;
_enter("");
ret = cachefiles_get_security_ID(cache);
if (ret < 0)
return ret;
cachefiles_begin_secure(cache, &saved_cred);
ret = -ENOMEM;
fsdef = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
if (!fsdef)
goto error_root_object;
ASSERTCMP(fsdef->backer, ==, NULL);
atomic_set(&fsdef->usage, 1);
fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
_debug("- fsdef %p", fsdef);
ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
if (ret < 0)
goto error_open_root;
cache->mnt = path.mnt;
root = path.dentry;
ret = -EOPNOTSUPP;
if (!root->d_inode ||
!root->d_inode->i_op ||
!root->d_inode->i_op->lookup ||
!root->d_inode->i_op->mkdir ||
!root->d_inode->i_op->setxattr ||
!root->d_inode->i_op->getxattr ||
!root->d_sb->s_op->statfs ||
!root->d_sb->s_op->sync_fs)
goto error_unsupported;
ret = -EROFS;
if (root->d_sb->s_flags & MS_RDONLY)
goto error_unsupported;
ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
if (ret < 0)
goto error_unsupported;
ret = vfs_statfs(&path, &stats);
if (ret < 0)
goto error_unsupported;
ret = -ERANGE;
if (stats.f_bsize <= 0)
goto error_unsupported;
ret = -EOPNOTSUPP;
if (stats.f_bsize > PAGE_SIZE)
goto error_unsupported;
cache->bsize = stats.f_bsize;
cache->bshift = 0;
if (stats.f_bsize < PAGE_SIZE)
cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize);
_debug("blksize %u (shift %u)",
cache->bsize, cache->bshift);
_debug("size %llu, avail %llu",
(unsigned long long) stats.f_blocks,
(unsigned long long) stats.f_bavail);
do_div(stats.f_files, 100);
cache->fstop = stats.f_files * cache->fstop_percent;
cache->fcull = stats.f_files * cache->fcull_percent;
cache->frun = stats.f_files * cache->frun_percent;
_debug("limits {%llu,%llu,%llu} files",
(unsigned long long) cache->frun,
(unsigned long long) cache->fcull,
(unsigned long long) cache->fstop);
stats.f_blocks >>= cache->bshift;
do_div(stats.f_blocks, 100);
cache->bstop = stats.f_blocks * cache->bstop_percent;
cache->bcull = stats.f_blocks * cache->bcull_percent;
cache->brun = stats.f_blocks * cache->brun_percent;
_debug("limits {%llu,%llu,%llu} blocks",
(unsigned long long) cache->brun,
(unsigned long long) cache->bcull,
(unsigned long long) cache->bstop);
cachedir = cachefiles_get_directory(cache, root, "cache");
if (IS_ERR(cachedir)) {
ret = PTR_ERR(cachedir);
goto error_unsupported;
}
fsdef->dentry = cachedir;
fsdef->fscache.cookie = NULL;
ret = cachefiles_check_object_type(fsdef);
if (ret < 0)
goto error_unsupported;
graveyard = cachefiles_get_directory(cache, root, "graveyard");
if (IS_ERR(graveyard)) {
ret = PTR_ERR(graveyard);
goto error_unsupported;
}
cache->graveyard = graveyard;
fscache_init_cache(&cache->cache,
&cachefiles_cache_ops,
"%s",
fsdef->dentry->d_sb->s_id);
fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
if (ret < 0)
goto error_add_cache;
set_bit(CACHEFILES_READY, &cache->flags);
dput(root);
printk(KERN_INFO "CacheFiles:"
" File cache on %s registered\n",
cache->cache.identifier);
cachefiles_has_space(cache, 0, 0);
cachefiles_end_secure(cache, saved_cred);
return 0;
error_add_cache:
dput(cache->graveyard);
cache->graveyard = NULL;
error_unsupported:
mntput(cache->mnt);
cache->mnt = NULL;
dput(fsdef->dentry);
fsdef->dentry = NULL;
dput(root);
error_open_root:
kmem_cache_free(cachefiles_object_jar, fsdef);
error_root_object:
cachefiles_end_secure(cache, saved_cred);
kerror("Failed to register: %d", ret);
return ret;
}
void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
_enter("");
if (test_bit(CACHEFILES_READY, &cache->flags)) {
printk(KERN_INFO "CacheFiles:"
" File cache on %s unregistering\n",
cache->cache.identifier);
fscache_withdraw_cache(&cache->cache);
}
dput(cache->graveyard);
mntput(cache->mnt);
kfree(cache->rootdirname);
kfree(cache->secctx);
kfree(cache->tag);
_leave("");
}
| gpl-2.0 |
GodsOffSpring/AOSP-m8 | arch/openrisc/kernel/idle.c | 34 | 1804 | /*
* OpenRISC idle.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
 * Idle daemon for or32. The idle daemon handles any action
 * that needs to be taken when the system becomes idle.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
void (*powersave) (void) = NULL;
static inline void pm_idle(void)
{
barrier();
}
void cpu_idle(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
tick_nohz_idle_enter();
rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
rmb();
clear_thread_flag(TIF_POLLING_NRFLAG);
local_irq_disable();
stop_critical_timings();
if (!need_resched() && powersave != NULL)
powersave();
start_critical_timings();
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
}
rcu_idle_exit();
tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
| gpl-2.0 |
GodsOffSpring/AOSP-m8 | drivers/net/ethernet/sfc/efx.c | 34 | 56326 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi.h"
#include "workarounds.h"
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
[LOOPBACK_NONE] = "NONE",
[LOOPBACK_DATA] = "DATAPATH",
[LOOPBACK_GMAC] = "GMAC",
[LOOPBACK_XGMII] = "XGMII",
[LOOPBACK_XGXS] = "XGXS",
[LOOPBACK_XAUI] = "XAUI",
[LOOPBACK_GMII] = "GMII",
[LOOPBACK_SGMII] = "SGMII",
[LOOPBACK_XGBR] = "XGBR",
[LOOPBACK_XFI] = "XFI",
[LOOPBACK_XAUI_FAR] = "XAUI_FAR",
[LOOPBACK_GMII_FAR] = "GMII_FAR",
[LOOPBACK_SGMII_FAR] = "SGMII_FAR",
[LOOPBACK_XFI_FAR] = "XFI_FAR",
[LOOPBACK_GPHY] = "GPHY",
[LOOPBACK_PHYXS] = "PHYXS",
[LOOPBACK_PCS] = "PCS",
[LOOPBACK_PMAPMD] = "PMA/PMD",
[LOOPBACK_XPORT] = "XPORT",
[LOOPBACK_XGMII_WS] = "XGMII_WS",
[LOOPBACK_XAUI_WS] = "XAUI_WS",
[LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
[LOOPBACK_GMII_WS] = "GMII_WS",
[LOOPBACK_XFI_WS] = "XFI_WS",
[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
[LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
#define EFX_MAX_MTU (9 * 1024)
static struct workqueue_struct *reset_workqueue;
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
"Use separate channels for TX and RX");
static int napi_weight = 64;
static unsigned int efx_monitor_interval = 1 * HZ;
static unsigned int rx_irq_mod_usec = 60;
static unsigned int tx_irq_mod_usec = 150;
static unsigned int interrupt_mode;
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
"Threshold score for reducing IRQ moderation");
static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_RUNNING) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
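/*
 * Process a channel's event queue: handle up to "budget" completion
 * events and, on the RX path, deliver any held-back packet and refill
 * the RX descriptor ring afterwards.
 */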
static int efx_process_channel(struct efx_channel *channel, int budget)
{
int spent;
if (unlikely(!channel->enabled))
return 0;
spent = efx_nic_process_eventq(channel, budget);
if (spent && efx_channel_has_rx_queue(channel)) {
struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel);
if (channel->rx_pkt) {
__efx_rx_packet(channel, channel->rx_pkt);
channel->rx_pkt = NULL;
}
if (rx_queue->enabled) {
efx_rx_strategy(channel);
efx_fast_push_rx_descriptors(rx_queue);
}
}
return spent;
}
static inline void efx_channel_processed(struct efx_channel *channel)
{
channel->work_pending = false;
smp_wmb();
efx_nic_eventq_read_ack(channel);
}
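/*
 * NAPI poll handler. Besides servicing the event queue this also
 * implements adaptive IRQ moderation: roughly every 1000 interrupts
 * the moderation value is nudged up or down depending on the
 * irq_mod_score accumulated since the last adjustment.
 */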
static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct efx_nic *efx = channel->efx;
int spent;
netif_vdbg(efx, intr, efx->net_dev,
"channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
spent = efx_process_channel(channel, budget);
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
irq_adapt_low_thresh)) {
if (channel->irq_moderation > 1) {
channel->irq_moderation -= 1;
efx->type->push_irq_moderation(channel);
}
} else if (unlikely(channel->irq_mod_score >
irq_adapt_high_thresh)) {
if (channel->irq_moderation <
efx->irq_rx_moderation) {
channel->irq_moderation += 1;
efx->type->push_irq_moderation(channel);
}
}
channel->irq_count = 0;
channel->irq_mod_score = 0;
}
efx_filter_rfs_expire(channel);
napi_complete(napi);
efx_channel_processed(channel);
}
return spent;
}
void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
BUG_ON(!efx->loopback_selftest);
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
if (channel->irq)
synchronize_irq(channel->irq);
napi_disable(&channel->napi_str);
efx_process_channel(channel, channel->eventq_mask + 1);
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
static int efx_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned long entries;
netif_dbg(efx, probe, efx->net_dev,
"chan %d create event queue\n", channel->channel);
entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
return efx_nic_probe_eventq(channel);
}
static void efx_init_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d init event queue\n", channel->channel);
channel->eventq_read_ptr = 0;
efx_nic_init_eventq(channel);
}
static void efx_start_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
channel->work_pending = false;
channel->enabled = true;
smp_wmb();
napi_enable(&channel->napi_str);
efx_nic_eventq_read_ack(channel);
}
static void efx_stop_eventq(struct efx_channel *channel)
{
if (!channel->enabled)
return;
napi_disable(&channel->napi_str);
channel->enabled = false;
}
static void efx_fini_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d remove event queue\n", channel->channel);
efx_nic_remove_eventq(channel);
}
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int j;
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return NULL;
channel->efx = efx;
channel->channel = i;
channel->type = &efx_default_channel_type;
for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j];
tx_queue->efx = efx;
tx_queue->queue = i * EFX_TXQ_TYPES + j;
tx_queue->channel = channel;
}
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
(unsigned long)rx_queue);
return channel;
}
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int j;
channel = kmalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return NULL;
*channel = *old_channel;
channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j];
if (tx_queue->channel)
tx_queue->channel = channel;
tx_queue->buffer = NULL;
memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}
rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
(unsigned long)rx_queue);
return channel;
}
static int efx_probe_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc;
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"creating channel %d\n", channel->channel);
rc = channel->type->pre_probe(channel);
if (rc)
goto fail;
rc = efx_probe_eventq(channel);
if (rc)
goto fail;
efx_for_each_channel_tx_queue(tx_queue, channel) {
rc = efx_probe_tx_queue(tx_queue);
if (rc)
goto fail;
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
rc = efx_probe_rx_queue(rx_queue);
if (rc)
goto fail;
}
channel->n_rx_frm_trunc = 0;
return 0;
fail:
efx_remove_channel(channel);
return rc;
}
static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
struct efx_nic *efx = channel->efx;
const char *type;
int number;
number = channel->channel;
if (efx->tx_channel_offset == 0) {
type = "";
} else if (channel->channel < efx->tx_channel_offset) {
type = "-rx";
} else {
type = "-tx";
number -= efx->tx_channel_offset;
}
snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
static void efx_set_channel_names(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
efx->channel_name[channel->channel],
sizeof(efx->channel_name[0]));
}
static int efx_probe_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
efx->next_buffer_table = 0;
efx_for_each_channel_rev(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create channel %d\n",
channel->channel);
goto fail;
}
}
efx_set_channel_names(efx);
return 0;
fail:
efx_remove_channels(efx);
return rc;
}
static void efx_start_datapath(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_hash_size +
efx->type->rx_buffer_padding);
efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state));
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
efx_rx_strategy(channel);
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
efx_nic_generate_fill_event(rx_queue);
}
WARN_ON(channel->rx_pkt != NULL);
efx_rx_strategy(channel);
}
if (netif_device_present(efx->net_dev))
netif_tx_wake_all_queues(efx->net_dev);
}
static void efx_stop_datapath(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
netif_err(efx, drv, efx->net_dev,
"Resetting to recover from flush failure\n");
efx_schedule_reset(efx, RESET_TYPE_ALL);
} else if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
netif_dbg(efx, drv, efx->net_dev,
"successfully flushed all queues\n");
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
efx_stop_eventq(channel);
efx_start_eventq(channel);
}
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
}
}
static void efx_remove_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
}
static void efx_remove_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
}
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
int rc = 0;
efx_for_each_channel(channel, efx) {
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
if (channel->type->copy)
continue;
next_buffer_table = max(next_buffer_table,
channel->eventq.index +
channel->eventq.entries);
efx_for_each_channel_rx_queue(rx_queue, channel)
next_buffer_table = max(next_buffer_table,
rx_queue->rxd.index +
rx_queue->rxd.entries);
efx_for_each_channel_tx_queue(tx_queue, channel)
next_buffer_table = max(next_buffer_table,
tx_queue->txd.index +
tx_queue->txd.entries);
}
efx_stop_all(efx);
efx_stop_interrupts(efx, true);
memset(other_channel, 0, sizeof(other_channel));
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
if (channel->type->copy)
channel = channel->type->copy(channel);
if (!channel) {
rc = -ENOMEM;
goto out;
}
other_channel[i] = channel;
}
old_rxq_entries = efx->rxq_entries;
old_txq_entries = efx->txq_entries;
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
efx->channel[i] = other_channel[i];
other_channel[i] = channel;
}
efx->next_buffer_table = next_buffer_table;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
if (!channel->type->copy)
continue;
rc = efx_probe_channel(channel);
if (rc)
goto rollback;
efx_init_napi_channel(efx->channel[i]);
}
out:
for (i = 0; i < efx->n_channels; i++) {
channel = other_channel[i];
if (channel && channel->type->copy) {
efx_fini_napi_channel(channel);
efx_remove_channel(channel);
kfree(channel);
}
}
efx_start_interrupts(efx, true);
efx_start_all(efx);
return rc;
rollback:
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
efx->channel[i] = other_channel[i];
other_channel[i] = channel;
}
goto out;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int,
.get_name = efx_get_channel_name,
.copy = efx_copy_channel,
.keep_eventq = false,
};
int efx_channel_dummy_op_int(struct efx_channel *channel)
{
return 0;
}
void efx_link_status_changed(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
if (!netif_running(efx->net_dev))
return;
if (link_state->up != netif_carrier_ok(efx->net_dev)) {
efx->n_link_state_changes++;
if (link_state->up)
netif_carrier_on(efx->net_dev);
else
netif_carrier_off(efx->net_dev);
}
if (link_state->up)
netif_info(efx, link, efx->net_dev,
"link up at %uMbps %s-duplex (MTU %d)%s\n",
link_state->speed, link_state->fd ? "full" : "half",
efx->net_dev->mtu,
(efx->promiscuous ? " [PROMISC]" : ""));
else
netif_info(efx, link, efx->net_dev, "link down\n");
}
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
efx->link_advertising = advertising;
if (advertising) {
if (advertising & ADVERTISED_Pause)
efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
else
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
if (advertising & ADVERTISED_Asym_Pause)
efx->wanted_fc ^= EFX_FC_TX;
}
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
if (efx->link_advertising) {
if (wanted_fc & EFX_FC_RX)
efx->link_advertising |= (ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
else
efx->link_advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
efx->link_advertising ^= ADVERTISED_Asym_Pause;
}
}
static void efx_fini_port(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx)
{
enum efx_phy_mode phy_mode;
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
efx->phy_mode |= PHY_MODE_TX_DISABLED;
else
efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
rc = efx->type->reconfigure_port(efx);
if (rc)
efx->phy_mode = phy_mode;
return rc;
}
int efx_reconfigure_port(struct efx_nic *efx)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
mutex_lock(&efx->mac_lock);
rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_mac_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
int rc;
netif_dbg(efx, probe, efx->net_dev, "create port\n");
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
rc = efx->type->probe_port(efx);
if (rc)
return rc;
memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
return 0;
}
static int efx_init_port(struct efx_nic *efx)
{
int rc;
netif_dbg(efx, drv, efx->net_dev, "init port\n");
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->init(efx);
if (rc)
goto fail1;
efx->port_initialized = true;
efx->type->reconfigure_mac(efx);
rc = efx->phy_op->reconfigure(efx);
if (rc)
goto fail2;
mutex_unlock(&efx->mac_lock);
return 0;
fail2:
efx->phy_op->fini(efx);
fail1:
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_start_port(struct efx_nic *efx)
{
netif_dbg(efx, ifup, efx->net_dev, "start port\n");
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
}
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
if (!efx->port_initialized)
return;
efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_state.up = false;
efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
efx->type->remove_port(efx);
}
static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to enable PCI device\n");
goto fail1;
}
pci_set_master(pci_dev);
while (dma_mask > 0x7fffffffUL) {
if (pci_dma_supported(pci_dev, dma_mask)) {
rc = pci_set_dma_mask(pci_dev, dma_mask);
if (rc == 0)
break;
}
dma_mask >>= 1;
}
if (rc) {
netif_err(efx, probe, efx->net_dev,
"could not find a suitable DMA mask\n");
goto fail2;
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to set consistent DMA mask\n");
goto fail2;
}
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
rc = -EIO;
goto fail3;
}
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size, efx->membase);
return 0;
fail4:
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
efx->membase_phys = 0;
fail2:
pci_disable_device(efx->pci_dev);
fail1:
return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
efx->membase = NULL;
}
if (efx->membase_phys) {
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
efx->membase_phys = 0;
}
pci_disable_device(efx->pci_dev);
}
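/* Count how many RSS channels are wanted: one per physical core by
* default (hyperthread siblings share a channel), overridable with the
* rss_cpus module parameter and capped at the VF queue size when
* SR-IOV RSS is in use. */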
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
cpumask_var_t thread_mask;
unsigned int count;
int cpu;
if (rss_cpus) {
count = rss_cpus;
} else {
if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
netif_warn(efx, probe, efx->net_dev,
"RSS disabled due to allocation failure\n");
return 1;
}
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, thread_mask)) {
++count;
cpumask_or(thread_mask, thread_mask,
topology_thread_cpumask(cpu));
}
}
free_cpumask_var(thread_mask);
}
if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
count > efx_vf_size(efx)) {
netif_warn(efx, probe, efx->net_dev,
"Reducing number of RSS channels from %u to %u for "
"VF support. Increase vf-msix-limit to use more "
"channels on the PF.\n",
count, efx_vf_size(efx));
count = efx_vf_size(efx);
}
return count;
}
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
unsigned int i;
int rc;
efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap)
return -ENOMEM;
for (i = 0; i < efx->n_rx_channels; i++) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
xentries[i].vector);
if (rc) {
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
return rc;
}
}
#endif
return 0;
}
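/* Probe the number and type of interrupts we are able to allocate,
* trying MSI-X first and falling back to MSI and then legacy
* interrupts, then divide the vectors between RX, TX and any extra
* channel types. */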
static int efx_probe_interrupts(struct efx_nic *efx)
{
unsigned int max_channels =
min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0;
unsigned int i, j;
int rc;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
if (efx->extra_channel_type[i])
++extra_channels;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
unsigned int n_channels;
n_channels = efx_wanted_parallelism(efx);
if (separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
n_channels = min(n_channels, max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %u).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
n_channels);
}
if (rc == 0) {
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
if (separate_tx_channels) {
efx->n_tx_channels = max(n_channels / 2, 1U);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
} else {
efx->n_tx_channels = n_channels;
efx->n_rx_channels = n_channels;
}
rc = efx_init_rx_cpu_rmap(efx, xentries);
if (rc) {
pci_disable_msix(efx->pci_dev);
return rc;
}
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
} else {
efx->interrupt_mode = EFX_INT_MODE_MSI;
netif_err(efx, drv, efx->net_dev,
"could not enable MSI-X\n");
}
}
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
} else {
netif_err(efx, drv, efx->net_dev,
"could not enable MSI\n");
efx->interrupt_mode = EFX_INT_MODE_LEGACY;
}
}
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
j = efx->n_channels;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
continue;
if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
efx->n_channels <= extra_channels) {
efx->extra_channel_type[i]->handle_no_channel(efx);
} else {
--j;
efx_get_channel(efx, j)->type =
efx->extra_channel_type[i];
}
}
efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
efx->n_rx_channels : efx_vf_size(efx));
return 0;
}
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
struct efx_channel *channel;
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
efx_for_each_channel(channel, efx) {
if (!channel->type->keep_eventq || !may_keep_eventq)
efx_init_eventq(channel);
efx_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
}
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
struct efx_channel *channel;
efx_mcdi_mode_poll(efx);
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
if (!channel->type->keep_eventq || !may_keep_eventq)
efx_fini_eventq(channel);
}
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
channel->irq = 0;
pci_disable_msi(efx->pci_dev);
pci_disable_msix(efx->pci_dev);
efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
}
}
static int efx_probe_nic(struct efx_nic *efx)
{
size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
rc = efx->type->probe(efx);
if (rc)
return rc;
rc = efx_probe_interrupts(efx);
if (rc)
goto fail;
efx->type->dimension_resources(efx);
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
true);
return 0;
fail:
efx->type->remove(efx);
return rc;
}
static void efx_remove_nic(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
efx_remove_interrupts(efx);
efx->type->remove(efx);
}
static int efx_probe_all(struct efx_nic *efx)
{
int rc;
rc = efx_probe_nic(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
rc = efx_probe_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
goto fail3;
}
rc = efx_probe_channels(efx);
if (rc)
goto fail4;
return 0;
fail4:
efx_remove_filters(efx);
fail3:
efx_remove_port(efx);
fail2:
efx_remove_nic(efx);
fail1:
return rc;
}
static void efx_start_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
if (efx->port_enabled)
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
if (!netif_running(efx->net_dev))
return;
efx_start_port(efx);
efx_start_datapath(efx);
if (efx->type->monitor != NULL) {
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
} else {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
}
efx->type->start_stats(efx);
}
static void efx_flush_all(struct efx_nic *efx)
{
cancel_delayed_work_sync(&efx->monitor_work);
efx_selftest_async_cancel(efx);
cancel_work_sync(&efx->mac_work);
}
static void efx_stop_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
if (!efx->port_enabled)
return;
efx->type->stop_stats(efx);
efx_stop_port(efx);
efx_flush_all(efx);
netif_tx_disable(efx->net_dev);
efx_stop_datapath(efx);
}
static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_channels(efx);
efx_remove_filters(efx);
efx_remove_port(efx);
efx_remove_nic(efx);
}
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
if (usecs == 0)
return 0;
if (usecs * 1000 < quantum_ns)
return 1;
return usecs * 1000 / quantum_ns;
}
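/* Set the interrupt moderation periods, converting the requested
* microsecond values into hardware timer ticks and rejecting anything
* beyond the controller's maximum timer period. When channels are
* shared, RX and TX moderation must agree unless RX may override TX. */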
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx)
{
struct efx_channel *channel;
unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
efx->timer_quantum_ns,
1000);
unsigned int tx_ticks;
unsigned int rx_ticks;
EFX_ASSERT_RESET_SERIALISED(efx);
if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
return -EINVAL;
tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
!rx_may_override_tx) {
netif_err(efx, drv, efx->net_dev, "Channels are shared. "
"RX and TX IRQ moderation must be equal\n");
return -EINVAL;
}
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel))
channel->irq_moderation = rx_ticks;
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
return 0;
}
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive)
{
*rx_adaptive = efx->irq_rx_adaptive;
*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
efx->timer_quantum_ns,
1000);
if (efx->tx_channel_offset == 0)
*tx_usecs = *rx_usecs;
else
*tx_usecs = DIV_ROUND_UP(
efx->channel[efx->tx_channel_offset]->irq_moderation *
efx->timer_quantum_ns,
1000);
}
static void efx_monitor(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work.work);
netif_vdbg(efx, timer, efx->net_dev,
"hardware monitor executing on CPU %d\n",
raw_smp_processor_id());
BUG_ON(efx->type->monitor == NULL);
if (mutex_trylock(&efx->mac_lock)) {
if (efx->port_enabled)
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
}
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
}
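/* Net device ioctl: pass MII requests through to the MDIO layer,
* rewriting PHY IDs in the 0x04xx range into clause-45 form first. */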
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
static void efx_init_napi_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_init_napi_channel(channel);
}
static void efx_fini_napi_channel(struct efx_channel *channel)
{
if (channel->napi_dev)
netif_napi_del(&channel->napi_str);
channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void efx_netpoll(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_schedule_channel(channel);
}
#endif
static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
raw_smp_processor_id());
if (efx->state == STATE_DISABLED)
return -EIO;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
return -EIO;
efx_link_status_changed(efx);
efx_start_all(efx);
efx_selftest_async_start(efx);
return 0;
}
static int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
if (efx->state != STATE_DISABLED) {
efx_stop_all(efx);
}
return 0;
}
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
stats->rx_length_errors = (mac_stats->rx_gtjumbo +
mac_stats->rx_length_error);
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
stats->rx_fifo_errors = mac_stats->rx_overflow;
stats->rx_missed_errors = mac_stats->rx_missed;
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
spin_unlock_bh(&efx->stats_lock);
return stats;
}
static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
efx_stop_all(efx);
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
return 0;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n",
new_addr);
return -EADDRNOTAVAIL;
}
memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
efx_sriov_mac_address_changed(efx);
mutex_lock(&efx->mac_lock);
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
return 0;
}
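/* Context: netif_addr_lock held. Rebuilds the multicast hash filter
* from the device address list (all-ones for promiscuous or
* all-multicast mode) and defers the actual MAC reconfiguration to the
* mac_work item. */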
static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
set_bit_le(0xff, mc_hash->byte);
}
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
}
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (net_dev->features & ~data & NETIF_F_NTUPLE)
efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
.ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
.ndo_get_vf_config = efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
efx_mtd_rename(efx);
efx_set_channel_names(efx);
}
static int efx_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *net_dev = ptr;
if (net_dev->netdev_ops == &efx_netdev_ops &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
return NOTIFY_DONE;
}
static struct notifier_block efx_netdev_notifier = {
.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
struct efx_channel *channel;
int rc;
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
rtnl_lock();
rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0)
goto fail_locked;
efx_update_name(efx);
rc = register_netdevice(net_dev);
if (rc)
goto fail_locked;
efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue_core_txq(tx_queue);
}
netif_carrier_off(net_dev);
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to init net dev attributes\n");
goto fail_registered;
}
return 0;
fail_locked:
rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
fail_registered:
unregister_netdev(net_dev);
return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_release_tx_buffers(tx_queue);
}
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
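/* Tear the software state down ready for a hardware reset. The MAC
* lock is taken here and released by efx_reset_up(), which must always
* be called afterwards, whether or not the reset itself succeeded. */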
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
efx_stop_interrupts(efx, false);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
goto fail;
}
if (!ok)
goto fail;
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
if (efx->phy_op->reconfigure(efx))
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
efx->type->reconfigure_mac(efx);
efx_start_interrupts(efx, false);
efx_restore_filters(efx);
efx_sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
return 0;
fail:
efx->port_initialized = false;
mutex_unlock(&efx->mac_lock);
return rc;
}
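/* Reset the NIC using the specified method. The net device is detached
* for the duration; if the reset or the subsequent restart fails, the
* device is closed and marked STATE_DISABLED. Caller must hold the
* rtnl_lock. */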
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
int rc, rc2;
bool disabled;
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
netif_device_detach(efx->net_dev);
efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
goto out;
}
efx->reset_pending &= -(1 << (method + 1));
pci_set_master(efx->pci_dev);
out:
disabled = rc || method == RESET_TYPE_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
rc = rc2;
}
if (disabled) {
dev_close(efx->net_dev);
netif_err(efx, drv, efx->net_dev, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
netif_device_attach(efx->net_dev);
}
return rc;
}
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
unsigned long pending = ACCESS_ONCE(efx->reset_pending);
if (!pending)
return;
if (efx->state != STATE_RUNNING) {
netif_info(efx, drv, efx->net_dev,
"scheduled reset quenched. NIC not RUNNING\n");
return;
}
rtnl_lock();
(void)efx_reset(efx, fls(pending) - 1);
rtnl_unlock();
}
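/* Request a reset asynchronously: map the reason to a reset method,
* record it in the pending mask and kick the reset workqueue. */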
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
break;
default:
method = efx->type->map_reset_reason(type);
netif_dbg(efx, drv, efx->net_dev,
"scheduling %s reset for %s\n",
RESET_TYPE(method), RESET_TYPE(type));
break;
}
set_bit(method, &efx->reset_pending);
efx_mcdi_mode_poll(efx);
queue_work(reset_workqueue, &efx->reset_work);
}
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),
.driver_data = (unsigned long) &siena_a0_nic_type},
{0}
};
int efx_port_dummy_op_int(struct efx_nic *efx)
{
return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
return false;
}
static const struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
.reconfigure = efx_port_dummy_op_int,
.poll = efx_port_dummy_op_poll,
.fini = efx_port_dummy_op_void,
};
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
#endif
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
}
efx->type = type;
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
pci_name(pci_dev));
efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
if (!efx->workqueue)
goto fail;
return 0;
fail:
efx_fini_struct(efx);
return -ENOMEM;
}
static void efx_fini_struct(struct efx_nic *efx)
{
int i;
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
}
}
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
efx_stop_interrupts(efx, false);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
efx_fini_napi(efx);
efx_remove_all(efx);
}
static void efx_pci_remove(struct pci_dev *pci_dev)
{
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
rtnl_lock();
efx->state = STATE_FINI;
dev_close(efx->net_dev);
rtnl_unlock();
efx_stop_interrupts(efx, false);
efx_sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
cancel_work_sync(&efx->reset_work);
efx_pci_remove_main(efx);
efx_fini_io(efx);
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
pci_set_drvdata(pci_dev, NULL);
efx_fini_struct(efx);
free_netdev(efx->net_dev);
}
#define SFC_VPD_LEN 512
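/* Locate and print the board part number ("PN" keyword) from the
* read-only section of the PCI Vital Product Data. */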
static void efx_print_product_vpd(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
int i, j;
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
if (vpd_size <= 0) {
netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
return;
}
i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
j = pci_vpd_lrdt_size(&vpd_data[i]);
i += PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
if (i < 0) {
netif_err(efx, drv, efx->net_dev, "Part number not found\n");
return;
}
j = pci_vpd_info_field_size(&vpd_data[i]);
i += PCI_VPD_INFO_FLD_HDR_SIZE;
if (i + j > vpd_size) {
netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
return;
}
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
}
static int efx_pci_probe_main(struct efx_nic *efx)
{
int rc;
rc = efx_probe_all(efx);
if (rc)
goto fail1;
efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise NIC\n");
goto fail3;
}
rc = efx_init_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise port\n");
goto fail4;
}
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
efx_start_interrupts(efx, false);
return 0;
fail5:
efx_fini_port(efx);
fail4:
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
efx_remove_all(efx);
fail1:
return rc;
}
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
if (rc)
goto fail1;
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
efx_print_product_vpd(efx);
rc = efx_init_io(efx);
if (rc)
goto fail2;
rc = efx_pci_probe_main(efx);
cancel_work_sync(&efx->reset_work);
if (rc)
goto fail3;
if (efx->reset_pending) {
rc = -EIO;
goto fail4;
}
efx->state = STATE_RUNNING;
rc = efx_register_netdev(efx);
if (rc)
goto fail4;
rc = efx_sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
rtnl_lock();
rc = efx_mtd_probe(efx);
rtnl_unlock();
if (rc)
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
return 0;
fail4:
efx_pci_remove_main(efx);
fail3:
efx_fini_io(efx);
fail2:
efx_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
}
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_FINI;
netif_device_detach(efx->net_dev);
efx_stop_all(efx);
efx_stop_interrupts(efx, false);
return 0;
}
static int efx_pm_thaw(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_INIT;
efx_start_interrupts(efx, false);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
efx->state = STATE_RUNNING;
efx->type->resume_wol(efx);
queue_work(reset_workqueue, &efx->reset_work);
return 0;
}
static int efx_pm_poweroff(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct efx_nic *efx = pci_get_drvdata(pci_dev);
efx->type->fini(efx);
efx->reset_pending = 0;
pci_save_state(pci_dev);
return pci_set_power_state(pci_dev, PCI_D3hot);
}
static int efx_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct efx_nic *efx = pci_get_drvdata(pci_dev);
int rc;
rc = pci_set_power_state(pci_dev, PCI_D0);
if (rc)
return rc;
pci_restore_state(pci_dev);
rc = pci_enable_device(pci_dev);
if (rc)
return rc;
pci_set_master(efx->pci_dev);
rc = efx->type->reset(efx, RESET_TYPE_ALL);
if (rc)
return rc;
rc = efx->type->init(efx);
if (rc)
return rc;
efx_pm_thaw(dev);
return 0;
}
static int efx_pm_suspend(struct device *dev)
{
int rc;
efx_pm_freeze(dev);
rc = efx_pm_poweroff(dev);
if (rc)
efx_pm_resume(dev);
return rc;
}
static const struct dev_pm_ops efx_pm_ops = {
.suspend = efx_pm_suspend,
.resume = efx_pm_resume,
.freeze = efx_pm_freeze,
.thaw = efx_pm_thaw,
.poweroff = efx_pm_poweroff,
.restore = efx_pm_resume,
};
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
};
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
"Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
int rc;
printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
rc = register_netdevice_notifier(&efx_netdev_notifier);
if (rc)
goto err_notifier;
rc = efx_init_sriov();
if (rc)
goto err_sriov;
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
goto err_reset;
}
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
goto err_pci;
return 0;
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
efx_fini_sriov();
err_sriov:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
}
static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
efx_fini_sriov();
unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
| gpl-2.0 |
mikewadsten/asuswrt | release/src/router/dropbear/libtomcrypt/src/misc/crypt/crypt_find_hash_oid.c | 34 | 1030 | /* LibTomCrypt, modular cryptographic library -- Tom St Denis
*
* LibTomCrypt is a library that provides various cryptographic
* algorithms in a highly modular and flexible manner.
*
* The library is free for all purposes without any express
* guarantee it works.
*
* Tom St Denis, tomstdenis@gmail.com, http://libtomcrypt.com
*/
#include "tomcrypt.h"
/**
@file crypt_find_hash_oid.c
Find a hash, Tom St Denis
*/
int find_hash_oid(const unsigned long *ID, unsigned long IDlen)
{
int x;
LTC_ARGCHK(ID != NULL);
LTC_MUTEX_LOCK(&ltc_hash_mutex);
for (x = 0; x < TAB_SIZE; x++) {
if (hash_descriptor[x].name != NULL && hash_descriptor[x].OIDlen == IDlen && !XMEMCMP(hash_descriptor[x].OID, ID, sizeof(unsigned long) * IDlen)) {
LTC_MUTEX_UNLOCK(&ltc_hash_mutex);
return x;
}
}
LTC_MUTEX_UNLOCK(&ltc_hash_mutex);
return -1;
}
/* $Source: /cvs/libtom/libtomcrypt/src/misc/crypt/crypt_find_hash_oid.c,v $ */
/* $Revision: 1.4 $ */
/* $Date: 2006/11/01 09:28:17 $ */
| gpl-2.0 |
jfdsmabalot/kernel_sense_m8 | drivers/gpio/gpio-wm831x.c | 34 | 7375 | /*
* gpiolib support for Wolfson WM831x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>
struct wm831x_gpio {
struct wm831x *wm831x;
struct gpio_chip gpio_chip;
};
static inline struct wm831x_gpio *to_wm831x_gpio(struct gpio_chip *chip)
{
return container_of(chip, struct wm831x_gpio, gpio_chip);
}
static int wm831x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int val = WM831X_GPN_DIR;
if (wm831x->has_gpio_ena)
val |= WM831X_GPN_TRI;
return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
WM831X_GPN_DIR | WM831X_GPN_TRI |
WM831X_GPN_FN_MASK, val);
}
static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int ret;
ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
if (ret < 0)
return ret;
if (ret & 1 << offset)
return 1;
else
return 0;
}
static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
value << offset);
}
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int val = 0;
int ret;
if (wm831x->has_gpio_ena)
val |= WM831X_GPN_TRI;
ret = wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
WM831X_GPN_DIR | WM831X_GPN_TRI |
WM831X_GPN_FN_MASK, val);
if (ret < 0)
return ret;
wm831x_gpio_set(chip, offset, value);
return 0;
}
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
if (!wm831x->irq_base)
return -EINVAL;
return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
}
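/* The WM831x hardware supports only two fixed debounce ranges,
* selected through the pin function field; pins already assigned an
* alternate function cannot be debounced and return -EBUSY. */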
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
unsigned debounce)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int reg = WM831X_GPIO1_CONTROL + offset;
int ret, fn;
ret = wm831x_reg_read(wm831x, reg);
if (ret < 0)
return ret;
switch (ret & WM831X_GPN_FN_MASK) {
case 0:
case 1:
break;
default:
return -EBUSY;
}
if (debounce >= 32 && debounce <= 64)
fn = 0;
else if (debounce >= 4000 && debounce <= 8000)
fn = 1;
else
return -EINVAL;
return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}
#ifdef CONFIG_DEBUG_FS
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int i, tristated;
for (i = 0; i < chip->ngpio; i++) {
int gpio = i + chip->base;
int reg;
const char *label, *pull, *powerdomain;
label = gpiochip_is_requested(chip, i);
if (!label)
label = "Unrequested";
seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
dev_err(wm831x->dev,
"GPIO control %d read failed: %d\n",
gpio, reg);
seq_printf(s, "\n");
continue;
}
switch (reg & WM831X_GPN_PULL_MASK) {
case WM831X_GPIO_PULL_NONE:
pull = "nopull";
break;
case WM831X_GPIO_PULL_DOWN:
pull = "pulldown";
break;
case WM831X_GPIO_PULL_UP:
pull = "pullup";
break;
default:
pull = "INVALID PULL";
break;
}
switch (i + 1) {
case 1 ... 3:
case 7 ... 9:
if (reg & WM831X_GPN_PWR_DOM)
powerdomain = "VPMIC";
else
powerdomain = "DBVDD";
break;
case 4 ... 6:
case 10 ... 12:
if (reg & WM831X_GPN_PWR_DOM)
powerdomain = "SYSVDD";
else
powerdomain = "DBVDD";
break;
case 13 ... 16:
powerdomain = "TPVDD";
break;
default:
BUG();
break;
}
tristated = reg & WM831X_GPN_TRI;
if (wm831x->has_gpio_ena)
tristated = !tristated;
seq_printf(s, " %s %s %s %s%s\n"
" %s%s (0x%4x)\n",
reg & WM831X_GPN_DIR ? "in" : "out",
wm831x_gpio_get(chip, i) ? "high" : "low",
pull,
powerdomain,
reg & WM831X_GPN_POL ? "" : " inverted",
reg & WM831X_GPN_OD ? "open-drain" : "CMOS",
tristated ? " tristated" : "",
reg);
}
}
#else
#define wm831x_gpio_dbg_show NULL
#endif
static struct gpio_chip template_chip = {
.label = "wm831x",
.owner = THIS_MODULE,
.direction_input = wm831x_gpio_direction_in,
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_debounce = wm831x_gpio_set_debounce,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = 1,
};
static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
struct wm831x_gpio *wm831x_gpio;
int ret;
wm831x_gpio = kzalloc(sizeof(*wm831x_gpio), GFP_KERNEL);
if (wm831x_gpio == NULL)
return -ENOMEM;
wm831x_gpio->wm831x = wm831x;
wm831x_gpio->gpio_chip = template_chip;
wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;
wm831x_gpio->gpio_chip.dev = &pdev->dev;
if (pdata && pdata->gpio_base)
wm831x_gpio->gpio_chip.base = pdata->gpio_base;
else
wm831x_gpio->gpio_chip.base = -1;
ret = gpiochip_add(&wm831x_gpio->gpio_chip);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
goto err;
}
platform_set_drvdata(pdev, wm831x_gpio);
return ret;
err:
kfree(wm831x_gpio);
return ret;
}
static int __devexit wm831x_gpio_remove(struct platform_device *pdev)
{
struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
int ret;
ret = gpiochip_remove(&wm831x_gpio->gpio_chip);
if (ret == 0)
kfree(wm831x_gpio);
return ret;
}
static struct platform_driver wm831x_gpio_driver = {
.driver.name = "wm831x-gpio",
.driver.owner = THIS_MODULE,
.probe = wm831x_gpio_probe,
.remove = __devexit_p(wm831x_gpio_remove),
};
static int __init wm831x_gpio_init(void)
{
return platform_driver_register(&wm831x_gpio_driver);
}
subsys_initcall(wm831x_gpio_init);
static void __exit wm831x_gpio_exit(void)
{
platform_driver_unregister(&wm831x_gpio_driver);
}
module_exit(wm831x_gpio_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM831x PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-gpio");
| gpl-2.0 |
GodsOffSpring/AOSP-m8 | net/netfilter/ipvs/ip_vs_conn.c | 34 | 27997 | /*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Peter Kese <peter.kese@ijs.si>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
* with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
* and others. Many code here is taken from IP MASQ code of kernel 2.2.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/net_namespace.h>
#include <net/ip_vs.h>
#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS 12
#endif
static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
int ip_vs_conn_tab_size __read_mostly;
static int ip_vs_conn_tab_mask __read_mostly;
static struct hlist_head *ip_vs_conn_tab __read_mostly;
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
static unsigned int ip_vs_conn_rnd __read_mostly;
#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
struct ip_vs_aligned_lock
{
rwlock_t l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
static inline void ct_read_lock(unsigned key)
{
read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_read_unlock(unsigned key)
{
read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_lock(unsigned key)
{
write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_unlock(unsigned key)
{
write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_read_lock_bh(unsigned key)
{
read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_read_unlock_bh(unsigned key)
{
read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_lock_bh(unsigned key)
{
write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_unlock_bh(unsigned key)
{
write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
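/* Compute the connection table hash for a (protocol, address, port)
* tuple, mixed with the random seed ip_vs_conn_rnd and the owning
* network namespace pointer. */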
static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
(__force u32)port, proto, ip_vs_conn_rnd) ^
((size_t)net>>8)) & ip_vs_conn_tab_mask;
#endif
return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
ip_vs_conn_rnd) ^
((size_t)net>>8)) & ip_vs_conn_tab_mask;
}
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
bool inverse)
{
const union nf_inet_addr *addr;
__be16 port;
if (p->pe_data && p->pe->hashkey_raw)
return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
ip_vs_conn_tab_mask;
if (likely(!inverse)) {
addr = p->caddr;
port = p->cport;
} else {
addr = p->vaddr;
port = p->vport;
}
return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
}
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
&cp->caddr, cp->cport, NULL, 0, &p);
if (cp->pe) {
p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
return ip_vs_conn_hashkey_param(&p, false);
}
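/* Hash a connection entry into the connection table. Returns 0 if the
* entry was already hashed; one-packet-scheduled connections are never
* hashed at all. */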
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
unsigned hash;
int ret;
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return 0;
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock(hash);
spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
cp->flags |= IP_VS_CONN_F_HASHED;
atomic_inc(&cp->refcnt);
ret = 1;
} else {
pr_err("%s(): request for already hashed, called from %pF\n",
__func__, __builtin_return_address(0));
ret = 0;
}
spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
}
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
unsigned hash;
int ret;
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock(hash);
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
hlist_del(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
atomic_dec(&cp->refcnt);
ret = 1;
} else
ret = 0;
spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
}
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp;
struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
p->cport == cp->cport && p->vport == cp->vport &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
p->protocol == cp->protocol &&
ip_vs_conn_net_eq(cp, p->net)) {
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
return cp;
}
}
ct_read_unlock(hash);
return NULL;
}
struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
struct ip_vs_conn *cp;
cp = __ip_vs_conn_in_get(p);
if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
struct ip_vs_conn_param cport_zero_p = *p;
cport_zero_p.cport = 0;
cp = __ip_vs_conn_in_get(&cport_zero_p);
}
IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
static int
ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
struct net *net = skb_net(skb);
pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
if (likely(!inverse))
ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
pptr[0], &iph->daddr, pptr[1], p);
else
ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p))
return NULL;
return ip_vs_conn_in_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp;
struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (!ip_vs_conn_net_eq(cp, p->net))
continue;
if (p->pe_data && p->pe->ct_match) {
if (p->pe == cp->pe && p->pe->ct_match(p, cp))
goto out;
continue;
}
if (cp->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
p->af, p->vaddr, &cp->vaddr) &&
p->cport == cp->cport && p->vport == cp->vport &&
cp->flags & IP_VS_CONN_F_TEMPLATE &&
p->protocol == cp->protocol)
goto out;
}
cp = NULL;
out:
if (cp)
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp, *ret=NULL;
struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, true);
ct_read_lock(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
p->vport == cp->cport && p->cport == cp->dport &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
p->protocol == cp->protocol &&
ip_vs_conn_net_eq(cp, p->net)) {
atomic_inc(&cp->refcnt);
ret = cp;
break;
}
}
ct_read_unlock(hash);
IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
ret ? "hit" : "not hit");
return ret;
}
struct ip_vs_conn *
ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p))
return NULL;
return ip_vs_conn_out_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
void ip_vs_conn_put(struct ip_vs_conn *cp)
{
unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
0 : cp->timeout;
mod_timer(&cp->timer, jiffies+t);
__ip_vs_conn_put(cp);
}
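/* Fill in the real client port of a no-cport connection once it is
* known; the entry must be rehashed because the port is part of its
* hash key. */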
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
if (ip_vs_conn_unhash(cp)) {
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
atomic_dec(&ip_vs_conn_no_cport_cnt);
cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
cp->cport = cport;
}
spin_unlock(&cp->lock);
ip_vs_conn_hash(cp);
}
}
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit;
break;
case IP_VS_CONN_F_TUNNEL:
cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit;
break;
}
}
#ifdef CONFIG_IP_VS_IPV6
static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit_v6;
break;
case IP_VS_CONN_F_TUNNEL:
cp->packet_xmit = ip_vs_tunnel_xmit_v6;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit_v6;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit_v6;
break;
}
}
#endif
static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
{
return atomic_read(&dest->activeconns)
+ atomic_read(&dest->inactconns);
}
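/* Bind a connection entry to its destination server: take a reference,
* inherit the forwarding flags, bump the active/inactive/persistent
* counters and flag the server overloaded once its upper connection
* threshold is reached. */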
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
unsigned int conn_flags;
if (!dest)
return;
atomic_inc(&dest->refcnt);
conn_flags = atomic_read(&dest->conn_flags);
if (cp->protocol != IPPROTO_UDP)
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
if (cp->flags & IP_VS_CONN_F_SYNC) {
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE))
conn_flags &= ~IP_VS_CONN_F_INACTIVE;
cp->flags &= ~IP_VS_CONN_F_FWD_MASK;
}
cp->flags |= conn_flags;
cp->dest = dest;
IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
if ((cp->flags & IP_VS_CONN_F_SYNC) &&
(!(cp->flags & IP_VS_CONN_F_INACTIVE)))
atomic_inc(&dest->activeconns);
else
atomic_inc(&dest->inactconns);
} else {
atomic_inc(&dest->persistconns);
}
if (dest->u_threshold != 0 &&
ip_vs_dest_totalconns(dest) >= dest->u_threshold)
dest->flags |= IP_VS_DEST_F_OVERLOAD;
}
struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest;
if ((cp) && (!cp->dest)) {
dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
cp->protocol, cp->fwmark, cp->flags);
ip_vs_bind_dest(cp, dest);
return dest;
} else
return NULL;
}
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest = cp->dest;
if (!dest)
return;
IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
if (cp->flags & IP_VS_CONN_F_INACTIVE) {
atomic_dec(&dest->inactconns);
} else {
atomic_dec(&dest->activeconns);
}
} else {
atomic_dec(&dest->persistconns);
}
if (dest->l_threshold != 0) {
if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else if (dest->u_threshold != 0) {
if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
}
atomic_dec(&dest->refcnt);
}
static int expire_quiescent_template(struct netns_ipvs *ipvs,
struct ip_vs_dest *dest)
{
#ifdef CONFIG_SYSCTL
return ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0);
#else
return 0;
#endif
}
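/* Check whether a persistence template is still usable. If its
* destination has gone away, is unavailable or is quiescent, the
* template is invalidated so a new destination can be chosen. */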
int ip_vs_check_template(struct ip_vs_conn *ct)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
expire_quiescent_template(ipvs, dest)) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
ip_vs_proto_name(ct->protocol),
IP_VS_DBG_ADDR(ct->af, &ct->caddr),
ntohs(ct->cport),
IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
ntohs(ct->vport),
IP_VS_DBG_ADDR(ct->af, &ct->daddr),
ntohs(ct->dport));
if (ct->vport != htons(0xffff)) {
if (ip_vs_conn_unhash(ct)) {
ct->dport = htons(0xffff);
ct->vport = htons(0xffff);
ct->cport = 0;
ip_vs_conn_hash(ct);
}
}
atomic_dec(&ct->refcnt);
return 0;
}
return 1;
}
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
cp->timeout = 60*HZ;
atomic_inc(&cp->refcnt);
if (atomic_read(&cp->n_control))
goto expire_later;
if (!ip_vs_conn_unhash(cp) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET))
goto expire_later;
if (likely(atomic_read(&cp->refcnt) == 1)) {
if (timer_pending(&cp->timer))
del_timer(&cp->timer);
if (cp->control)
ip_vs_control_del(cp);
if (cp->flags & IP_VS_CONN_F_NFCT) {
/* Do not touch conntracks once netns conntrack cleanup
* has started; ipvs->enable guards against that. */
smp_rmb();
if (ipvs->enable)
ip_vs_conn_drop_conntrack(cp);
}
}
ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
atomic_dec(&ipvs->conn_count);
kmem_cache_free(ip_vs_conn_cachep, cp);
return;
}
ip_vs_conn_hash(cp);
expire_later:
IP_VS_DBG(7, "delayed: conn->refcnt-1=%d conn->n_control=%d\n",
atomic_read(&cp->refcnt)-1,
atomic_read(&cp->n_control));
ip_vs_conn_put(cp);
}
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
if (del_timer(&cp->timer))
mod_timer(&cp->timer, jiffies);
}
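/*
* Create a new connection entry for the given parameters, bind it to
* its destination server and transmitter, and hash it into the
* connection table. Returns NULL on allocation failure.
*/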
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = net_ipvs(p->net);
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
p->protocol);
cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NULL;
}
INIT_HLIST_NODE(&cp->c_list);
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
ip_vs_conn_net_set(cp, p->net);
cp->af = p->af;
cp->protocol = p->protocol;
ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
ip_vs_addr_copy(p->af, &cp->vaddr, p->vaddr);
cp->vport = p->vport;
ip_vs_addr_copy(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
&cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
ip_vs_pe_get(p->pe);
cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
}
spin_lock_init(&cp->lock);
atomic_set(&cp->refcnt, 1);
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
ip_vs_bind_dest(cp, dest);
cp->state = 0;
cp->timeout = 3*HZ;
#ifdef CONFIG_IP_VS_IPV6
if (p->af == AF_INET6)
ip_vs_bind_xmit_v6(cp);
else
#endif
ip_vs_bind_xmit(cp);
if (unlikely(pd && atomic_read(&pd->appcnt)))
ip_vs_bind_app(cp, pd->pp);
if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
ip_vs_conn_hash(cp);
return cp;
}
#ifdef CONFIG_PROC_FS
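/*
* seq_file plumbing for /proc/net/ip_vs_conn and
* /proc/net/ip_vs_conn_sync: iterate the connection hash table one
* bucket at a time, holding that bucket's read lock across the walk.
*/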
struct ip_vs_iter_state {
struct seq_net_private p;
struct hlist_head *l;
};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *n;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
}
ct_read_unlock_bh(idx);
}
return NULL;
}
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ip_vs_iter_state *iter = seq->private;
iter->l = NULL;
return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *e;
struct hlist_head *l = iter->l;
int idx;
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_conn_array(seq, 0);
if ((e = cp->c_list.next))
return hlist_entry(e, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
ct_read_unlock_bh(idx);
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
ct_read_unlock_bh(idx);
}
iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
{
struct ip_vs_iter_state *iter = seq->private;
struct hlist_head *l = iter->l;
if (l)
ct_read_unlock_bh(l - ip_vs_conn_tab);
}
static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
if (!ip_vs_conn_net_eq(cp, net))
return 0;
if (cp->pe_data) {
pe_data[0] = ' ';
len = strlen(cp->pe->name);
memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
"%pI6 %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
&cp->daddr.in6, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X"
" %08X %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
ntohl(cp->daddr.ip), ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
}
return 0;
}
static const struct seq_operations ip_vs_conn_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_seq_show,
};
static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_fops = {
.owner = THIS_MODULE,
.open = ip_vs_conn_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static const char *ip_vs_origin_name(unsigned flags)
{
if (flags & IP_VS_CONN_F_SYNC)
return "SYNC";
else
return "LOCAL";
}
static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
if (!ip_vs_conn_net_eq(cp, net))
return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
&cp->daddr.in6, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X "
"%08X %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
ntohl(cp->daddr.ip), ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
}
return 0;
}
static const struct seq_operations ip_vs_conn_sync_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_sync_seq_show,
};
static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_sync_fops = {
.owner = THIS_MODULE,
.open = ip_vs_conn_sync_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
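/*
* Decide whether a connection entry may be dropped when the table is
* under pressure. Entries updated within the last minute are kept, as
* are entries with more than 8 incoming packets; among the rest, one
* candidate in todrop_rate[i] is dropped, where i is the number of
* incoming packets seen so far.
*/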
static inline int todrop_entry(struct ip_vs_conn *cp)
{
static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
static char todrop_counter[9] = {0};
int i;
if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
return 0;
i = atomic_read(&cp->in_pkts);
if (i > 8 || i < 0)
return 0;
if (!todrop_rate[i])
return 0;
if (--todrop_counter[i] > 0)
return 0;
todrop_counter[i] = todrop_rate[i];
return 1;
}
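/*
* Randomly scan 1/32 of the connection hash table and expire droppable
* entries (and their controlling templates). SYN-stage TCP connections
* are always candidates; established TCP and non-TCP connections are
* dropped only subject to todrop_entry().
*/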
void ip_vs_random_dropentry(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned hash = net_random() & ip_vs_conn_tab_mask;
struct hlist_node *n;
ct_write_lock_bh(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
continue;
if (!ip_vs_conn_net_eq(cp, net))
continue;
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
case IP_VS_TCP_S_SYNACK:
break;
case IP_VS_TCP_S_ESTABLISHED:
if (todrop_entry(cp))
break;
continue;
default:
continue;
}
} else {
if (!todrop_entry(cp))
continue;
}
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
IP_VS_DBG(4, "del conn template\n");
ip_vs_conn_expire_now(cp->control);
}
}
ct_write_unlock_bh(hash);
}
}
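/*
* Flush all connection entries belonging to this netns, rescanning
* until the per-netns connection count drops to zero.
*/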
static void ip_vs_conn_flush(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = net_ipvs(net);
flush_again:
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
struct hlist_node *n;
ct_write_lock_bh(idx);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
if (!ip_vs_conn_net_eq(cp, net))
continue;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
IP_VS_DBG(4, "del conn template\n");
ip_vs_conn_expire_now(cp->control);
}
}
ct_write_unlock_bh(idx);
}
if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
int __net_init ip_vs_conn_net_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
atomic_set(&ipvs->conn_count, 0);
proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
return 0;
}
void __net_exit ip_vs_conn_net_cleanup(struct net *net)
{
ip_vs_conn_flush(net);
proc_net_remove(net, "ip_vs_conn");
proc_net_remove(net, "ip_vs_conn_sync");
}
int __init ip_vs_conn_init(void)
{
int idx;
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
if (!ip_vs_conn_tab)
return -ENOMEM;
ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
sizeof(struct ip_vs_conn), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ip_vs_conn_cachep) {
vfree(ip_vs_conn_tab);
return -ENOMEM;
}
pr_info("Connection hash table configured "
"(size=%d, memory=%ldKbytes)\n",
ip_vs_conn_tab_size,
(long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));
for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
return 0;
}
void ip_vs_conn_cleanup(void)
{
kmem_cache_destroy(ip_vs_conn_cachep);
vfree(ip_vs_conn_tab);
}
| gpl-2.0 |
marcoxx626/M8_Kernel_Sense | drivers/block/aoe/aoechr.c | 34 | 6012 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include "aoe.h"
enum {
MINOR_ERR = 2,
MINOR_DISCOVER,
MINOR_INTERFACES,
MINOR_REVALIDATE,
MINOR_FLUSH,
MSGSZ = 2048,
NMSG = 100,
};
struct aoe_chardev {
ulong minor;
char name[32];
};
enum { EMFL_VALID = 1 };
struct ErrMsg {
short flags;
short len;
char *msg;
};
static DEFINE_MUTEX(aoechr_mutex);
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;
static struct class *aoe_class;
static struct aoe_chardev chardevs[] = {
{ MINOR_ERR, "err" },
{ MINOR_DISCOVER, "discover" },
{ MINOR_INTERFACES, "interfaces" },
{ MINOR_REVALIDATE, "revalidate" },
{ MINOR_FLUSH, "flush" },
};
static int
discover(void)
{
aoecmd_cfg(0xffff, 0xff);
return 0;
}
static int
interfaces(const char __user *str, size_t size)
{
if (set_aoe_iflist(str, size)) {
printk(KERN_ERR
"aoe: could not set interface list: too many interfaces\n");
return -EINVAL;
}
return 0;
}
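/*
* Parse an "eX.Y" device specification from user space, then issue an
* ATA identify and a config query for that device.
*/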
static int
revalidate(const char __user *str, size_t size)
{
int major, minor, n;
ulong flags;
struct aoedev *d;
struct sk_buff *skb;
char buf[16];
if (size >= sizeof buf)
return -EINVAL;
buf[size] = '\0'; /* terminate at the copied length, not past uninitialized bytes */
if (copy_from_user(buf, str, size))
return -EFAULT;
n = sscanf(buf, "e%d.%d", &major, &minor);
if (n != 2) {
printk(KERN_ERR "aoe: invalid device specification\n");
return -EINVAL;
}
d = aoedev_by_aoeaddr(major, minor);
if (!d)
return -EINVAL;
spin_lock_irqsave(&d->lock, flags);
aoecmd_cleanslate(d);
loop:
skb = aoecmd_ata_id(d);
spin_unlock_irqrestore(&d->lock, flags);
if (!skb && !msleep_interruptible(200)) {
spin_lock_irqsave(&d->lock, flags);
goto loop;
}
if (skb) {
struct sk_buff_head queue;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
aoecmd_cfg(major, minor);
return 0;
}
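/*
* Queue an error message for readers of the "err" character device.
* Messages are kept in a fixed ring of NMSG slots; if the ring is
* full the message is silently dropped.
*/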
void
aoechr_error(char *msg)
{
struct ErrMsg *em;
char *mp;
ulong flags, n;
n = strlen(msg);
spin_lock_irqsave(&emsgs_lock, flags);
em = emsgs + emsgs_tail_idx;
if ((em->flags & EMFL_VALID)) {
bail: spin_unlock_irqrestore(&emsgs_lock, flags);
return;
}
mp = kmalloc(n, GFP_ATOMIC);
if (mp == NULL) {
printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
goto bail;
}
memcpy(mp, msg, n);
em->msg = mp;
em->flags |= EMFL_VALID;
em->len = n;
emsgs_tail_idx++;
emsgs_tail_idx %= ARRAY_SIZE(emsgs);
spin_unlock_irqrestore(&emsgs_lock, flags);
if (nblocked_emsgs_readers)
complete(&emsgs_comp);
}
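/*
* Writes to the control minors trigger the corresponding action:
* discovery, interface list update, device revalidation, or flush.
*/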
static ssize_t
aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
{
int ret = -EINVAL;
switch ((unsigned long) filp->private_data) {
default:
printk(KERN_INFO "aoe: can't write to that file.\n");
break;
case MINOR_DISCOVER:
ret = discover();
break;
case MINOR_INTERFACES:
ret = interfaces(buf, cnt);
break;
case MINOR_REVALIDATE:
ret = revalidate(buf, cnt);
break;
case MINOR_FLUSH:
ret = aoedev_flush(buf, cnt);
}
if (ret == 0)
ret = cnt;
return ret;
}
static int
aoechr_open(struct inode *inode, struct file *filp)
{
int n, i;
mutex_lock(&aoechr_mutex);
n = iminor(inode);
filp->private_data = (void *) (unsigned long) n;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
if (chardevs[i].minor == n) {
mutex_unlock(&aoechr_mutex);
return 0;
}
mutex_unlock(&aoechr_mutex);
return -EINVAL;
}
static int
aoechr_rel(struct inode *inode, struct file *filp)
{
return 0;
}
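/*
* Read one queued error message. Only the "err" minor supports
* reading; the call blocks until a message is available unless
* O_NDELAY is set, and fails with -EAGAIN if the caller's buffer is
* too small for the next message.
*/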
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
unsigned long n;
char *mp;
struct ErrMsg *em;
ssize_t len;
ulong flags;
n = (unsigned long) filp->private_data;
if (n != MINOR_ERR)
return -EFAULT;
spin_lock_irqsave(&emsgs_lock, flags);
for (;;) {
em = emsgs + emsgs_head_idx;
if ((em->flags & EMFL_VALID) != 0)
break;
if (filp->f_flags & O_NDELAY) {
spin_unlock_irqrestore(&emsgs_lock, flags);
return -EAGAIN;
}
nblocked_emsgs_readers++;
spin_unlock_irqrestore(&emsgs_lock, flags);
n = wait_for_completion_interruptible(&emsgs_comp);
spin_lock_irqsave(&emsgs_lock, flags);
nblocked_emsgs_readers--;
if (n) {
spin_unlock_irqrestore(&emsgs_lock, flags);
return -ERESTARTSYS;
}
}
if (em->len > cnt) {
spin_unlock_irqrestore(&emsgs_lock, flags);
return -EAGAIN;
}
mp = em->msg;
len = em->len;
em->msg = NULL;
em->flags &= ~EMFL_VALID;
emsgs_head_idx++;
emsgs_head_idx %= ARRAY_SIZE(emsgs);
spin_unlock_irqrestore(&emsgs_lock, flags);
n = copy_to_user(buf, mp, len);
kfree(mp);
return n == 0 ? len : -EFAULT;
}
static const struct file_operations aoe_fops = {
.write = aoechr_write,
.read = aoechr_read,
.open = aoechr_open,
.release = aoechr_rel,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static char *aoe_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
int __init
aoechr_init(void)
{
int n, i;
n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
if (n < 0) {
printk(KERN_ERR "aoe: can't register char device\n");
return n;
}
init_completion(&emsgs_comp);
spin_lock_init(&emsgs_lock);
aoe_class = class_create(THIS_MODULE, "aoe");
if (IS_ERR(aoe_class)) {
unregister_chrdev(AOE_MAJOR, "aoechr");
return PTR_ERR(aoe_class);
}
aoe_class->devnode = aoe_devnode;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
device_create(aoe_class, NULL,
MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
chardevs[i].name);
return 0;
}
void
aoechr_exit(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
class_destroy(aoe_class);
unregister_chrdev(AOE_MAJOR, "aoechr");
}
| gpl-2.0 |
talnoah/msm8960_Carbon-Kernel | kernel/trace/trace.c | 34 | 117424 | /*
* ring buffer based function tracer
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
*
* Originally taken from the RT patch by:
* Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Based on code from the latency_tracer, that is:
* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 William Lee Irwin III
*/
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/coresight-stm.h>
#include "trace.h"
#include "trace_output.h"
/*
* On boot up, the ring buffer is set to the minimum size, so that
* we do not waste memory on systems that are not using tracing.
*/
int ring_buffer_expanded;
/*
* We need to change this state when a selftest is running.
* A selftest will look into the ring-buffer to count the
* entries inserted during the selftest, although some concurrent
* insertions into the ring-buffer such as trace_printk could occur
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
/*
* If a tracer is running, we do not want to run SELFTEST.
*/
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
};
static struct tracer_flags dummy_tracer_flags = {
.val = 0,
.opts = dummy_tracer_opt
};
static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
return 0;
}
/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
static int tracing_disabled = 1;
DEFINE_PER_CPU(int, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
__this_cpu_inc(ftrace_cpu_disabled);
}
static inline void ftrace_enable_cpu(void)
{
__this_cpu_dec(ftrace_cpu_disabled);
preempt_enable();
}
cpumask_var_t __read_mostly tracing_buffer_mask;
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
*
* If there is an oops (or kernel panic) and the ftrace_dump_on_oops
* is set, then ftrace_dump is called. This will output the contents
* of the ftrace buffers to the console. This is very useful for
* capturing traces that lead to crashes and outputting them to a
* serial console.
*
* It is off by default, but you can enable it either by specifying
* "ftrace_dump_on_oops" in the kernel command line, or setting
* /proc/sys/kernel/ftrace_dump_on_oops
* Set 1 if you want to dump buffers of all CPUs
* Set 2 if you want to dump the buffer of the CPU that triggered oops
*/
enum ftrace_dump_mode ftrace_dump_on_oops;
static int tracing_set_tracer(const char *buf);
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
static int __init set_cmdline_ftrace(char *str)
{
strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
ring_buffer_expanded = 1;
return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
if (*str++ != '=' || !*str) {
ftrace_dump_on_oops = DUMP_ALL;
return 1;
}
if (!strcmp("orig_cpu", str)) {
ftrace_dump_on_oops = DUMP_ORIG;
return 1;
}
return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
do_div(nsec, 1000);
return nsec;
}
/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
* a linked list of pages that will store trace entries. The
* page descriptor of the pages in the memory is used to hold
* the linked list by linking the lru item in the page descriptor
* to each of the pages in the buffer per CPU.
*
* For each active CPU there is a data field that holds the
* pages for the buffer for that CPU. Each CPU has the same number
* of pages allocated for its buffer.
*/
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call, void *rec,
struct ring_buffer_event *event)
{
return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
cycle_t ftrace_now(int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
if (!global_trace.buffer)
return trace_clock_local();
ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
return ts;
}
/*
* The max_tr is used to snapshot the global_trace when a maximum
* latency is reached. Some tracers will use this to store a maximum
* trace while it continues examining live traces.
*
* The buffers for the max_tr are set up the same as the global_trace.
* When a snapshot is taken, the linked list of the max_tr is swapped
* with the linked list of the global_trace and the buffers are reset for
* the global_trace so the tracing can continue.
*/
static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
/**
* tracing_is_enabled - return tracer_enabled status
*
* This function is used by other tracers to know the status
* of the tracer_enabled flag. Tracers may use this function
* to know if it should enable their features when starting
* up. See irqsoff tracer for an example (start_irqsoff_tracer).
*/
int tracing_is_enabled(void)
{
return tracer_enabled;
}
/*
* trace_buf_size is the size in bytes that is allocated
* for a buffer. Note, the number of bytes is always rounded
* to page size.
*
* This number is purposely set to a low number of 16384.
* If the dump on oops happens, it will be much appreciated
* to not have to wait for all that output. Anyway, this is
* configurable at both boot time and run time.
*/
#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;
/* current_trace points to the tracer that is currently active */
static struct tracer *current_trace __read_mostly;
/*
* trace_types_lock is used to protect the trace_types list.
*/
static DEFINE_MUTEX(trace_types_lock);
/*
* serialize the access of the ring buffer
*
* ring buffer serializes readers, but it is low level protection.
* The validity of the events (which returns by ring_buffer_peek() ..etc)
* are not protected by ring buffer.
*
* The content of events may become garbage if we allow other processes
* to consume these events concurrently:
* A) the page of the consumed events may become a normal page
* (not reader page) in the ring buffer, and this page will be rewritten
* by events producer.
* B) The page of the consumed events may become a page for splice_read,
* and this page will be returned to system.
*
* These primitives allow multiple processes to access different cpu
* ring buffers concurrently.
*
* These primitives don't distinguish read-only and read-consume access.
* Multiple read-only accesses are also serialized.
*/
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
if (cpu == TRACE_PIPE_ALL_CPU) {
/* gain it for accessing the whole ring buffer. */
down_write(&all_cpu_access_lock);
} else {
/* gain it for accessing a cpu ring buffer. */
/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
down_read(&all_cpu_access_lock);
/* Secondly block other access to this @cpu ring buffer. */
mutex_lock(&per_cpu(cpu_access_lock, cpu));
}
}
static inline void trace_access_unlock(int cpu)
{
if (cpu == TRACE_PIPE_ALL_CPU) {
up_write(&all_cpu_access_lock);
} else {
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
up_read(&all_cpu_access_lock);
}
}
static inline void trace_access_lock_init(void)
{
int cpu;
for_each_possible_cpu(cpu)
mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else
static DEFINE_MUTEX(access_lock);
static inline void trace_access_lock(int cpu)
{
(void)cpu;
mutex_lock(&access_lock);
}
static inline void trace_access_unlock(int cpu)
{
(void)cpu;
mutex_unlock(&access_lock);
}
static inline void trace_access_lock_init(void)
{
}
#endif
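/*
* Illustrative usage (a sketch, not part of the original file): a
* reader of one cpu buffer, or of all buffers via TRACE_PIPE_ALL_CPU,
* brackets its accesses with these helpers:
*
* trace_access_lock(cpu);
* ... peek at or consume events for @cpu ...
* trace_access_unlock(cpu);
*/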
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
TRACE_ITER_IRQ_INFO;
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
static void wakeup_work_handler(struct work_struct *work)
{
wake_up(&trace_wait);
}
static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
/**
* tracing_on - enable tracing buffers
*
* This function enables tracing buffers that may have been
* disabled with tracing_off.
*/
void tracing_on(void)
{
if (global_trace.buffer)
ring_buffer_record_on(global_trace.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
* between setting this flag and actually turning
* on the buffer.
*/
global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);
/**
* tracing_off - turn off tracing buffers
*
* This function stops the tracing buffers from recording data.
* It does not disable any overhead the tracers themselves may
* be causing. This function simply causes all recording to
* the ring buffers to fail.
*/
void tracing_off(void)
{
if (global_trace.buffer)
ring_buffer_record_off(global_trace.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
* between setting this flag and actually turning
* on the buffer.
*/
global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);
/**
* tracing_is_on - show state of ring buffers enabled
*/
int tracing_is_on(void)
{
if (global_trace.buffer)
return ring_buffer_record_is_on(global_trace.buffer);
return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
/**
* trace_wake_up - wake up tasks waiting for trace input
*
* Schedules a delayed work to wake up any task that is blocked on the
* trace_wait queue. This is used with trace_poll for tasks polling the
* trace.
*/
void trace_wake_up(void)
{
const unsigned long delay = msecs_to_jiffies(2);
if (trace_flags & TRACE_ITER_BLOCK)
return;
schedule_delayed_work(&wakeup_work, delay);
}
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
if (!str)
return 0;
buf_size = memparse(str, &str);
/* nr_entries can not be zero */
if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
unsigned long threshold;
int ret;
if (!str)
return 0;
ret = strict_strtoul(str, 0, &threshold);
if (ret < 0)
return 0;
tracing_thresh = threshold * 1000;
return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
return nsecs / 1000;
}
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
"print-parent",
"sym-offset",
"sym-addr",
"verbose",
"raw",
"hex",
"bin",
"block",
"stacktrace",
"trace_printk",
"ftrace_preempt",
"branch",
"annotate",
"userstacktrace",
"sym-userobj",
"printk-msg-only",
"context-info",
"latency-format",
"sleep-time",
"graph-time",
"record-cmd",
"overwrite",
"disable_on_free",
"irq-info",
NULL
};
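/*
* Trace clock implementations selectable at run time;
* trace_clock_id indexes into this table.
*/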
static struct {
u64 (*func)(void);
const char *name;
} trace_clocks[] = {
{ trace_clock_local, "local" },
{ trace_clock_global, "global" },
{ trace_clock_counter, "counter" },
};
int trace_clock_id;
/*
* trace_parser_get_init - gets the buffer for trace parser
*/
int trace_parser_get_init(struct trace_parser *parser, int size)
{
memset(parser, 0, sizeof(*parser));
parser->buffer = kmalloc(size, GFP_KERNEL);
if (!parser->buffer)
return 1;
parser->size = size;
return 0;
}
/*
* trace_parser_put - frees the buffer for trace parser
*/
void trace_parser_put(struct trace_parser *parser)
{
kfree(parser->buffer);
}
/*
* trace_get_user - reads the user input string separated by space
* (matched by isspace(ch))
*
* For each string found the 'struct trace_parser' is updated,
* and the function returns.
*
* Returns number of bytes read.
*
* See kernel/trace/trace.h for 'struct trace_parser' details.
*/
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char ch;
size_t read = 0;
ssize_t ret;
if (!*ppos)
trace_parser_clear(parser);
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
/*
* The parser is not finished with the last write,
* continue reading the user input without skipping spaces.
*/
if (!parser->cont) {
/* skip white space */
while (cnt && isspace(ch)) {
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
}
/* only spaces were written */
if (isspace(ch)) {
*ppos += read;
ret = read;
goto out;
}
parser->idx = 0;
}
/* read the non-space input */
while (cnt && !isspace(ch)) {
if (parser->idx < parser->size - 1)
parser->buffer[parser->idx++] = ch;
else {
ret = -EINVAL;
goto out;
}
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
}
/* We either got finished input or we have to wait for another call. */
if (isspace(ch)) {
parser->buffer[parser->idx] = 0;
parser->cont = false;
} else {
parser->cont = true;
parser->buffer[parser->idx++] = ch;
}
*ppos += read;
ret = read;
out:
return ret;
}
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
int len;
int ret;
if (!cnt)
return 0;
if (s->len <= s->readpos)
return -EBUSY;
len = s->len - s->readpos;
if (cnt > len)
cnt = len;
ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
if (ret == cnt)
return -EFAULT;
cnt -= ret;
s->readpos += cnt;
return cnt;
}
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
void *ret;
if (s->len <= s->readpos)
return -EBUSY;
len = s->len - s->readpos;
if (cnt > len)
cnt = len;
ret = memcpy(buf, s->buffer + s->readpos, cnt);
if (!ret)
return -EFAULT;
s->readpos += cnt;
return cnt;
}
/*
* ftrace_max_lock is used to protect the swapping of buffers
* when taking a max snapshot. The buffers themselves are
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
* This is defined as an arch_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*
* It is also used in other places outside the update_max_tr
* so it needs to be defined outside of the
* CONFIG_TRACER_MAX_TRACE.
*/
static arch_spinlock_t ftrace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
unsigned long __read_mostly tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
* for later retrieval via /sys/kernel/debug/tracing/latency_trace)
*/
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct trace_array_cpu *data = tr->data[cpu];
struct trace_array_cpu *max_data;
max_tr.cpu = cpu;
max_tr.time_start = data->preempt_timestamp;
max_data = max_tr.data[cpu];
max_data->saved_latency = tracing_max_latency;
max_data->critical_start = data->critical_start;
max_data->critical_end = data->critical_end;
memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
max_data->pid = tsk->pid;
max_data->uid = task_uid(tsk);
max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
max_data->policy = tsk->policy;
max_data->rt_priority = tsk->rt_priority;
/* record this tasks comm */
tracing_record_cmdline(tsk);
}
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
* @tsk: the task with the latency
* @cpu: The cpu that initiated the trace.
*
* Flip the buffers between the @tr and the max_tr and record information
* about which task was the cause of this latency.
*/
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct ring_buffer *buf = tr->buffer;
if (trace_stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
return;
}
arch_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&ftrace_max_lock);
}
/**
* update_max_tr_single - only copy one trace over, and reset the rest
* @tr - tracer
* @tsk - task with the latency
* @cpu - the cpu of the buffer to copy.
*
* Flip the trace of a single CPU buffer between the @tr and the max_tr.
*/
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
int ret;
if (trace_stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
return;
}
arch_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
if (ret == -EBUSY) {
/*
* We failed to swap the buffer due to a commit taking
* place on this CPU. We fail to record, but we reset
* the max trace buffer (no one writes directly to it)
* and flag that it failed.
*/
trace_array_printk(&max_tr, _THIS_IP_,
"Failed to swap buffers due to commit in progress\n");
}
ftrace_enable_cpu();
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
*
* Register a new plugin tracer.
*/
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
struct tracer *t;
int ret = 0;
if (!type->name) {
pr_info("Tracer must have a name\n");
return -1;
}
if (strlen(type->name) >= MAX_TRACER_SIZE) {
pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
return -1;
}
mutex_lock(&trace_types_lock);
tracing_selftest_running = true;
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
pr_info("Tracer %s already registered\n",
type->name);
ret = -1;
goto out;
}
}
if (!type->set_flag)
type->set_flag = &dummy_set_flag;
if (!type->flags)
type->flags = &dummy_tracer_flags;
else
if (!type->flags->opts)
type->flags->opts = dummy_tracer_opt;
if (!type->wait_pipe)
type->wait_pipe = default_wait_pipe;
#ifdef CONFIG_FTRACE_STARTUP_TEST
if (type->selftest && !tracing_selftest_disabled) {
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
* tracer to be this tracer. The tracer can then run some
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
tracing_reset_online_cpus(tr);
current_trace = type;
/* If we expanded the buffers, make sure the max is expanded too */
if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, trace_buf_size);
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
/* the test is responsible for resetting too */
current_trace = saved_tracer;
if (ret) {
printk(KERN_CONT "FAILED!\n");
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
tracing_reset_online_cpus(tr);
/* Shrink the max buffer again */
if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, 1);
printk(KERN_CONT "PASSED\n");
}
#endif
type->next = trace_types;
trace_types = type;
out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer)
goto out_unlock;
if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
goto out_unlock;
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
/* Do we want this tracer to start on bootup? */
tracing_set_tracer(type->name);
default_bootup_tracer = NULL;
/* disable other selftests, since this will break it. */
tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
type->name);
#endif
out_unlock:
return ret;
}
void unregister_tracer(struct tracer *type)
{
struct tracer **t;
mutex_lock(&trace_types_lock);
for (t = &trace_types; *t; t = &(*t)->next) {
if (*t == type)
goto found;
}
pr_info("Tracer %s not registered\n", type->name);
goto out;
found:
*t = (*t)->next;
if (type == current_trace && tracer_enabled) {
tracer_enabled = 0;
tracing_stop();
if (current_trace->stop)
current_trace->stop(&global_trace);
current_trace = &nop_trace;
}
out:
mutex_unlock(&trace_types_lock);
}
static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
ftrace_disable_cpu();
ring_buffer_reset_cpu(buffer, cpu);
ftrace_enable_cpu();
}
void tracing_reset(struct trace_array *tr, int cpu)
{
struct ring_buffer *buffer = tr->buffer;
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_sched();
__tracing_reset(buffer, cpu);
ring_buffer_record_enable(buffer);
}
void tracing_reset_online_cpus(struct trace_array *tr)
{
struct ring_buffer *buffer = tr->buffer;
int cpu;
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_sched();
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
__tracing_reset(buffer, cpu);
ring_buffer_record_enable(buffer);
}
void tracing_reset_current(int cpu)
{
tracing_reset(&global_trace, cpu);
}
void tracing_reset_current_online_cpus(void)
{
tracing_reset_online_cpus(&global_trace);
}
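/*
* pid <-> comm bookkeeping: saved_cmdlines holds the last
* SAVED_CMDLINES task names, with map_pid_to_cmdline and
* map_cmdline_to_pid translating between pids and slots in
* that array.
*/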
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
cmdline_idx = 0;
}
int is_tracing_stopped(void)
{
return trace_stop_count;
}
/**
* ftrace_off_permanent - disable all ftrace code permanently
*
* This should only be called when a serious anomaly has
* been detected. This will turn off the function tracing,
* ring buffers, and other tracing utilities. It takes no
* locks and can be called from any context.
*/
void ftrace_off_permanent(void)
{
tracing_disabled = 1;
ftrace_stop();
tracing_off_permanent();
}
/**
* tracing_start - quick start of the tracer
*
* If tracing is enabled but was stopped by tracing_stop,
* this will start the tracer back up.
*/
void tracing_start(void)
{
struct ring_buffer *buffer;
unsigned long flags;
if (tracing_disabled)
return;
raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (--trace_stop_count) {
if (trace_stop_count < 0) {
/* Someone screwed up their debugging */
WARN_ON_ONCE(1);
trace_stop_count = 0;
}
goto out;
}
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
buffer = global_trace.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
buffer = max_tr.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
arch_spin_unlock(&ftrace_max_lock);
ftrace_start();
out:
raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
/**
* tracing_stop - quick stop of the tracer
*
* Light weight way to stop tracing. Use in conjunction with
* tracing_start.
*/
void tracing_stop(void)
{
struct ring_buffer *buffer;
unsigned long flags;
ftrace_stop();
raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (trace_stop_count++)
goto out;
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
buffer = global_trace.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
buffer = max_tr.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
arch_spin_unlock(&ftrace_max_lock);
out:
raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk)
{
unsigned pid, idx;
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
return;
/*
* It's not the end of the world if we don't get
* the lock, but we also don't want to spin
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
if (!arch_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
if (idx == NO_CMDLINE_MAP) {
idx = (cmdline_idx + 1) % SAVED_CMDLINES;
/*
* Check whether the cmdline buffer at idx has a pid
* mapped. We are going to overwrite that entry so we
* need to clear the map_pid_to_cmdline. Otherwise we
* would read the new comm for the old pid.
*/
pid = map_cmdline_to_pid[idx];
if (pid != NO_CMDLINE_MAP)
map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
map_cmdline_to_pid[idx] = tsk->pid;
map_pid_to_cmdline[tsk->pid] = idx;
cmdline_idx = idx;
}
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
arch_spin_unlock(&trace_cmdline_lock);
}
void trace_find_cmdline(int pid, char comm[])
{
unsigned map;
if (!pid) {
strcpy(comm, "<idle>");
return;
}
if (WARN_ON_ONCE(pid < 0)) {
strcpy(comm, "<XXX>");
return;
}
if (pid > PID_MAX_DEFAULT) {
strcpy(comm, "<...>");
return;
}
preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
strcpy(comm, saved_cmdlines[map]);
else
strcpy(comm, "<...>");
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
void tracing_record_cmdline(struct task_struct *tsk)
{
if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
!tracing_is_on())
return;
trace_save_cmdline(tsk);
}
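/*
* Fill in the fields common to every trace entry: pid, preemption
* count, and the irq/softirq/need-resched flags at the time of the
* event.
*/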
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
int pc)
{
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
entry->padding = 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
TRACE_FLAG_IRQS_NOSUPPORT |
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
int type,
unsigned long len,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL) {
struct trace_entry *ent = ring_buffer_event_data(event);
tracing_generic_entry_update(ent, flags, pc);
ent->type = type;
}
return event;
}
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
int wake)
{
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
if (wake)
trace_wake_up();
}
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
int type, unsigned long len,
unsigned long flags, int pc)
{
*current_rb = global_trace.buffer;
return trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
{
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
void
trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
struct ftrace_event_call *call = &event_function;
struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
}
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
if (likely(!atomic_read(&data->disabled)))
trace_function(tr, ip, parent_ip, flags, pc);
}
#ifdef CONFIG_STACKTRACE
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
struct ftrace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
int use_stack;
int size = FTRACE_STACK_ENTRIES;
trace.nr_entries = 0;
trace.skip = skip;
/*
* Since events can happen in NMIs there's no safe way to
* use the per cpu ftrace_stacks. We reserve it and if an interrupt
* or NMI comes in, it will just have to use the default
* FTRACE_STACK_ENTRIES.
*/
preempt_disable_notrace();
use_stack = ++__get_cpu_var(ftrace_stack_reserve);
/*
* We don't need any atomic variables, just a barrier.
* If an interrupt comes in, we don't care, because it would
* have exited and put the counter back to what we want.
* We just need a barrier to keep gcc from moving things
* around.
*/
barrier();
if (use_stack == 1) {
trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
if (regs)
save_stack_trace_regs(regs, &trace);
else
save_stack_trace(&trace);
if (trace.nr_entries > size)
size = trace.nr_entries;
} else
/* From now on, use_stack is a boolean */
use_stack = 0;
size *= sizeof(unsigned long);
event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
sizeof(*entry) + size, flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
memset(&entry->caller, 0, size);
if (use_stack)
memcpy(&entry->caller, trace.entries,
trace.nr_entries * sizeof(unsigned long));
else {
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.entries = entry->caller;
if (regs)
save_stack_trace_regs(regs, &trace);
else
save_stack_trace(&trace);
}
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
barrier();
__get_cpu_var(ftrace_stack_reserve)--;
preempt_enable_notrace();
}
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
int skip, int pc)
{
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}
/**
* trace_dump_stack - record a stack back trace in the trace buffer
*/
void trace_dump_stack(void)
{
unsigned long flags;
if (tracing_disabled || tracing_selftest_running)
return;
local_save_flags(flags);
/* skipping 3 traces, seems to get us at the caller of this function */
__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
struct ftrace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
* NMIs can not handle page faults, even with fix ups.
* Saving the user stack can (and often does) fault.
*/
if (unlikely(in_nmi()))
return;
/*
* prevent recursion, since the user stack tracing may
* trigger other kernel events.
*/
preempt_disable();
if (__this_cpu_read(user_stack_count))
goto out;
__this_cpu_inc(user_stack_count);
event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
sizeof(*entry), flags, pc);
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
entry->tgid = current->tgid;
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.skip = 0;
trace.entries = entry->caller;
save_stack_trace_user(&trace);
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
out:
preempt_enable();
}
#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
#endif /* CONFIG_STACKTRACE */
/**
* trace_vbprintk - write binary msg to tracing buffer
*
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
static arch_spinlock_t trace_buf_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static u32 trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
struct bprint_entry *entry;
unsigned long flags;
int disable;
int cpu, len = 0, size, pc;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
pc = preempt_count();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disable = atomic_inc_return(&data->disabled);
if (unlikely(disable != 1))
goto out;
/* Lockdep uses trace_printk for lock tracing */
local_irq_save(flags);
arch_spin_lock(&trace_buf_lock);
len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
if (len > TRACE_BUF_SIZE || len < 0)
goto out_unlock;
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
flags, pc);
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->fmt = fmt;
memcpy(entry->buf, trace_buf, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out_unlock:
arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
out:
atomic_dec_return(&data->disabled);
preempt_enable_notrace();
unpause_graph_tracing();
return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
int ret;
va_list ap;
if (!(trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
ret = trace_array_vprintk(tr, ip, fmt, ap);
va_end(ap);
return ret;
}
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct trace_array_cpu *data;
int cpu, len = 0, size, pc;
struct print_entry *entry;
unsigned long irq_flags;
int disable;
if (tracing_disabled || tracing_selftest_running)
return 0;
pc = preempt_count();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disable = atomic_inc_return(&data->disabled);
if (unlikely(disable != 1))
goto out;
pause_graph_tracing();
raw_local_irq_save(irq_flags);
arch_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
size = sizeof(*entry) + len + 1;
buffer = tr->buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
irq_flags, pc);
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
entry->ip = ip;
memcpy(&entry->buf, trace_buf, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, irq_flags, 6, pc);
}
out_unlock:
arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
atomic_dec_return(&data->disabled);
preempt_enable_notrace();
return len;
}
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
iter->idx++;
if (iter->buffer_iter[iter->cpu])
ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
ftrace_enable_cpu();
}
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
else
event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
lost_events);
ftrace_enable_cpu();
if (event) {
iter->ent_size = ring_buffer_event_length(event);
return ring_buffer_event_data(event);
}
iter->ent_size = 0;
return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
unsigned long *missing_events, u64 *ent_ts)
{
struct ring_buffer *buffer = iter->tr->buffer;
struct trace_entry *ent, *next = NULL;
unsigned long lost_events = 0, next_lost = 0;
int cpu_file = iter->cpu_file;
u64 next_ts = 0, ts;
int next_cpu = -1;
int next_size = 0;
int cpu;
/*
* If we are in a per_cpu trace file, don't bother by iterating over
* all cpu and peek directly.
*/
if (cpu_file > TRACE_PIPE_ALL_CPU) {
if (ring_buffer_empty_cpu(buffer, cpu_file))
return NULL;
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
if (ent_cpu)
*ent_cpu = cpu_file;
return ent;
}
for_each_tracing_cpu(cpu) {
if (ring_buffer_empty_cpu(buffer, cpu))
continue;
ent = peek_next_entry(iter, cpu, &ts, &lost_events);
/*
* Pick the entry with the smallest timestamp:
*/
if (ent && (!next || ts < next_ts)) {
next = ent;
next_cpu = cpu;
next_ts = ts;
next_lost = lost_events;
next_size = iter->ent_size;
}
}
iter->ent_size = next_size;
if (ent_cpu)
*ent_cpu = next_cpu;
if (ent_ts)
*ent_ts = next_ts;
if (missing_events)
*missing_events = next_lost;
return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts)
{
return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
iter->ent = __find_next_entry(iter, &iter->cpu,
&iter->lost_events, &iter->ts);
if (iter->ent)
trace_iterator_increment(iter);
return iter->ent ? iter : NULL;
}
static void trace_consume(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
&iter->lost_events);
ftrace_enable_cpu();
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
struct trace_iterator *iter = m->private;
int i = (int)*pos;
void *ent;
WARN_ON_ONCE(iter->leftover);
(*pos)++;
/* can't go backwards */
if (iter->idx > i)
return NULL;
if (iter->idx < 0)
ent = trace_find_next_entry_inc(iter);
else
ent = iter;
while (ent && iter->idx < i)
ent = trace_find_next_entry_inc(iter);
iter->pos = *pos;
return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
struct trace_array *tr = iter->tr;
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter;
unsigned long entries = 0;
u64 ts;
tr->data[cpu]->skipped_entries = 0;
if (!iter->buffer_iter[cpu])
return;
buf_iter = iter->buffer_iter[cpu];
ring_buffer_iter_reset(buf_iter);
/*
* With the max latency tracers we could have the case that a
* reset never took place on a cpu. This is evident when the
* timestamp is before the start of the buffer.
*/
while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
if (ts >= iter->tr->time_start)
break;
entries++;
ring_buffer_read(buf_iter, NULL);
}
tr->data[cpu]->skipped_entries = entries;
}
/*
* The current tracer is copied to avoid holding a global lock
* all around.
*/
static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
static struct tracer *old_tracer;
int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
int cpu;
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);
atomic_inc(&trace_record_cmdline_disabled);
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
iter->idx = -1;
ftrace_disable_cpu();
if (cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu)
tracing_iter_reset(iter, cpu);
} else
tracing_iter_reset(iter, cpu_file);
ftrace_enable_cpu();
iter->leftover = 0;
for (p = iter; p && l < *pos; p = s_next(m, p, &l))
;
} else {
/*
* If we overflowed the seq_file before, then we want
* to just reuse the trace_seq buffer again.
*/
if (iter->leftover)
p = iter;
else {
l = *pos - 1;
p = s_next(m, p, &l);
}
}
trace_event_read_lock();
trace_access_lock(cpu_file);
return p;
}
static void s_stop(struct seq_file *m, void *p)
{
struct trace_iterator *iter = m->private;
atomic_dec(&trace_record_cmdline_disabled);
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
static void
get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
{
unsigned long count;
int cpu;
*total = 0;
*entries = 0;
for_each_tracing_cpu(cpu) {
count = ring_buffer_entries_cpu(tr->buffer, cpu);
/*
* If this buffer has skipped entries, then we hold all
* entries for the trace and we need to ignore the
* ones before the time stamp.
*/
if (tr->data[cpu]->skipped_entries) {
count -= tr->data[cpu]->skipped_entries;
/* total is the same as the entries */
*total += count;
} else
*total += count +
ring_buffer_overrun_cpu(tr->buffer, cpu);
*entries += count;
}
}
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "#                  _------=> CPU#            \n");
seq_puts(m, "#                 / _-----=> irqs-off        \n");
seq_puts(m, "#                | / _----=> need-resched    \n");
seq_puts(m, "#                || / _---=> hardirq/softirq \n");
seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
seq_puts(m, "#                |||| /     delay            \n");
seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}
static void print_event_info(struct trace_array *tr, struct seq_file *m)
{
unsigned long total;
unsigned long entries;
get_total_entries(tr, &total, &entries);
seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
entries, total, num_online_cpus());
seq_puts(m, "#\n");
}
static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
print_event_info(tr, m);
seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
seq_puts(m, "#              | |       |          |         |\n");
}
static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
{
print_event_info(tr, m);
seq_puts(m, "#                              _-----=> irqs-off\n");
seq_puts(m, "#                             / _----=> need-resched\n");
seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
seq_puts(m, "#                            || / _--=> preempt-depth\n");
seq_puts(m, "#                            ||| /     delay\n");
seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
seq_puts(m, "#              | |       |   ||||       |         |\n");
}
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct trace_array *tr = iter->tr;
struct trace_array_cpu *data = tr->data[tr->cpu];
struct tracer *type = current_trace;
unsigned long entries;
unsigned long total;
const char *name = "preemption";
if (type)
name = type->name;
get_total_entries(tr, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
seq_puts(m, "# -----------------------------------"
"---------------------------------\n");
seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
" (M:%s VP:%d, KP:%d, SP:%d HP:%d",
nsecs_to_usecs(data->saved_latency),
entries,
total,
tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
"server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
"desktop",
#elif defined(CONFIG_PREEMPT)
"preempt",
#else
"unknown",
#endif
/* These are reserved for later use */
0, 0, 0, 0);
#ifdef CONFIG_SMP
seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
seq_puts(m, ")\n");
#endif
seq_puts(m, "# -----------------\n");
seq_printf(m, "# | task: %.16s-%d "
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
data->comm, data->pid, data->uid, data->nice,
data->policy, data->rt_priority);
seq_puts(m, "# -----------------\n");
if (data->critical_start) {
seq_puts(m, "# => started at: ");
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
trace_print_seq(m, &iter->seq);
seq_puts(m, "\n# => ended at: ");
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
trace_print_seq(m, &iter->seq);
seq_puts(m, "\n#\n");
}
seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
if (!(trace_flags & TRACE_ITER_ANNOTATE))
return;
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
if (cpumask_test_cpu(iter->cpu, iter->started))
return;
if (iter->tr->data[iter->cpu]->skipped_entries)
return;
cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
if (iter->idx > 1)
trace_seq_printf(s, "##### CPU %u buffer started ####\n",
iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
test_cpu_buff_start(iter);
event = ftrace_find_event(entry->type);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
if (!trace_print_lat_context(iter))
goto partial;
} else {
if (!trace_print_context(iter))
goto partial;
}
}
if (event)
return event->funcs->trace(iter, sym_flags, event);
if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
goto partial;
return TRACE_TYPE_HANDLED;
partial:
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
if (!trace_seq_printf(s, "%d %d %llu ",
entry->pid, iter->cpu, iter->ts))
goto partial;
}
event = ftrace_find_event(entry->type);
if (event)
return event->funcs->raw(iter, 0, event);
if (!trace_seq_printf(s, "%d ?\n", entry->type))
goto partial;
return TRACE_TYPE_HANDLED;
partial:
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
}
event = ftrace_find_event(entry->type);
if (event) {
enum print_line_t ret = event->funcs->hex(iter, 0, event);
if (ret != TRACE_TYPE_HANDLED)
return ret;
}
SEQ_PUT_FIELD_RET(s, newline);
return TRACE_TYPE_HANDLED;
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_FIELD_RET(s, entry->pid);
SEQ_PUT_FIELD_RET(s, iter->cpu);
SEQ_PUT_FIELD_RET(s, iter->ts);
}
event = ftrace_find_event(entry->type);
return event ? event->funcs->binary(iter, 0, event) :
TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
{
int cpu;
/* If we are looking at one CPU buffer, only check that one */
if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
cpu = iter->cpu_file;
if (iter->buffer_iter[cpu]) {
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
return 0;
}
return 1;
}
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu]) {
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
return 0;
}
}
return 1;
}
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
enum print_line_t ret;
if (iter->lost_events &&
!trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
iter->cpu, iter->lost_events))
return TRACE_TYPE_PARTIAL_LINE;
if (iter->trace && iter->trace->print_line) {
ret = iter->trace->print_line(iter);
if (ret != TRACE_TYPE_UNHANDLED)
return ret;
}
if (iter->ent->type == TRACE_BPRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
return trace_print_bprintk_msg_only(iter);
if (iter->ent->type == TRACE_PRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
return trace_print_printk_msg_only(iter);
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
if (trace_flags & TRACE_ITER_HEX)
return print_hex_fmt(iter);
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
return print_trace_fmt(iter);
}
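/*
 * The printers above are selected by option flags: "bin", "hex" and
 * "raw" (assuming the names in trace_options[] match the TRACE_ITER_*
 * bits) pick the alternate formats, otherwise the human-readable
 * default is used, e.g.:
 *
 * # echo hex > /sys/kernel/debug/tracing/trace_options
 * # cat /sys/kernel/debug/tracing/trace
 */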
void trace_latency_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
print_trace_header(m, iter);
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
print_trace_header(m, iter);
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
} else {
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
if (trace_flags & TRACE_ITER_IRQ_INFO)
print_func_help_header_irq(iter->tr, m);
else
print_func_help_header(iter->tr, m);
}
}
}
static void test_ftrace_alive(struct seq_file *m)
{
if (!ftrace_is_dead())
return;
seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
seq_puts(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
int ret;
if (iter->ent == NULL) {
if (iter->tr) {
seq_printf(m, "# tracer: %s\n", iter->trace->name);
seq_puts(m, "#\n");
test_ftrace_alive(m);
}
if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m);
else
trace_default_header(m);
} else if (iter->leftover) {
/*
* If we filled the seq_file buffer earlier, we
* want to just show it now.
*/
ret = trace_print_seq(m, &iter->seq);
/* ret should this time be zero, but you never know */
iter->leftover = ret;
} else {
print_trace_line(iter);
ret = trace_print_seq(m, &iter->seq);
/*
* If we overflow the seq_file buffer, then it will
* ask us for this data again at start up.
* Use that instead.
* ret is 0 if seq_file write succeeded.
* -1 otherwise.
*/
iter->leftover = ret;
}
return 0;
}
static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
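/*
 * These hooks follow the standard seq_file protocol; a read of the
 * "trace" file is roughly:
 *
 * p = s_start(m, &pos);
 * while (p) {
 *         s_show(m, p);
 *         p = s_next(m, p, &pos);
 * }
 * s_stop(m, p);
 *
 * with s_start()/s_stop() also taking and releasing the access locks.
 */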
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
long cpu_file = (long) inode->i_private;
void *fail_ret = ERR_PTR(-ENOMEM);
struct trace_iterator *iter;
struct seq_file *m;
int cpu, ret;
if (tracing_disabled)
return ERR_PTR(-ENODEV);
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return ERR_PTR(-ENOMEM);
/*
* We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading.
*/
mutex_lock(&trace_types_lock);
iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace)
goto fail;
if (current_trace)
*iter->trace = *current_trace;
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
iter->tr = &global_trace;
iter->pos = -1;
mutex_init(&iter->mutex);
iter->cpu_file = cpu_file;
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
iter->trace->open(iter);
/* Annotate start of buffers if we had overruns */
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* stop the trace while dumping */
tracing_stop();
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->tr->buffer, cpu);
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->tr->buffer, cpu);
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
ret = seq_open(file, &tracer_seq_ops);
if (ret < 0) {
fail_ret = ERR_PTR(ret);
goto fail_buffer;
}
m = file->private_data;
m->private = iter;
mutex_unlock(&trace_types_lock);
return iter;
fail_buffer:
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
free_cpumask_var(iter->started);
tracing_start();
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
kfree(iter);
return fail_ret;
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
if (tracing_disabled)
return -ENODEV;
filp->private_data = inode->i_private;
return 0;
}
static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct trace_iterator *iter;
int cpu;
if (!(file->f_mode & FMODE_READ))
return 0;
iter = m->private;
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
/* reenable tracing if it was previously enabled */
tracing_start();
mutex_unlock(&trace_types_lock);
seq_release(inode, file);
mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started);
kfree(iter->trace);
kfree(iter);
return 0;
}
static int tracing_open(struct inode *inode, struct file *file)
{
struct trace_iterator *iter;
int ret = 0;
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
long cpu = (long) inode->i_private;
if (cpu == TRACE_PIPE_ALL_CPU)
tracing_reset_online_cpus(&global_trace);
else
tracing_reset(&global_trace, cpu);
}
if (file->f_mode & FMODE_READ) {
iter = __tracing_open(inode, file);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
}
return ret;
}
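/*
 * Because O_TRUNC resets the buffer, a plain shell truncation clears
 * the trace without reading it:
 *
 * # echo > /sys/kernel/debug/tracing/trace
 */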
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct tracer *t = v;
(*pos)++;
if (t)
t = t->next;
return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
struct tracer *t;
loff_t l = 0;
mutex_lock(&trace_types_lock);
for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
;
return t;
}
static void t_stop(struct seq_file *m, void *p)
{
mutex_unlock(&trace_types_lock);
}
static int t_show(struct seq_file *m, void *v)
{
struct tracer *t = v;
if (!t)
return 0;
seq_puts(m, t->name);
if (t->next)
seq_putc(m, ' ');
else
seq_putc(m, '\n');
return 0;
}
static const struct seq_operations show_traces_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = t_show,
};
static int show_traces_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
return seq_open(file, &show_traces_seq_ops);
}
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
return count;
}
static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
{
if (file->f_mode & FMODE_READ)
return seq_lseek(file, offset, origin);
else
return 0;
}
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
.write = tracing_write_stub,
.llseek = tracing_seek,
.release = tracing_release,
};
static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
.release = seq_release,
.llseek = seq_lseek,
};
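/*
 * show_traces_fops backs the "available_tracers" file, which lists the
 * registered tracers on a single line, as in the mini-HOWTO below:
 *
 * # cat /sys/kernel/debug/tracing/available_tracers
 * wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop
 */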
/*
* Only trace on a CPU if the bitmask is set:
*/
static cpumask_var_t tracing_cpumask;
/*
* The tracer itself will not take this lock, but still we want
* to provide a consistent cpumask to user-space:
*/
static DEFINE_MUTEX(tracing_cpumask_update_lock);
/*
* Temporary storage for the character representation of the
* CPU bitmask (and one more byte for the newline):
*/
static char mask_str[NR_CPUS + 1];
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
int len;
mutex_lock(&tracing_cpumask_update_lock);
len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
if (count - len < 2) {
count = -EINVAL;
goto out_err;
}
len += sprintf(mask_str + len, "\n");
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
out_err:
mutex_unlock(&tracing_cpumask_update_lock);
return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
int err, cpu;
cpumask_var_t tracing_cpumask_new;
if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
if (err)
goto err_unlock;
mutex_lock(&tracing_cpumask_update_lock);
local_irq_disable();
arch_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
*/
if (cpumask_test_cpu(cpu, tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&global_trace.data[cpu]->disabled);
ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
}
if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&global_trace.data[cpu]->disabled);
ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
}
}
arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
cpumask_copy(tracing_cpumask, tracing_cpumask_new);
mutex_unlock(&tracing_cpumask_update_lock);
free_cpumask_var(tracing_cpumask_new);
return count;
err_unlock:
free_cpumask_var(tracing_cpumask_new);
return err;
}
static const struct file_operations tracing_cpumask_fops = {
.open = tracing_open_generic,
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
.llseek = generic_file_llseek,
};
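/*
 * The mask is parsed as hex. For example, to restrict tracing to cpus
 * 0 and 1 (assuming this file is exposed as "tracing_cpumask"):
 *
 * # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */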
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
struct tracer_opt *trace_opts;
u32 tracer_flags;
int i;
mutex_lock(&trace_types_lock);
tracer_flags = current_trace->flags->val;
trace_opts = current_trace->flags->opts;
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
seq_printf(m, "%s\n", trace_options[i]);
else
seq_printf(m, "no%s\n", trace_options[i]);
}
for (i = 0; trace_opts[i].name; i++) {
if (tracer_flags & trace_opts[i].bit)
seq_printf(m, "%s\n", trace_opts[i].name);
else
seq_printf(m, "no%s\n", trace_opts[i].name);
}
mutex_unlock(&trace_types_lock);
return 0;
}
static int __set_tracer_option(struct tracer *trace,
struct tracer_flags *tracer_flags,
struct tracer_opt *opts, int neg)
{
int ret;
ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
if (ret)
return ret;
if (neg)
tracer_flags->val &= ~opts->bit;
else
tracer_flags->val |= opts->bit;
return 0;
}
/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
struct tracer_flags *tracer_flags = trace->flags;
struct tracer_opt *opts = NULL;
int i;
for (i = 0; tracer_flags->opts[i].name; i++) {
opts = &tracer_flags->opts[i];
if (strcmp(cmp, opts->name) == 0)
return __set_tracer_option(trace, trace->flags,
opts, neg);
}
return -EINVAL;
}
static void set_tracer_flags(unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
return;
if (enabled)
trace_flags |= mask;
else
trace_flags &= ~mask;
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
if (mask == TRACE_ITER_OVERWRITE)
ring_buffer_change_overwrite(global_trace.buffer, enabled);
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp;
int neg = 0;
int ret;
int i;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
if (strncmp(cmp, "no", 2) == 0) {
neg = 1;
cmp += 2;
}
for (i = 0; trace_options[i]; i++) {
if (strcmp(cmp, trace_options[i]) == 0) {
set_tracer_flags(1 << i, !neg);
break;
}
}
/* If no option could be set, test the specific tracer options */
if (!trace_options[i]) {
mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
*ppos += cnt;
return cnt;
}
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
return single_open(file, tracing_trace_options_show, NULL);
}
static const struct file_operations tracing_iter_fops = {
.open = tracing_trace_options_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = tracing_trace_options_write,
};
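/*
 * Option names toggle flags by name, and a "no" prefix clears them,
 * as the mini-HOWTO below shows:
 *
 * # echo print-parent > /sys/kernel/debug/tracing/trace_options
 * # echo nosym-offset > /sys/kernel/debug/tracing/trace_options
 */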
static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
"# mount -t debugfs nodev /sys/kernel/debug\n\n"
"# cat /sys/kernel/debug/tracing/available_tracers\n"
"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"nop\n"
"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"wakeup\n"
"# cat /sys/kernel/debug/tracing/trace_options\n"
"noprint-parent nosym-offset nosym-addr noverbose\n"
"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
return simple_read_from_buffer(ubuf, cnt, ppos,
readme_msg, strlen(readme_msg));
}
static const struct file_operations tracing_readme_fops = {
.open = tracing_open_generic,
.read = tracing_readme_read,
.llseek = generic_file_llseek,
};
static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char *buf_comm;
char *file_buf;
char *buf;
int len = 0;
int pid;
int i;
file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
if (!file_buf)
return -ENOMEM;
buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
if (!buf_comm) {
kfree(file_buf);
return -ENOMEM;
}
buf = file_buf;
for (i = 0; i < SAVED_CMDLINES; i++) {
int r;
pid = map_cmdline_to_pid[i];
if (pid == -1 || pid == NO_CMDLINE_MAP)
continue;
trace_find_cmdline(pid, buf_comm);
r = sprintf(buf, "%d %s\n", pid, buf_comm);
buf += r;
len += r;
}
len = simple_read_from_buffer(ubuf, cnt, ppos,
file_buf, len);
kfree(file_buf);
kfree(buf_comm);
return len;
}
static const struct file_operations tracing_saved_cmdlines_fops = {
.open = tracing_open_generic,
.read = tracing_saved_cmdlines_read,
.llseek = generic_file_llseek,
};
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
int r;
r = sprintf(buf, "%u\n", tracer_enabled);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
val = !!val;
mutex_lock(&trace_types_lock);
if (tracer_enabled ^ val) {
/* Only need to warn if this is used to change the state */
WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
if (val) {
tracer_enabled = 1;
if (current_trace->start)
current_trace->start(tr);
tracing_start();
} else {
tracer_enabled = 0;
tracing_stop();
if (current_trace->stop)
current_trace->stop(tr);
}
}
mutex_unlock(&trace_types_lock);
*ppos += cnt;
return cnt;
}
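/*
 * Writing this file warns once ("tracing_enabled is deprecated. Use
 * tracing_on") and forwards to the start/stop machinery; new code
 * should use the tracing_on switch from the mini-HOWTO instead:
 *
 * # echo 1 > /sys/kernel/debug/tracing/tracing_on
 */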
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[MAX_TRACER_SIZE+2];
int r;
mutex_lock(&trace_types_lock);
if (current_trace)
r = sprintf(buf, "%s\n", current_trace->name);
else
r = sprintf(buf, "\n");
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
int tracer_init(struct tracer *t, struct trace_array *tr)
{
tracing_reset_online_cpus(tr);
return t->init(tr);
}
static int __tracing_resize_ring_buffer(unsigned long size)
{
int ret;
/*
* If the kernel or the user changes the size of the ring buffer,
* we use the size that was given and can forget about expanding
* it later.
*/
ring_buffer_expanded = 1;
ret = ring_buffer_resize(global_trace.buffer, size);
if (ret < 0)
return ret;
if (!current_trace->use_max_tr)
goto out;
ret = ring_buffer_resize(max_tr.buffer, size);
if (ret < 0) {
int r;
r = ring_buffer_resize(global_trace.buffer,
global_trace.entries);
if (r < 0) {
/*
* AARGH! We are left with different
* size max buffer!!!!
* The max buffer is our "snapshot" buffer.
* When a tracer needs a snapshot (one of the
* latency tracers), it swaps the max buffer
* with the saved snap shot. We succeeded to
* update the size of the main buffer, but failed to
* update the size of the max buffer. But when we tried
* to reset the main buffer to the original size, we
* failed there too. This is very unlikely to
* happen, but if it does, warn and kill all
* tracing.
*/
WARN_ON(1);
tracing_disabled = 1;
}
return ret;
}
max_tr.entries = size;
out:
global_trace.entries = size;
return ret;
}
static ssize_t tracing_resize_ring_buffer(unsigned long size)
{
int cpu, ret = size;
mutex_lock(&trace_types_lock);
tracing_stop();
/* disable all cpu buffers */
for_each_tracing_cpu(cpu) {
if (global_trace.data[cpu])
atomic_inc(&global_trace.data[cpu]->disabled);
if (max_tr.data[cpu])
atomic_inc(&max_tr.data[cpu]->disabled);
}
if (size != global_trace.entries)
ret = __tracing_resize_ring_buffer(size);
if (ret < 0)
ret = -ENOMEM;
for_each_tracing_cpu(cpu) {
if (global_trace.data[cpu])
atomic_dec(&global_trace.data[cpu]->disabled);
if (max_tr.data[cpu])
atomic_dec(&max_tr.data[cpu]->disabled);
}
tracing_start();
mutex_unlock(&trace_types_lock);
return ret;
}
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
*
* To save memory when tracing is never used on a system that has it
* configured in, the ring buffers are set to a minimum size. Once a
* user starts to use the tracing facility, they need to grow to
* their default size.
*
* This function is to be called when a tracer is about to be used.
*/
int tracing_update_buffers(void)
{
int ret = 0;
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded)
ret = __tracing_resize_ring_buffer(trace_buf_size);
mutex_unlock(&trace_types_lock);
return ret;
}
struct trace_option_dentry;
static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer);
static void
destroy_trace_option_files(struct trace_option_dentry *topts);
static int tracing_set_tracer(const char *buf)
{
static struct trace_option_dentry *topts;
struct trace_array *tr = &global_trace;
struct tracer *t;
int ret = 0;
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer(trace_buf_size);
if (ret < 0)
goto out;
ret = 0;
}
for (t = trace_types; t; t = t->next) {
if (strcmp(t->name, buf) == 0)
break;
}
if (!t) {
ret = -EINVAL;
goto out;
}
if (t == current_trace)
goto out;
trace_branch_disable();
if (current_trace && current_trace->reset)
current_trace->reset(tr);
if (current_trace && current_trace->use_max_tr) {
/*
* We don't free the ring buffer; instead, we resize it, because
* the max_tr ring buffer has some state (e.g. ring->clock) that
* we want to preserve.
*/
ring_buffer_resize(max_tr.buffer, 1);
max_tr.entries = 1;
}
destroy_trace_option_files(topts);
current_trace = t;
topts = create_trace_option_files(current_trace);
if (current_trace->use_max_tr) {
ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
if (ret < 0)
goto out;
max_tr.entries = global_trace.entries;
}
if (t->init) {
ret = tracer_init(t, tr);
if (ret)
goto out;
}
trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
return ret;
}
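/*
 * tracing_set_tracer() runs when a tracer name is written to
 * "current_tracer", e.g.:
 *
 * # echo wakeup > /sys/kernel/debug/tracing/current_tracer
 *
 * It expands the ring buffer on first use, resets the previous tracer,
 * resizes max_tr down when leaving a use_max_tr tracer and back up
 * when entering one, and rebuilds the tracer-specific files under
 * options/.
 */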
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[MAX_TRACER_SIZE+1];
int i;
size_t ret;
int err;
ret = cnt;
if (cnt > MAX_TRACER_SIZE)
cnt = MAX_TRACER_SIZE;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
/* strip trailing whitespace */
for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
buf[i] = 0;
err = tracing_set_tracer(buf);
if (err)
return err;
*ppos += ret;
return ret;
}
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
char buf[64];
int r;
r = snprintf(buf, sizeof(buf), "%ld\n",
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
if (r > sizeof(buf))
r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
*ptr = val * 1000;
return cnt;
}
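/*
 * The latency value is exchanged in microseconds: reads convert with
 * nsecs_to_usecs() and writes multiply by 1000 before storing, so
 * (assuming the usual "tracing_max_latency" file) the watermark can
 * be reset with:
 *
 * # echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */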
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
long cpu_file = (long) inode->i_private;
struct trace_iterator *iter;
int ret = 0;
if (tracing_disabled)
return -ENODEV;
mutex_lock(&trace_types_lock);
/* create a buffer to store the information to pass to userspace */
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
ret = -ENOMEM;
goto out;
}
/*
* We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading.
*/
iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace) {
ret = -ENOMEM;
goto fail;
}
if (current_trace)
*iter->trace = *current_trace;
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
ret = -ENOMEM;
goto fail;
}
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);
filp->private_data = iter;
if (iter->trace->pipe_open)
iter->trace->pipe_open(iter);
nonseekable_open(inode, filp);
out:
mutex_unlock(&trace_types_lock);
return ret;
fail:
kfree(iter->trace);
kfree(iter);
mutex_unlock(&trace_types_lock);
return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
mutex_lock(&trace_types_lock);
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
mutex_destroy(&iter->mutex);
kfree(iter->trace);
kfree(iter);
return 0;
}
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
struct trace_iterator *iter = filp->private_data;
if (trace_flags & TRACE_ITER_BLOCK) {
/*
* Always select as readable when in blocking mode
*/
return POLLIN | POLLRDNORM;
} else {
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
poll_wait(filp, &trace_wait, poll_table);
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
return 0;
}
}
void default_wait_pipe(struct trace_iterator *iter)
{
DEFINE_WAIT(wait);
prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
if (trace_empty(iter))
schedule();
finish_wait(&trace_wait, &wait);
}
/*
* This is a makeshift waitqueue.
* A tracer might use this callback in some rare cases:
*
* 1) the current tracer might hold the runqueue lock when it wakes up
* a reader, hence a deadlock (sched, function, and function graph tracers)
* 2) the function tracers trace all functions, and we don't want
* the overhead of calling wake_up and friends
* (and tracing them too)
*
* Anyway, this is a really primitive wakeup.
*/
void poll_wait_pipe(struct trace_iterator *iter)
{
set_current_state(TASK_INTERRUPTIBLE);
/* sleep for 100 msecs, and try again. */
schedule_timeout(HZ / 10);
}
/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
struct trace_iterator *iter = filp->private_data;
while (trace_empty(iter)) {
if ((filp->f_flags & O_NONBLOCK)) {
return -EAGAIN;
}
mutex_unlock(&iter->mutex);
iter->trace->wait_pipe(iter);
mutex_lock(&iter->mutex);
if (signal_pending(current))
return -EINTR;
/*
* We block until we read something and tracing is disabled.
* We still block if tracing is disabled, but we have never
* read anything. This allows a user to cat this file, and
* then enable tracing. But after we have read something,
* we give an EOF when tracing is again disabled.
*
* iter->pos will be 0 if we haven't read anything.
*/
if (!tracer_enabled && iter->pos)
break;
}
return 1;
}
/*
* Consumer reader.
*/
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_iterator *iter = filp->private_data;
static struct tracer *old_tracer;
ssize_t sret;
/* return any leftover data */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (sret != -EBUSY)
return sret;
trace_seq_init(&iter->seq);
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);
/*
* Avoid more than one consumer on a single file descriptor.
* This is just a matter of trace coherency; the ring buffer itself
* is protected.
*/
mutex_lock(&iter->mutex);
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
goto out;
}
waitagain:
sret = tracing_wait_pipe(filp);
if (sret <= 0)
goto out;
/* stop when tracing is finished */
if (trace_empty(iter)) {
sret = 0;
goto out;
}
if (cnt >= PAGE_SIZE)
cnt = PAGE_SIZE - 1;
/* reset all but tr, trace, and overruns */
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
iter->pos = -1;
trace_event_read_lock();
trace_access_lock(iter->cpu_file);
while (trace_find_next_entry_inc(iter) != NULL) {
enum print_line_t ret;
int len = iter->seq.len;
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
/* don't print partial lines */
iter->seq.len = len;
break;
}
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(iter);
if (iter->seq.len >= cnt)
break;
/*
* Setting the full flag means we reached the trace_seq buffer
* size; we should have left via the partial-output condition
* above. One of the trace_seq_* functions is not being used
* properly.
*/
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
iter->ent->type);
}
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (iter->seq.readpos >= iter->seq.len)
trace_seq_init(&iter->seq);
/*
* If there was nothing to send to user, in spite of consuming trace
* entries, go back to wait for more entries.
*/
if (sret == -EBUSY)
goto waitagain;
out:
mutex_unlock(&iter->mutex);
return sret;
}
static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
__free_page(buf->page);
}
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
unsigned int idx)
{
__free_page(spd->pages[idx]);
}
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.confirm = generic_pipe_buf_confirm,
.release = tracing_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
size_t count;
int ret;
/* Seq buffer is page-sized, exactly what we need. */
for (;;) {
count = iter->seq.len;
ret = print_trace_line(iter);
count = iter->seq.len - count;
if (rem < count) {
rem = 0;
iter->seq.len -= count;
break;
}
if (ret == TRACE_TYPE_PARTIAL_LINE) {
iter->seq.len -= count;
break;
}
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(iter);
rem -= count;
if (!trace_find_next_entry_inc(iter)) {
rem = 0;
iter->ent = NULL;
break;
}
}
return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
struct page *pages_def[PIPE_DEF_BUFFERS];
struct partial_page partial_def[PIPE_DEF_BUFFERS];
struct trace_iterator *iter = filp->private_data;
struct splice_pipe_desc spd = {
.pages = pages_def,
.partial = partial_def,
.nr_pages = 0, /* This gets updated below. */
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
};
static struct tracer *old_tracer;
ssize_t ret;
size_t rem;
unsigned int i;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);
mutex_lock(&iter->mutex);
if (iter->trace->splice_read) {
ret = iter->trace->splice_read(iter, filp,
ppos, pipe, len, flags);
if (ret)
goto out_err;
}
ret = tracing_wait_pipe(filp);
if (ret <= 0)
goto out_err;
if (!iter->ent && !trace_find_next_entry_inc(iter)) {
ret = -EFAULT;
goto out_err;
}
trace_event_read_lock();
trace_access_lock(iter->cpu_file);
/* Fill as many pages as possible. */
for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
spd.pages[i] = alloc_page(GFP_KERNEL);
if (!spd.pages[i])
break;
rem = tracing_fill_pipe_page(rem, iter);
/* Copy the data into the page, so we can start over. */
ret = trace_seq_to_buffer(&iter->seq,
page_address(spd.pages[i]),
iter->seq.len);
if (ret < 0) {
__free_page(spd.pages[i]);
break;
}
spd.partial[i].offset = 0;
spd.partial[i].len = iter->seq.len;
trace_seq_init(&iter->seq);
}
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
mutex_unlock(&iter->mutex);
spd.nr_pages = i;
ret = splice_to_pipe(pipe, &spd);
out:
splice_shrink_spd(&spd);
return ret;
out_err:
mutex_unlock(&iter->mutex);
goto out;
}
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
char buf[96];
int r;
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded)
r = sprintf(buf, "%lu (expanded: %lu)\n",
tr->entries >> 10,
trace_buf_size >> 10);
else
r = sprintf(buf, "%lu\n", tr->entries >> 10);
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
/* must have at least 1 entry */
if (!val)
return -EINVAL;
/* value is in KB */
val <<= 10;
ret = tracing_resize_ring_buffer(val);
if (ret < 0)
return ret;
*ppos += cnt;
return cnt;
}
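/*
 * The written value is taken in KB (val <<= 10 above). A hypothetical
 * session, assuming this is wired up as "buffer_size_kb":
 *
 * # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 * # cat /sys/kernel/debug/tracing/buffer_size_kb
 * 4096
 */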
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
char buf[64];
int r, cpu;
unsigned long size = 0, expanded_size = 0;
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
size += tr->entries >> 10;
if (!ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
if (ring_buffer_expanded)
r = sprintf(buf, "%lu\n", size);
else
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
/*
* There is no need to read what the user has written; this function
* just makes sure that "echo" into this file does not return an error.
*/
*ppos += cnt;
return cnt;
}
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
/* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
tracing_off();
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(0);
return 0;
}
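/*
 * Together these back a "free_buffer" style file: any write succeeds,
 * and closing the descriptor shrinks the ring buffer to zero (after
 * optionally stopping tracing, per TRACE_ITER_STOP_ON_FREE).
 */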
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
{
unsigned long addr = (unsigned long)ubuf;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct print_entry *entry;
unsigned long irq_flags;
struct page *pages[2];
int nr_pages = 1;
ssize_t written;
void *page1;
void *page2 = NULL;
int offset;
int size;
int len;
int ret;
if (tracing_disabled)
return -EINVAL;
if (cnt > TRACE_BUF_SIZE)
cnt = TRACE_BUF_SIZE;
/*
* Userspace is injecting traces into the kernel trace buffer.
* We want to be as non-intrusive as possible.
* To do so, we do not want to allocate any special buffers
* or take any locks, but instead write the userspace data
* straight into the ring buffer.
*
* First we need to pin the userspace buffer into memory. It most
* likely already is resident, because userspace just referenced it,
* but there's no guarantee of that. By using get_user_pages_fast()
* and kmap_atomic()/kunmap_atomic() we can access the pages
* directly, and then write the data straight into the ring buffer.
*/
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
/* check if we cross pages */
if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
nr_pages = 2;
offset = addr & (PAGE_SIZE - 1);
addr &= PAGE_MASK;
ret = get_user_pages_fast(addr, nr_pages, 0, pages);
if (ret < nr_pages) {
while (--ret >= 0)
put_page(pages[ret]);
written = -EFAULT;
goto out;
}
page1 = kmap_atomic(pages[0]);
if (nr_pages == 2)
page2 = kmap_atomic(pages[1]);
local_save_flags(irq_flags);
size = sizeof(*entry) + cnt + 2; /* possible \n added */
buffer = global_trace.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
irq_flags, preempt_count());
if (!event) {
/* Ring buffer disabled, return as if not open for write */
written = -EBADF;
goto out_unlock;
}
entry = ring_buffer_event_data(event);
entry->ip = _THIS_IP_;
if (nr_pages == 2) {
len = PAGE_SIZE - offset;
memcpy(&entry->buf, page1 + offset, len);
memcpy(&entry->buf[len], page2, cnt - len);
} else
memcpy(&entry->buf, page1 + offset, cnt);
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
} else {
entry->buf[cnt] = '\0';
stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
}
ring_buffer_unlock_commit(buffer, event);
written = cnt;
*fpos += written;
out_unlock:
if (nr_pages == 2)
kunmap_atomic(page2);
kunmap_atomic(page1);
while (nr_pages > 0)
put_page(pages[--nr_pages]);
out:
return written;
}
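/*
 * A usage sketch, assuming this handler backs the usual
 * "trace_marker" file: userspace annotates the trace with
 *
 * # echo "frame start" > /sys/kernel/debug/tracing/trace_marker
 *
 * and the string lands in the ring buffer (and, here, in the STM log)
 * with a trailing newline guaranteed.
 */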
static int tracing_clock_show(struct seq_file *m, void *v)
{
int i;
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
seq_printf(m,
"%s%s%s%s", i ? " " : "",
i == trace_clock_id ? "[" : "", trace_clocks[i].name,
i == trace_clock_id ? "]" : "");
seq_putc(m, '\n');
return 0;
}
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
{
char buf[64];
const char *clockstr;
int i;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
clockstr = strstrip(buf);
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
if (strcmp(trace_clocks[i].name, clockstr) == 0)
break;
}
if (i == ARRAY_SIZE(trace_clocks))
return -EINVAL;
trace_clock_id = i;
mutex_lock(&trace_types_lock);
ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
if (max_tr.buffer)
ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
mutex_unlock(&trace_types_lock);
*fpos += cnt;
return cnt;
}
static int tracing_clock_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
return single_open(file, tracing_clock_show, NULL);
}
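/*
 * Reading the clock file brackets the active clock; writing switches
 * both the main and max buffers to the named clock, e.g.:
 *
 * # cat trace_clock
 * [local] global ...
 * # echo global > trace_clock
 *
 * (the available names come from the trace_clocks[] table, defined
 * elsewhere in this file).
 */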
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_ctrl_fops = {
.open = tracing_open_generic,
.read = tracing_ctrl_read,
.write = tracing_ctrl_write,
.llseek = generic_file_llseek,
};
static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_pipe_fops = {
.open = tracing_open_pipe,
.poll = tracing_poll_pipe,
.read = tracing_read_pipe,
.splice_read = tracing_splice_read_pipe,
.release = tracing_release_pipe,
.llseek = no_llseek,
};
static const struct file_operations tracing_entries_fops = {
.open = tracing_open_generic,
.read = tracing_entries_read,
.write = tracing_entries_write,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_total_entries_fops = {
.open = tracing_open_generic,
.read = tracing_total_entries_read,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_free_buffer_fops = {
.write = tracing_free_buffer_write,
.release = tracing_free_buffer_release,
};
static const struct file_operations tracing_mark_fops = {
.open = tracing_open_generic,
.write = tracing_mark_write,
.llseek = generic_file_llseek,
};
static const struct file_operations trace_clock_fops = {
.open = tracing_clock_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = tracing_clock_write,
};
struct ftrace_buffer_info {
struct trace_array *tr;
void *spare;
int cpu;
unsigned int read;
};
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
int cpu = (int)(long)inode->i_private;
struct ftrace_buffer_info *info;
if (tracing_disabled)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->tr = &global_trace;
info->cpu = cpu;
info->spare = NULL;
/* Force reading ring buffer for first read */
info->read = (unsigned int)-1;
filp->private_data = info;
return nonseekable_open(inode, filp);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ftrace_buffer_info *info = filp->private_data;
ssize_t ret;
size_t size;
if (!count)
return 0;
if (!info->spare)
info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
if (!info->spare)
return -ENOMEM;
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
goto read;
trace_access_lock(info->cpu);
ret = ring_buffer_read_page(info->tr->buffer,
&info->spare,
count,
info->cpu, 0);
trace_access_unlock(info->cpu);
if (ret < 0)
return 0;
info->read = 0;
read:
size = PAGE_SIZE - info->read;
if (size > count)
size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size);
if (ret == size)
return -EFAULT;
size -= ret;
*ppos += size;
info->read += size;
return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
if (info->spare)
ring_buffer_free_read_page(info->tr->buffer, info->spare);
kfree(info);
return 0;
}
struct buffer_ref {
struct ring_buffer *buffer;
void *page;
int ref;
};
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
if (--ref->ref)
return;
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
buf->private = 0;
}
static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return 1;
}
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
ref->ref++;
}
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.confirm = generic_pipe_buf_confirm,
.release = buffer_pipe_buf_release,
.steal = buffer_pipe_buf_steal,
.get = buffer_pipe_buf_get,
};
/*
* Callback from splice_to_pipe(), used to release any leftover pages
* at the end of the spd in case we errored out while filling the pipe.
*/
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
struct buffer_ref *ref =
(struct buffer_ref *)spd->partial[i].private;
if (--ref->ref)
return;
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct ftrace_buffer_info *info = file->private_data;
struct partial_page partial_def[PIPE_DEF_BUFFERS];
struct page *pages_def[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
.pages = pages_def,
.partial = partial_def,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &buffer_pipe_buf_ops,
.spd_release = buffer_spd_release,
};
struct buffer_ref *ref;
int entries, size, i;
size_t ret;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
if (*ppos & (PAGE_SIZE - 1)) {
WARN_ONCE(1, "Ftrace: previous read must page-align\n");
ret = -EINVAL;
goto out;
}
if (len & (PAGE_SIZE - 1)) {
WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
if (len < PAGE_SIZE) {
ret = -EINVAL;
goto out;
}
len &= PAGE_MASK;
}
trace_access_lock(info->cpu);
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
struct page *page;
int r;
ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
break;
ref->ref = 1;
ref->buffer = info->tr->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
if (!ref->page) {
kfree(ref);
break;
}
r = ring_buffer_read_page(ref->buffer, &ref->page,
len, info->cpu, 1);
if (r < 0) {
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
break;
}
/*
* Zero out any leftover data; this page is going to
* userland.
*/
size = ring_buffer_page_len(ref->page);
if (size < PAGE_SIZE)
memset(ref->page + size, 0, PAGE_SIZE - size);
page = virt_to_page(ref->page);
spd.pages[i] = page;
spd.partial[i].len = PAGE_SIZE;
spd.partial[i].offset = 0;
spd.partial[i].private = (unsigned long)ref;
spd.nr_pages++;
*ppos += PAGE_SIZE;
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
}
trace_access_unlock(info->cpu);
spd.nr_pages = i;
/* did we read anything? */
if (!spd.nr_pages) {
if (flags & SPLICE_F_NONBLOCK)
ret = -EAGAIN;
else
ret = 0;
/* TODO: block */
goto out;
}
ret = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
out:
return ret;
}
static const struct file_operations tracing_buffers_fops = {
.open = tracing_buffers_open,
.read = tracing_buffers_read,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
.llseek = no_llseek,
};
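/*
 * These fops back the per-cpu "trace_pipe_raw" files created in
 * tracing_init_debugfs_percpu() below: reads hand out raw ring-buffer
 * pages, and splice_read moves whole pages into a pipe without an
 * intermediate copy.
 */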
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
unsigned long cpu = (unsigned long)filp->private_data;
struct trace_array *tr = &global_trace;
struct trace_seq *s;
unsigned long cnt;
unsigned long long t;
unsigned long usec_rem;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
trace_seq_init(s);
cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
trace_seq_printf(s, "entries: %ld\n", cnt);
cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
trace_seq_printf(s, "overrun: %ld\n", cnt);
cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
trace_seq_printf(s, "commit overrun: %ld\n", cnt);
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
kfree(s);
return count;
}
static const struct file_operations tracing_stats_fops = {
.open = tracing_open_generic,
.read = tracing_stats_read,
.llseek = generic_file_llseek,
};
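/*
 * Reading a per-cpu "stats" file (also created in
 * tracing_init_debugfs_percpu() below) prints one "name: value" line
 * per counter: entries, overrun, commit overrun, bytes, and the
 * oldest-event/now timestamps computed above.
 */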
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
return 0;
}
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
static char ftrace_dyn_info_buffer[1024];
static DEFINE_MUTEX(dyn_info_mutex);
unsigned long *p = filp->private_data;
char *buf = ftrace_dyn_info_buffer;
int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
int r;
mutex_lock(&dyn_info_mutex);
r = sprintf(buf, "%ld ", *p);
r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
buf[r++] = '\n';
r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
mutex_unlock(&dyn_info_mutex);
return r;
}
static const struct file_operations tracing_dyn_info_fops = {
.open = tracing_open_generic,
.read = tracing_read_dyn_info,
.llseek = generic_file_llseek,
};
#endif
static struct dentry *d_tracer;
struct dentry *tracing_init_dentry(void)
{
static int once;
if (d_tracer)
return d_tracer;
if (!debugfs_initialized())
return NULL;
d_tracer = debugfs_create_dir("tracing", NULL);
if (!d_tracer && !once) {
once = 1;
pr_warning("Could not create debugfs directory 'tracing'\n");
return NULL;
}
return d_tracer;
}
static struct dentry *d_percpu;
struct dentry *tracing_dentry_percpu(void)
{
static int once;
struct dentry *d_tracer;
if (d_percpu)
return d_percpu;
d_tracer = tracing_init_dentry();
if (!d_tracer)
return NULL;
d_percpu = debugfs_create_dir("per_cpu", d_tracer);
if (!d_percpu && !once) {
once = 1;
pr_warning("Could not create debugfs directory 'per_cpu'\n");
return NULL;
}
return d_percpu;
}
static void tracing_init_debugfs_percpu(long cpu)
{
struct dentry *d_percpu = tracing_dentry_percpu();
struct dentry *d_cpu;
char cpu_dir[30]; /* 30 characters should be more than enough */
snprintf(cpu_dir, 30, "cpu%ld", cpu);
d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
if (!d_cpu) {
pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
return;
}
/* per cpu trace_pipe */
trace_create_file("trace_pipe", 0444, d_cpu,
(void *) cpu, &tracing_pipe_fops);
/* per cpu trace */
trace_create_file("trace", 0644, d_cpu,
(void *) cpu, &tracing_fops);
trace_create_file("trace_pipe_raw", 0444, d_cpu,
(void *) cpu, &tracing_buffers_fops);
trace_create_file("stats", 0444, d_cpu,
(void *) cpu, &tracing_stats_fops);
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
struct trace_option_dentry {
struct tracer_opt *opt;
struct tracer_flags *flags;
struct dentry *entry;
};
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct trace_option_dentry *topt = filp->private_data;
char *buf;
if (topt->flags->val & topt->opt->bit)
buf = "1\n";
else
buf = "0\n";
return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct trace_option_dentry *topt = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
if (val != 0 && val != 1)
return -EINVAL;
if (!!(topt->flags->val & topt->opt->bit) != val) {
mutex_lock(&trace_types_lock);
ret = __set_tracer_option(current_trace, topt->flags,
topt->opt, !val);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
*ppos += cnt;
return cnt;
}
static const struct file_operations trace_options_fops = {
.open = tracing_open_generic,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
};
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
long index = (long)filp->private_data;
char *buf;
if (trace_flags & (1 << index))
buf = "1\n";
else
buf = "0\n";
return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
long index = (long)filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
if (val != 0 && val != 1)
return -EINVAL;
set_tracer_flags(1 << index, val);
*ppos += cnt;
return cnt;
}
static const struct file_operations trace_options_core_fops = {
.open = tracing_open_generic,
.read = trace_options_core_read,
.write = trace_options_core_write,
.llseek = generic_file_llseek,
};
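/*
 * Each name in trace_options[] is exposed through the fops above as a
 * file under the debugfs "options" directory: reads report bit
 * (1 << index) of trace_flags as "0\n" or "1\n", and writes toggle it
 * via set_tracer_flags().
 */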
struct dentry *trace_create_file(const char *name,
umode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
{
struct dentry *ret;
ret = debugfs_create_file(name, mode, parent, data, fops);
if (!ret)
pr_warning("Could not create debugfs '%s' entry\n", name);
return ret;
}
static struct dentry *trace_options_init_dentry(void)
{
struct dentry *d_tracer;
static struct dentry *t_options;
if (t_options)
return t_options;
d_tracer = tracing_init_dentry();
if (!d_tracer)
return NULL;
t_options = debugfs_create_dir("options", d_tracer);
if (!t_options) {
pr_warning("Could not create debugfs directory 'options'\n");
return NULL;
}
return t_options;
}
static void
create_trace_option_file(struct trace_option_dentry *topt,
struct tracer_flags *flags,
struct tracer_opt *opt)
{
struct dentry *t_options;
t_options = trace_options_init_dentry();
if (!t_options)
return;
topt->flags = flags;
topt->opt = opt;
topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
&trace_options_fops);
}
static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
struct trace_option_dentry *topts;
struct tracer_flags *flags;
struct tracer_opt *opts;
int cnt;
if (!tracer)
return NULL;
flags = tracer->flags;
if (!flags || !flags->opts)
return NULL;
opts = flags->opts;
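/* Count the options; the array is terminated by a NULL name */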
for (cnt = 0; opts[cnt].name; cnt++)
;
topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
if (!topts)
return NULL;
for (cnt = 0; opts[cnt].name; cnt++)
create_trace_option_file(&topts[cnt], flags,
&opts[cnt]);
return topts;
}
static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
int cnt;
if (!topts)
return;
for (cnt = 0; topts[cnt].opt; cnt++) {
if (topts[cnt].entry)
debugfs_remove(topts[cnt].entry);
}
kfree(topts);
}
static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
struct dentry *t_options;
t_options = trace_options_init_dentry();
if (!t_options)
return NULL;
return trace_create_file(option, 0644, t_options, (void *)index,
&trace_options_core_fops);
}
static __init void create_trace_options_dir(void)
{
struct dentry *t_options;
int i;
t_options = trace_options_init_dentry();
if (!t_options)
return;
for (i = 0; trace_options[i]; i++)
create_trace_option_core_file(trace_options[i], i);
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
struct ring_buffer *buffer = tr->buffer;
char buf[64];
int r;
if (buffer)
r = ring_buffer_record_is_on(buffer);
else
r = 0;
r = sprintf(buf, "%d\n", r);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
struct ring_buffer *buffer = tr->buffer;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
if (buffer) {
if (val)
ring_buffer_record_on(buffer);
else
ring_buffer_record_off(buffer);
}
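/* Advance the file position so the write is seen as consumed */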
(*ppos)++;
return cnt;
}
static const struct file_operations rb_simple_fops = {
.open = tracing_open_generic,
.read = rb_simple_read,
.write = rb_simple_write,
.llseek = default_llseek,
};
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
int cpu;
trace_access_lock_init();
d_tracer = tracing_init_dentry();
trace_create_file("tracing_enabled", 0644, d_tracer,
&global_trace, &tracing_ctrl_fops);
trace_create_file("trace_options", 0644, d_tracer,
NULL, &tracing_iter_fops);
trace_create_file("tracing_cpumask", 0644, d_tracer,
NULL, &tracing_cpumask_fops);
trace_create_file("trace", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
trace_create_file("available_tracers", 0444, d_tracer,
&global_trace, &show_traces_fops);
trace_create_file("current_tracer", 0644, d_tracer,
&global_trace, &set_tracer_fops);
#ifdef CONFIG_TRACER_MAX_TRACE
trace_create_file("tracing_max_latency", 0644, d_tracer,
&tracing_max_latency, &tracing_max_lat_fops);
#endif
trace_create_file("tracing_thresh", 0644, d_tracer,
&tracing_thresh, &tracing_max_lat_fops);
trace_create_file("README", 0444, d_tracer,
NULL, &tracing_readme_fops);
trace_create_file("trace_pipe", 0444, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
trace_create_file("buffer_size_kb", 0644, d_tracer,
&global_trace, &tracing_entries_fops);
trace_create_file("buffer_total_size_kb", 0444, d_tracer,
&global_trace, &tracing_total_entries_fops);
trace_create_file("free_buffer", 0644, d_tracer,
&global_trace, &tracing_free_buffer_fops);
trace_create_file("trace_marker", 0220, d_tracer,
NULL, &tracing_mark_fops);
trace_create_file("saved_cmdlines", 0444, d_tracer,
NULL, &tracing_saved_cmdlines_fops);
trace_create_file("trace_clock", 0644, d_tracer, NULL,
&trace_clock_fops);
trace_create_file("tracing_on", 0644, d_tracer,
&global_trace, &rb_simple_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
create_trace_options_dir();
for_each_tracing_cpu(cpu)
tracing_init_debugfs_percpu(cpu);
return 0;
}
static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
{
if (ftrace_dump_on_oops)
ftrace_dump(ftrace_dump_on_oops);
return NOTIFY_OK;
}
static struct notifier_block trace_panic_notifier = {
.notifier_call = trace_panic_handler,
.next = NULL,
.priority = 150 /* priority: INT_MAX >= x >= 0 */
};
static int trace_die_handler(struct notifier_block *self,
unsigned long val,
void *data)
{
switch (val) {
case DIE_OOPS:
if (ftrace_dump_on_oops)
ftrace_dump(ftrace_dump_on_oops);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block trace_die_notifier = {
.notifier_call = trace_die_handler,
.priority = 200
};
/*
* printk lines are capped at 1024 bytes; we really don't need it that big.
* Nothing should be printing 1000 characters anyway.
*/
#define TRACE_MAX_PRINT 1000
/*
* Define KERN_TRACE here so that we have one place to modify
* it if we decide to change what log level the ftrace dump
* should be at.
*/
#define KERN_TRACE KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
/* Probably should print a warning here. */
if (s->len >= TRACE_MAX_PRINT)
s->len = TRACE_MAX_PRINT;
/* should be zero ended, but we are paranoid. */
s->buffer[s->len] = 0;
printk(KERN_TRACE "%s", s->buffer);
trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
iter->tr = &global_trace;
iter->trace = current_trace;
iter->cpu_file = TRACE_PIPE_ALL_CPU;
}
static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
static arch_spinlock_t ftrace_dump_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
unsigned int old_userobj;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
/* only one dump */
local_irq_save(flags);
arch_spin_lock(&ftrace_dump_lock);
if (dump_ran)
goto out;
dump_ran = 1;
tracing_off();
/* Did function tracer already get disabled? */
if (ftrace_is_dead()) {
printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
printk("# MAY BE MISSING FUNCTION EVENTS\n");
}
if (disable_tracing)
ftrace_kill();
trace_init_global_iter(&iter);
for_each_tracing_cpu(cpu) {
atomic_inc(&iter.tr->data[cpu]->disabled);
}
old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
/* don't look at user memory in panic mode */
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
/* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
switch (oops_dump_mode) {
case DUMP_ALL:
iter.cpu_file = TRACE_PIPE_ALL_CPU;
break;
case DUMP_ORIG:
iter.cpu_file = raw_smp_processor_id();
break;
case DUMP_NONE:
goto out_enable;
default:
printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
iter.cpu_file = TRACE_PIPE_ALL_CPU;
}
printk(KERN_TRACE "Dumping ftrace buffer:\n");
/*
* We need to stop all tracing on all CPUs to read
* the next buffer. This is a bit expensive, but is
* not done often. We fill in all that we can read,
* and then release the locks again.
*/
while (!trace_empty(&iter)) {
if (!cnt)
printk(KERN_TRACE "---------------------------------\n");
cnt++;
/* reset all but tr, trace, and overruns */
memset(&iter.seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter.pos = -1;
if (trace_find_next_entry_inc(&iter) != NULL) {
int ret;
ret = print_trace_line(&iter);
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(&iter);
}
touch_nmi_watchdog();
trace_printk_seq(&iter.seq);
}
if (!cnt)
printk(KERN_TRACE " (ftrace buffer empty)\n");
else
printk(KERN_TRACE "---------------------------------\n");
out_enable:
/* Re-enable tracing if requested */
if (!disable_tracing) {
trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
atomic_dec(&iter.tr->data[cpu]->disabled);
}
tracing_on();
}
out:
arch_spin_unlock(&ftrace_dump_lock);
local_irq_restore(flags);
}
/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
__ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
int ring_buf_size;
enum ring_buffer_flags rb_flags;
int i;
int ret = -ENOMEM;
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
/* To save memory, keep the ring buffer size to its minimum */
if (ring_buffer_expanded)
ring_buf_size = trace_buf_size;
else
ring_buf_size = 1;
rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
goto out_free_cpumask;
}
global_trace.entries = ring_buffer_size(global_trace.buffer);
if (global_trace.buffer_disabled)
tracing_off();
#ifdef CONFIG_TRACER_MAX_TRACE
max_tr.buffer = ring_buffer_alloc(1, rb_flags);
if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
WARN_ON(1);
ring_buffer_free(global_trace.buffer);
goto out_free_cpumask;
}
max_tr.entries = 1;
#endif
/* Allocate the first page for all buffers */
for_each_tracing_cpu(i) {
global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_tr_data, i);
}
trace_init_cmdlines();
register_tracer(&nop_trace);
current_trace = &nop_trace;
/* All seems OK, enable tracing */
tracing_disabled = 0;
atomic_notifier_chain_register(&panic_notifier_list,
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
return 0;
out_free_cpumask:
free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
out:
return ret;
}
__init static int clear_boot_tracer(void)
{
/*
* The default bootup tracer name lives in an init section.
* This function is called as a late initcall. If we did not
* find the boot tracer, then clear it out, to prevent
* later registration from accessing the buffer that is
* about to be freed.
*/
if (!default_bootup_tracer)
return 0;
printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
default_bootup_tracer);
default_bootup_tracer = NULL;
return 0;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
| gpl-2.0 |
emceethemouth/kernel_mainline | drivers/acpi/acpica/psloop.c | 290 | 17470 | /******************************************************************************
*
* Module Name: psloop - Main AML parse loop
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
/*
* Parse the AML and build an operation tree as most interpreters (such as
* Perl) do. Parsing is done by hand rather than with a YACC generated parser
* to tightly constrain stack and dynamic memory usage. Parsing is kept
* flexible and the code fairly compact by parsing based on a list of AML
* opcode templates in aml_op_info[].
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psloop")
/* Local prototypes */
static acpi_status
acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
u8 *aml_op_start, union acpi_parse_object *op);
static void
acpi_ps_link_module_code(union acpi_parse_object *parent_op,
u8 *aml_start, u32 aml_length, acpi_owner_id owner_id);
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_arguments
*
* PARAMETERS: walk_state - Current state
* aml_op_start - Op start in AML
* op - Current Op
*
* RETURN: Status
*
* DESCRIPTION: Get arguments for passed Op.
*
******************************************************************************/
static acpi_status
acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
u8 *aml_op_start, union acpi_parse_object *op)
{
acpi_status status = AE_OK;
union acpi_parse_object *arg = NULL;
const struct acpi_opcode_info *op_info;
ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
switch (op->common.aml_opcode) {
case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
case AML_WORD_OP: /* AML_WORDDATA_ARG */
case AML_DWORD_OP: /* AML_DWORDATA_ARG */
case AML_QWORD_OP: /* AML_QWORDATA_ARG */
case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
/* Fill in constant or string argument directly */
acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
GET_CURRENT_ARG_TYPE(walk_state->
arg_types),
op);
break;
case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
status = acpi_ps_get_next_namepath(walk_state,
&(walk_state->parser_state),
op,
ACPI_POSSIBLE_METHOD_CALL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
walk_state->arg_types = 0;
break;
default:
/*
* Op is not a constant or string, append each argument to the Op
*/
while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
!walk_state->arg_count) {
walk_state->aml = walk_state->parser_state.aml;
status =
acpi_ps_get_next_arg(walk_state,
&(walk_state->parser_state),
GET_CURRENT_ARG_TYPE
(walk_state->arg_types), &arg);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (arg) {
acpi_ps_append_arg(op, arg);
}
INCREMENT_ARG_LIST(walk_state->arg_types);
}
/*
* Handle executable code at "module-level". This refers to
* executable opcodes that appear outside of any control method.
*/
if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2) &&
((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
/*
* We want to skip If/Else/While constructs during Pass1 because we
* want to actually conditionally execute the code during Pass2.
*
* Except for disassembly, where we always want to walk the
* If/Else/While packages
*/
switch (op->common.aml_opcode) {
case AML_IF_OP:
case AML_ELSE_OP:
case AML_WHILE_OP:
/*
* Currently supported module-level opcodes are:
* IF/ELSE/WHILE. These appear to be the most common,
* and easiest to support since they open an AML
* package.
*/
if (walk_state->pass_number ==
ACPI_IMODE_LOAD_PASS1) {
acpi_ps_link_module_code(op->common.parent, aml_op_start,
(u32) (walk_state->parser_state.pkg_end - aml_op_start),
walk_state->owner_id);
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Pass1: Skipping an If/Else/While body\n"));
/* Skip body of if/else/while in pass 1 */
walk_state->parser_state.aml =
walk_state->parser_state.pkg_end;
walk_state->arg_count = 0;
break;
default:
/*
* Check for an unsupported executable opcode at module
* level. We must be in PASS1, the parent must be a SCOPE,
* The opcode class must be EXECUTE, and the opcode must
* not be an argument to another opcode.
*/
if ((walk_state->pass_number ==
ACPI_IMODE_LOAD_PASS1)
&& (op->common.parent->common.aml_opcode ==
AML_SCOPE_OP)) {
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if ((op_info->class == AML_CLASS_EXECUTE) && (!arg)) {
ACPI_WARNING((AE_INFO,
"Unsupported module-level executable opcode "
"0x%.2X at table offset 0x%.4X",
op->common.aml_opcode,
(u32) (ACPI_PTR_DIFF(aml_op_start,
walk_state->parser_state.aml_start) +
sizeof(struct acpi_table_header))));
}
}
break;
}
}
/* Special processing for certain opcodes */
switch (op->common.aml_opcode) {
case AML_METHOD_OP:
/*
* Skip parsing of control method because we don't have enough
* info in the first pass to parse it correctly.
*
* Save the length and address of the body
*/
op->named.data = walk_state->parser_state.aml;
op->named.length = (u32)
(walk_state->parser_state.pkg_end -
walk_state->parser_state.aml);
/* Skip body of method */
walk_state->parser_state.aml =
walk_state->parser_state.pkg_end;
walk_state->arg_count = 0;
break;
case AML_BUFFER_OP:
case AML_PACKAGE_OP:
case AML_VAR_PACKAGE_OP:
if ((op->common.parent) &&
(op->common.parent->common.aml_opcode ==
AML_NAME_OP)
&& (walk_state->pass_number <=
ACPI_IMODE_LOAD_PASS2)) {
/*
* Skip parsing of Buffers and Packages because we don't have
* enough info in the first pass to parse them correctly.
*/
op->named.data = aml_op_start;
op->named.length = (u32)
(walk_state->parser_state.pkg_end -
aml_op_start);
/* Skip body */
walk_state->parser_state.aml =
walk_state->parser_state.pkg_end;
walk_state->arg_count = 0;
}
break;
case AML_WHILE_OP:
if (walk_state->control_state) {
walk_state->control_state->control.package_end =
walk_state->parser_state.pkg_end;
}
break;
default:
/* No action for all other opcodes */
break;
}
break;
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_link_module_code
*
* PARAMETERS: parent_op - Parent parser op
* aml_start - Pointer to the AML
* aml_length - Length of executable AML
* owner_id - owner_id of module level code
*
* RETURN: None.
*
* DESCRIPTION: Wrap the module-level code with a method object and link the
* object to the global list. Note, the mutex field of the method
* object is used to link multiple module-level code objects.
*
******************************************************************************/
static void
acpi_ps_link_module_code(union acpi_parse_object *parent_op,
u8 *aml_start, u32 aml_length, acpi_owner_id owner_id)
{
union acpi_operand_object *prev;
union acpi_operand_object *next;
union acpi_operand_object *method_obj;
struct acpi_namespace_node *parent_node;
ACPI_FUNCTION_TRACE(ps_link_module_code);
/* Get the tail of the list */
prev = next = acpi_gbl_module_code_list;
while (next) {
prev = next;
next = next->method.mutex;
}
/*
* Insert the module level code into the list. Merge it if it is
* adjacent to the previous element.
*/
if (!prev ||
((prev->method.aml_start + prev->method.aml_length) != aml_start)) {
/* Create, initialize, and link a new temporary method object */
method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
if (!method_obj) {
return_VOID;
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Create/Link new code block: %p\n",
method_obj));
if (parent_op->common.node) {
parent_node = parent_op->common.node;
} else {
parent_node = acpi_gbl_root_node;
}
method_obj->method.aml_start = aml_start;
method_obj->method.aml_length = aml_length;
method_obj->method.owner_id = owner_id;
method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
/*
* Save the parent node in next_object. This is cheating, but we
* don't want to expand the method object.
*/
method_obj->method.next_object =
ACPI_CAST_PTR(union acpi_operand_object, parent_node);
if (!prev) {
acpi_gbl_module_code_list = method_obj;
} else {
prev->method.mutex = method_obj;
}
} else {
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Appending to existing code block: %p\n",
prev));
prev->method.aml_length += aml_length;
}
return_VOID;
}
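/*
 * Sketch of the resulting list layout: the code above threads a singly
 * linked list through the method.mutex field (reused so the method
 * object does not have to grow):
 *
 *   acpi_gbl_module_code_list -> methodA.mutex -> methodB.mutex -> NULL
 *
 * AML ranges adjacent to the tail are merged into it rather than
 * allocating a new node.
 */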
/*******************************************************************************
*
* FUNCTION: acpi_ps_parse_loop
*
* PARAMETERS: walk_state - Current state
*
* RETURN: Status
*
* DESCRIPTION: Parse AML (pointed to by the current parser state) and return
* a tree of ops.
*
******************************************************************************/
acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_parse_object *op = NULL; /* current op */
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
if (walk_state->descending_callback == NULL) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
parser_state = &walk_state->parser_state;
walk_state->arg_types = 0;
#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
/* We are restarting a preempted control method */
if (acpi_ps_has_completed_scope(parser_state)) {
/*
* We must check if a predicate to an IF or WHILE statement
* was just completed
*/
if ((parser_state->scope->parse_scope.op) &&
((parser_state->scope->parse_scope.op->common.
aml_opcode == AML_IF_OP)
|| (parser_state->scope->parse_scope.op->common.
aml_opcode == AML_WHILE_OP))
&& (walk_state->control_state)
&& (walk_state->control_state->common.state ==
ACPI_CONTROL_PREDICATE_EXECUTING)) {
/*
* A predicate was just completed, get the value of the
* predicate and branch based on that value
*/
walk_state->op = NULL;
status =
acpi_ds_get_predicate_value(walk_state,
ACPI_TO_POINTER
(TRUE));
if (ACPI_FAILURE(status)
&& ((status & AE_CODE_MASK) !=
AE_CODE_CONTROL)) {
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
}
ACPI_EXCEPTION((AE_INFO, status,
"GetPredicate Failed"));
return_ACPI_STATUS(status);
}
status =
acpi_ps_next_parse_state(walk_state, op,
status);
}
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Popped scope, Op=%p\n", op));
} else if (walk_state->prev_op) {
/* We were in the middle of an op */
op = walk_state->prev_op;
walk_state->arg_types = walk_state->prev_arg_types;
}
}
#endif
/* Iterative parsing loop, while there is more AML to process: */
while ((parser_state->aml < parser_state->aml_end) || (op)) {
aml_op_start = parser_state->aml;
if (!op) {
status =
acpi_ps_create_op(walk_state, aml_op_start, &op);
if (ACPI_FAILURE(status)) {
if (status == AE_CTRL_PARSE_CONTINUE) {
continue;
}
if (status == AE_CTRL_PARSE_PENDING) {
status = AE_OK;
}
if (status == AE_CTRL_TERMINATE) {
return_ACPI_STATUS(status);
}
status =
acpi_ps_complete_op(walk_state, &op,
status);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
continue;
}
acpi_ex_start_trace_opcode(op, walk_state);
}
/*
* Start arg_count at zero because we don't know if there are
* any args yet
*/
walk_state->arg_count = 0;
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
/* Get arguments */
status =
acpi_ps_get_arguments(walk_state, aml_op_start, op);
if (ACPI_FAILURE(status)) {
status =
acpi_ps_complete_op(walk_state, &op,
status);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
continue;
}
}
/* Check for arguments that need to be processed */
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
* prepare for argument
*/
status = acpi_ps_push_scope(parser_state, op,
walk_state->arg_types,
walk_state->arg_count);
if (ACPI_FAILURE(status)) {
status =
acpi_ps_complete_op(walk_state, &op,
status);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
continue;
}
op = NULL;
continue;
}
/*
* All arguments have been processed -- Op is complete,
* prepare for next
*/
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.aml_opcode);
if (walk_state->op_info->flags & AML_NAMED) {
if (op->common.aml_opcode == AML_REGION_OP ||
op->common.aml_opcode == AML_DATA_REGION_OP) {
/*
* Skip parsing of control method or opregion body,
* because we don't have enough info in the first pass
* to parse them correctly.
*
* Having completed parsing an op_region declaration, we now
* know the length.
*/
op->named.length =
(u32) (parser_state->aml - op->named.data);
}
}
if (walk_state->op_info->flags & AML_CREATE) {
/*
* Back up to the beginning of the create_XXXfield declaration (1 for
* Opcode)
*
* body_length is unknown until we parse the body
*/
op->named.length =
(u32) (parser_state->aml - op->named.data);
}
if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
/*
* Back up to the beginning of the bank_field declaration
*
* body_length is unknown until we parse the body
*/
op->named.length =
(u32) (parser_state->aml - op->named.data);
}
/* This op complete, notify the dispatcher */
if (walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
status =
acpi_ps_next_parse_state(walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
}
}
status = acpi_ps_complete_op(walk_state, &op, status);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} /* while parser_state->Aml */
status = acpi_ps_complete_final_op(walk_state, op, status);
return_ACPI_STATUS(status);
}
| gpl-2.0 |
kenrestivo/linux | arch/arm/mach-omap2/board-devkit8000.c | 546 | 19444 | /*
* board-devkit8000.c - TimLL Devkit8000
*
* Copyright (C) 2009 Kim Botherway
* Copyright (C) 2010 Thomas Weber
*
* Modified from mach-omap2/board-omap3beagle.c
*
* Initial code: Syed Mohammed Khasim
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/mmc/host.h>
#include <linux/usb/phy.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
#include "id.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include "common.h"
#include "gpmc.h"
#include <linux/platform_data/mtd-nand-omap2.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>
#include <linux/dm9000.h>
#include <linux/interrupt.h>
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mux.h"
#include "hsmmc.h"
#include "board-flash.h"
#include "common-board-devices.h"
#define NAND_CS 0
#define OMAP_DM9000_GPIO_IRQ 25
#define OMAP3_DEVKIT_TS_GPIO 27
static struct mtd_partition devkit8000_nand_partitions[] = {
/* All the partition sizes are listed in terms of NAND block size */
{
.name = "X-Loader",
.offset = 0,
.size = 4 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
{
.name = "U-Boot",
.offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
.size = 15 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
{
.name = "U-Boot Env",
.offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
.size = 1 * NAND_BLOCK_SIZE,
},
{
.name = "Kernel",
.offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
.size = 32 * NAND_BLOCK_SIZE,
},
{
.name = "File System",
.offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */
.size = MTDPART_SIZ_FULL,
},
};
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
.gpio_wp = 29,
.deferred = true,
},
{} /* Terminator */
};
static struct regulator_consumer_supply devkit8000_vmmc1_supply[] = {
REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
/* ads7846 on SPI */
static struct regulator_consumer_supply devkit8000_vio_supply[] = {
REGULATOR_SUPPLY("vcc", "spi2.0"),
};
static const struct display_timing devkit8000_lcd_videomode = {
.pixelclock = { 0, 40000000, 0 },
.hactive = { 0, 800, 0 },
.hfront_porch = { 0, 1, 0 },
.hback_porch = { 0, 1, 0 },
.hsync_len = { 0, 48, 0 },
.vactive = { 0, 480, 0 },
.vfront_porch = { 0, 12, 0 },
.vback_porch = { 0, 25, 0 },
.vsync_len = { 0, 3, 0 },
.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
static struct panel_dpi_platform_data devkit8000_lcd_pdata = {
.name = "lcd",
.source = "dpi.0",
.data_lines = 24,
.display_timing = &devkit8000_lcd_videomode,
.enable_gpio = -1, /* filled in code */
.backlight_gpio = -1,
};
static struct platform_device devkit8000_lcd_device = {
.name = "panel-dpi",
.id = 0,
.dev.platform_data = &devkit8000_lcd_pdata,
};
static struct connector_dvi_platform_data devkit8000_dvi_connector_pdata = {
.name = "dvi",
.source = "tfp410.0",
.i2c_bus_num = 1,
};
static struct platform_device devkit8000_dvi_connector_device = {
.name = "connector-dvi",
.id = 0,
.dev.platform_data = &devkit8000_dvi_connector_pdata,
};
static struct encoder_tfp410_platform_data devkit8000_tfp410_pdata = {
.name = "tfp410.0",
.source = "dpi.0",
.data_lines = 24,
.power_down_gpio = -1, /* filled in code */
};
static struct platform_device devkit8000_tfp410_device = {
.name = "tfp410",
.id = 0,
.dev.platform_data = &devkit8000_tfp410_pdata,
};
static struct connector_atv_platform_data devkit8000_tv_pdata = {
.name = "tv",
.source = "venc.0",
.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
.invert_polarity = false,
};
static struct platform_device devkit8000_tv_connector_device = {
.name = "connector-analog-tv",
.id = 0,
.dev.platform_data = &devkit8000_tv_pdata,
};
static struct omap_dss_board_info devkit8000_dss_data = {
.default_display_name = "lcd",
};
static uint32_t board_keymap[] = {
KEY(0, 0, KEY_1),
KEY(1, 0, KEY_2),
KEY(2, 0, KEY_3),
KEY(0, 1, KEY_4),
KEY(1, 1, KEY_5),
KEY(2, 1, KEY_6),
KEY(3, 1, KEY_F5),
KEY(0, 2, KEY_7),
KEY(1, 2, KEY_8),
KEY(2, 2, KEY_9),
KEY(3, 2, KEY_F6),
KEY(0, 3, KEY_F7),
KEY(1, 3, KEY_0),
KEY(2, 3, KEY_F8),
PERSISTENT_KEY(4, 5),
KEY(4, 4, KEY_VOLUMEUP),
KEY(5, 5, KEY_VOLUMEDOWN),
0
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data devkit8000_kp_data = {
.keymap_data = &board_map_data,
.rows = 6,
.cols = 6,
.rep = 1,
};
static struct gpio_led gpio_leds[];
static int devkit8000_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
mmc[0].gpio_cd = gpio + 0;
omap_hsmmc_late_init(mmc);
/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
devkit8000_lcd_pdata.enable_gpio = gpio + TWL4030_GPIO_MAX + 0;
/* gpio + 7 is "DVI_PD" (out, active low) */
devkit8000_tfp410_pdata.power_down_gpio = gpio + 7;
return 0;
}
static struct twl4030_gpio_platform_data devkit8000_gpio_data = {
.use_leds = true,
.pulldowns = BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13)
| BIT(15) | BIT(16) | BIT(17),
.setup = devkit8000_twl_gpio_setup,
};
static struct regulator_consumer_supply devkit8000_vpll1_supplies[] = {
REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
REGULATOR_SUPPLY("vdds_dsi", "omapdss_dpi.0"),
REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data devkit8000_vmmc1 = {
.constraints = {
.min_uV = 1850000,
.max_uV = 3150000,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(devkit8000_vmmc1_supply),
.consumer_supplies = devkit8000_vmmc1_supply,
};
/* VPLL1 for digital video outputs */
static struct regulator_init_data devkit8000_vpll1 = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(devkit8000_vpll1_supplies),
.consumer_supplies = devkit8000_vpll1_supplies,
};
/* VAUX4 for ads7846 and nubs */
static struct regulator_init_data devkit8000_vio = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
.apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(devkit8000_vio_supply),
.consumer_supplies = devkit8000_vio_supply,
};
static struct twl4030_platform_data devkit8000_twldata = {
/* platform_data for children goes here */
.gpio = &devkit8000_gpio_data,
.vmmc1 = &devkit8000_vmmc1,
.vpll1 = &devkit8000_vpll1,
.vio = &devkit8000_vio,
.keypad = &devkit8000_kp_data,
};
static int __init devkit8000_i2c_init(void)
{
omap3_pmic_get_config(&devkit8000_twldata,
TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
TWL_COMMON_REGULATOR_VDAC);
omap3_pmic_init("tps65930", &devkit8000_twldata);
/* Bus 3 is attached to the DVI port where devices like the pico DLP
* projector don't work reliably with 400kHz */
omap_register_i2c_bus(3, 400, NULL, 0);
return 0;
}
static struct gpio_led gpio_leds[] = {
{
.name = "led1",
.default_trigger = "heartbeat",
.gpio = 186,
.active_low = true,
},
{
.name = "led2",
.default_trigger = "mmc0",
.gpio = 163,
.active_low = true,
},
{
.name = "ledB",
.default_trigger = "none",
.gpio = 153,
.active_low = true,
},
{
.name = "led3",
.default_trigger = "none",
.gpio = 164,
.active_low = true,
},
};
static struct gpio_led_platform_data gpio_led_info = {
.leds = gpio_leds,
.num_leds = ARRAY_SIZE(gpio_leds),
};
static struct platform_device leds_gpio = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &gpio_led_info,
},
};
static struct gpio_keys_button gpio_buttons[] = {
{
.code = BTN_EXTRA,
.gpio = 26,
.desc = "user",
.wakeup = 1,
},
};
static struct gpio_keys_platform_data gpio_key_info = {
.buttons = gpio_buttons,
.nbuttons = ARRAY_SIZE(gpio_buttons),
};
static struct platform_device keys_gpio = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &gpio_key_info,
},
};
#define OMAP_DM9000_BASE 0x2c000000
static struct resource omap_dm9000_resources[] = {
[0] = {
.start = OMAP_DM9000_BASE,
.end = (OMAP_DM9000_BASE + 0x4 - 1),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (OMAP_DM9000_BASE + 0x400),
.end = (OMAP_DM9000_BASE + 0x400 + 0x4 - 1),
.flags = IORESOURCE_MEM,
},
[2] = {
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct dm9000_plat_data omap_dm9000_platdata = {
.flags = DM9000_PLATF_16BITONLY,
};
static struct platform_device omap_dm9000_dev = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(omap_dm9000_resources),
.resource = omap_dm9000_resources,
.dev = {
.platform_data = &omap_dm9000_platdata,
},
};
static void __init omap_dm9000_init(void)
{
unsigned char *eth_addr = omap_dm9000_platdata.dev_addr;
struct omap_die_id odi;
int ret;
ret = gpio_request_one(OMAP_DM9000_GPIO_IRQ, GPIOF_IN, "dm9000 irq");
if (ret < 0) {
printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n",
OMAP_DM9000_GPIO_IRQ);
return;
}
/* init the mac address using DIE id */
omap_get_die_id(&odi);
eth_addr[0] = 0x02; /* locally administered */
eth_addr[1] = odi.id_1 & 0xff;
eth_addr[2] = (odi.id_0 & 0xff000000) >> 24;
eth_addr[3] = (odi.id_0 & 0x00ff0000) >> 16;
eth_addr[4] = (odi.id_0 & 0x0000ff00) >> 8;
eth_addr[5] = (odi.id_0 & 0x000000ff);
}
static struct platform_device *devkit8000_devices[] __initdata = {
&leds_gpio,
&keys_gpio,
&omap_dm9000_dev,
&devkit8000_lcd_device,
&devkit8000_tfp410_device,
&devkit8000_dvi_connector_device,
&devkit8000_tv_connector_device,
};
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
};
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
/* nCS and IRQ for Devkit8000 ethernet */
OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE0),
OMAP3_MUX(ETK_D11, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
/* McSPI 2*/
OMAP3_MUX(MCSPI2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCSPI2_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(MCSPI2_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCSPI2_CS0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(MCSPI2_CS1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
/* PENDOWN GPIO */
OMAP3_MUX(ETK_D13, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
/* mUSB */
OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* USB 1 */
OMAP3_MUX(ETK_CTL, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
OMAP3_MUX(ETK_D8, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D9, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D0, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D2, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D4, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D5, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D6, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
OMAP3_MUX(ETK_D7, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
/* MMC 1 */
OMAP3_MUX(SDMMC1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(SDMMC1_DAT7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* McBSP 2 */
OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
/* I2C 1 */
OMAP3_MUX(I2C1_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(I2C1_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* I2C 2 */
OMAP3_MUX(I2C2_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* I2C 3 */
OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* I2C 4 */
OMAP3_MUX(I2C4_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(I2C4_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* serial ports */
OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* DSS */
OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
/* expansion port */
/* McSPI 1 */
OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
OMAP3_MUX(MCSPI1_CS3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
/* HDQ */
OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
/* McSPI4 */
OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP),
/* MMC 2 */
OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
/* I2C3 */
OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
OMAP3_MUX(GPMC_NCS3, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
/* TPS IRQ */
OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \
OMAP_PIN_INPUT_PULLUP),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif
static void __init devkit8000_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_dm9000_init();
omap_hsmmc_init(mmc);
devkit8000_i2c_init();
omap_dm9000_resources[2].start = gpio_to_irq(OMAP_DM9000_GPIO_IRQ);
platform_add_devices(devkit8000_devices,
ARRAY_SIZE(devkit8000_devices));
omap_display_init(&devkit8000_dss_data);
omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL);
usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(NULL);
usbhs_init(&usbhs_bdata);
board_nand_init(devkit8000_nand_partitions,
ARRAY_SIZE(devkit8000_nand_partitions), NAND_CS,
NAND_BUSWIDTH_16, NULL);
omap_twl4030_audio_init("omap3beagle", NULL);
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
.atag_offset = 0x100,
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
.init_machine = devkit8000_init,
.init_late = omap35xx_init_late,
.init_time = omap3_secure_sync32k_timer_init,
.restart = omap3xxx_restart,
MACHINE_END
| gpl-2.0 |
pccr10001/Kernel-2.6.32.61-for-PDK-7105 | arch/microblaze/kernel/ptrace.c | 546 | 6676 | /*
* `ptrace' system call
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2004-2007 John Williams <john.williams@petalogix.com>
*
* derived from arch/v850/kernel/ptrace.c
*
* Copyright (C) 2002,03 NEC Electronics Corporation
* Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
*
* Derived from arch/mips/kernel/ptrace.c:
*
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999 MIPS Technologies, Inc.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/asm-offsets.h>
/* Returns the address where the register at REG_OFFS in P is stashed away. */
static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
struct task_struct *t)
{
struct pt_regs *regs;
/*
* Three basic cases:
*
* (1) A register normally saved before calling the scheduler, is
* available in the kernel entry pt_regs structure at the top
* of the kernel stack. The kernel trap/irq exit path takes
* care to save/restore almost all registers for ptrace'd
* processes.
*
* (2) A call-clobbered register, where the process P entered the
* kernel via [syscall] trap, is not stored anywhere; that's
* OK, because such registers are not expected to be preserved
* when the trap returns anyway (so we don't actually bother to
* test for this case).
*
* (3) A few registers not used at all by the kernel, and so
* normally never saved except by context-switches, are in the
* context switch state.
*/
/* Register saved during kernel entry (or not available). */
regs = task_pt_regs(t);
return (microblaze_reg_t *)((char *)regs + reg_offs);
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int rval;
unsigned long val = 0;
unsigned long copied;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
pr_debug("PEEKTEXT/PEEKDATA at %08lX\n", addr);
copied = access_process_vm(child, addr, &val, sizeof(val), 0);
rval = -EIO;
if (copied != sizeof(val))
break;
rval = put_user(val, (unsigned long *)data);
break;
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
pr_debug("POKETEXT/POKEDATA to %08lX\n", addr);
rval = 0;
if (access_process_vm(child, addr, &data, sizeof(data), 1)
== sizeof(data))
break;
rval = -EIO;
break;
/* Read/write the word at location ADDR in the registers. */
case PTRACE_PEEKUSR:
case PTRACE_POKEUSR:
pr_debug("PEEKUSR/POKEUSR : 0x%08lx\n", addr);
rval = 0;
if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) {
/*
* Special requests that don't actually correspond
* to offsets in struct pt_regs.
*/
if (addr == PT_TEXT_ADDR) {
val = child->mm->start_code;
} else if (addr == PT_DATA_ADDR) {
val = child->mm->start_data;
} else if (addr == PT_TEXT_LEN) {
val = child->mm->end_code
- child->mm->start_code;
} else {
rval = -EIO;
}
} else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
if (request == PTRACE_PEEKUSR)
val = *reg_addr;
else
*reg_addr = data;
} else
rval = -EIO;
if (rval == 0 && request == PTRACE_PEEKUSR)
rval = put_user(val, (unsigned long *)data);
break;
/* Continue and stop at next (return from) syscall */
case PTRACE_SYSCALL:
pr_debug("PTRACE_SYSCALL\n");
case PTRACE_SINGLESTEP:
pr_debug("PTRACE_SINGLESTEP\n");
/* Restart after a signal. */
case PTRACE_CONT:
pr_debug("PTRACE_CONT\n");
rval = -EIO;
if (!valid_signal(data))
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
pr_debug("wakeup_process\n");
wake_up_process(child);
rval = 0;
break;
/*
* make the child exit. Best I can do is send it a sigkill.
* perhaps it should be put in the status that it wants to
* exit.
*/
case PTRACE_KILL:
pr_debug("PTRACE_KILL\n");
rval = 0;
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
child->exit_code = SIGKILL;
wake_up_process(child);
break;
case PTRACE_DETACH: /* detach a process that was attached. */
pr_debug("PTRACE_DETACH\n");
rval = ptrace_detach(child, data);
break;
default:
/* rval = ptrace_request(child, request, addr, data); noMMU */
rval = -EIO;
}
return rval;
}
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->r12);
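/* On microblaze the syscall number is passed in r12 */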
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->r12.
*/
ret = -1L;
if (unlikely(current->audit_context))
audit_syscall_entry(EM_XILINX_MICROBLAZE, regs->r12,
regs->r5, regs->r6,
regs->r7, regs->r8);
return ret ?: regs->r12;
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->r3), regs->r3);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
#if 0
static asmlinkage void syscall_trace(void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
/* The 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
#endif
void ptrace_disable(struct task_struct *child)
{
/* nothing to do */
}
| gpl-2.0 |
MoltenMotherBoard/platform_kernel_samsung_cori | fs/nfs/nfs2xdr.c | 802 | 19806 | /*
* linux/fs/nfs/nfs2xdr.c
*
* XDR functions to encode/decode NFS RPC arguments and results.
*
* Copyright (C) 1992, 1993, 1994 Rick Sladkey
* Copyright (C) 1996 Olaf Kirch
* 04 Aug 1998 Ion Badulescu <ionut@cs.columbia.edu>
* FIFOs need special handling in NFSv2
*/
#include <linux/param.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
/* Mapping from NFS error code to "errno" error code. */
#define errno_NFSERR_IO EIO
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
*/
#define NFS_fhandle_sz (8)
#define NFS_sattr_sz (8)
#define NFS_filename_sz (1+(NFS2_MAXNAMLEN>>2))
#define NFS_path_sz (1+(NFS2_MAXPATHLEN>>2))
#define NFS_fattr_sz (17)
#define NFS_info_sz (5)
#define NFS_entry_sz (NFS_filename_sz+3)
#define NFS_diropargs_sz (NFS_fhandle_sz+NFS_filename_sz)
#define NFS_removeargs_sz (NFS_fhandle_sz+NFS_filename_sz)
#define NFS_sattrargs_sz (NFS_fhandle_sz+NFS_sattr_sz)
#define NFS_readlinkargs_sz (NFS_fhandle_sz)
#define NFS_readargs_sz (NFS_fhandle_sz+3)
#define NFS_writeargs_sz (NFS_fhandle_sz+4)
#define NFS_createargs_sz (NFS_diropargs_sz+NFS_sattr_sz)
#define NFS_renameargs_sz (NFS_diropargs_sz+NFS_diropargs_sz)
#define NFS_linkargs_sz (NFS_fhandle_sz+NFS_diropargs_sz)
#define NFS_symlinkargs_sz (NFS_diropargs_sz+1+NFS_sattr_sz)
#define NFS_readdirargs_sz (NFS_fhandle_sz+2)
#define NFS_attrstat_sz (1+NFS_fattr_sz)
#define NFS_diropres_sz (1+NFS_fhandle_sz+NFS_fattr_sz)
#define NFS_readlinkres_sz (2)
#define NFS_readres_sz (1+NFS_fattr_sz+1)
#define NFS_writeres_sz (NFS_attrstat_sz)
#define NFS_stat_sz (1)
#define NFS_readdirres_sz (1)
#define NFS_statfsres_sz (1+NFS_info_sz)
/*
* Common NFS XDR functions as inlines
*/
static inline __be32 *
xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle)
{
memcpy(p, fhandle->data, NFS2_FHSIZE);
return p + XDR_QUADLEN(NFS2_FHSIZE);
}
static inline __be32 *
xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle)
{
/* NFSv2 handles have a fixed length */
fhandle->size = NFS2_FHSIZE;
memcpy(fhandle->data, p, NFS2_FHSIZE);
return p + XDR_QUADLEN(NFS2_FHSIZE);
}
static inline __be32*
xdr_encode_time(__be32 *p, struct timespec *timep)
{
*p++ = htonl(timep->tv_sec);
/* Convert nanoseconds into microseconds */
*p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0);
return p;
}
static inline __be32*
xdr_encode_current_server_time(__be32 *p, struct timespec *timep)
{
/*
* Passing the invalid value useconds=1000000 is a
* Sun convention for "set to current server time".
* It's needed to make permissions checks for the
* "touch" program across v2 mounts to Solaris and
* Irix boxes work correctly. See description of
* sattr in section 6.1 of "NFS Illustrated" by
* Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
*/
*p++ = htonl(timep->tv_sec);
*p++ = htonl(1000000);
return p;
}
static inline __be32*
xdr_decode_time(__be32 *p, struct timespec *timep)
{
timep->tv_sec = ntohl(*p++);
/* Convert microseconds into nanoseconds */
timep->tv_nsec = ntohl(*p++) * 1000;
return p;
}
static __be32 *
xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
{
u32 rdev, type;
type = ntohl(*p++);
fattr->mode = ntohl(*p++);
fattr->nlink = ntohl(*p++);
fattr->uid = ntohl(*p++);
fattr->gid = ntohl(*p++);
fattr->size = ntohl(*p++);
fattr->du.nfs2.blocksize = ntohl(*p++);
rdev = ntohl(*p++);
fattr->du.nfs2.blocks = ntohl(*p++);
fattr->fsid.major = ntohl(*p++);
fattr->fsid.minor = 0;
fattr->fileid = ntohl(*p++);
p = xdr_decode_time(p, &fattr->atime);
p = xdr_decode_time(p, &fattr->mtime);
p = xdr_decode_time(p, &fattr->ctime);
fattr->valid |= NFS_ATTR_FATTR_V2;
fattr->rdev = new_decode_dev(rdev);
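/* NFSv2 has no FIFO type on the wire; servers encode FIFOs as
 * character devices with the magic NFS2_FIFO_DEV number, so map
 * them back to S_IFIFO here. */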
if (type == NFCHR && rdev == NFS2_FIFO_DEV) {
fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
fattr->rdev = 0;
}
return p;
}
static inline __be32 *
xdr_encode_sattr(__be32 *p, struct iattr *attr)
{
const __be32 not_set = __constant_htonl(0xFFFFFFFF);
*p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
*p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
*p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
*p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
if (attr->ia_valid & ATTR_ATIME_SET) {
p = xdr_encode_time(p, &attr->ia_atime);
} else if (attr->ia_valid & ATTR_ATIME) {
p = xdr_encode_current_server_time(p, &attr->ia_atime);
} else {
*p++ = not_set;
*p++ = not_set;
}
if (attr->ia_valid & ATTR_MTIME_SET) {
p = xdr_encode_time(p, &attr->ia_mtime);
} else if (attr->ia_valid & ATTR_MTIME) {
p = xdr_encode_current_server_time(p, &attr->ia_mtime);
} else {
*p++ = not_set;
*p++ = not_set;
}
return p;
}
/*
* NFS encode functions
*/
/*
* Encode file handle argument
* GETATTR, READLINK, STATFS
*/
static int
nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
{
p = xdr_encode_fhandle(p, fh);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Encode SETATTR arguments
*/
static int
nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_sattr(p, args->sattr);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Encode directory ops argument
* LOOKUP, RMDIR
*/
static int
nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Encode REMOVE argument
*/
static int
nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name.name, args->name.len);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Arguments to a READ call. Since we read data directly into the page
* cache, we also set up the reply iovec here so that iov[1] points
* exactly to the page we want to fetch.
*/
static int
nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
unsigned int replen;
u32 offset = (u32)args->offset;
u32 count = args->count;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(offset);
*p++ = htonl(count);
*p++ = htonl(count);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
xdr_inline_pages(&req->rq_rcv_buf, replen,
args->pages, args->pgbase, count);
req->rq_rcv_buf.flags |= XDRBUF_READ;
return 0;
}
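/*
 * Note on the replen arithmetic above (annotation): RPC_REPHDRSIZE,
 * au_rslack and NFS_readres_sz are all 32-bit word counts, so the
 * "<< 2" converts the fixed part of the reply to the byte offset at
 * which xdr_inline_pages() splices the page array into rq_rcv_buf.
 */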
/*
* Decode READ reply
*/
static int
nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
{
struct kvec *iov = req->rq_rcv_buf.head;
size_t hdrlen;
u32 count, recvd;
int status;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
p = xdr_decode_fattr(p, res->fattr);
count = ntohl(*p++);
res->eof = 0;
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READ reply header overflowed:"
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
}
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
"count %u > recvd %u\n", count, recvd);
count = recvd;
}
dprintk("RPC: readres OK count %u\n", count);
if (count < res->count)
res->count = count;
return count;
}
/*
* Write arguments. Splice the buffer to be written into the iovec.
*/
static int
nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
{
struct xdr_buf *sndbuf = &req->rq_snd_buf;
u32 offset = (u32)args->offset;
u32 count = args->count;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(offset);
*p++ = htonl(offset);
*p++ = htonl(count);
*p++ = htonl(count);
sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
/* Copy the page array */
xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
sndbuf->flags |= XDRBUF_WRITE;
return 0;
}
/*
* Encode create arguments
* CREATE, MKDIR
*/
static int
nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
p = xdr_encode_sattr(p, args->sattr);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Encode RENAME arguments
*/
static int
nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
{
p = xdr_encode_fhandle(p, args->fromfh);
p = xdr_encode_array(p, args->fromname, args->fromlen);
p = xdr_encode_fhandle(p, args->tofh);
p = xdr_encode_array(p, args->toname, args->tolen);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Encode LINK arguments
*/
static int
nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args)
{
p = xdr_encode_fhandle(p, args->fromfh);
p = xdr_encode_fhandle(p, args->tofh);
p = xdr_encode_array(p, args->toname, args->tolen);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
/*
* Encode SYMLINK arguments
*/
static int
nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args)
{
struct xdr_buf *sndbuf = &req->rq_snd_buf;
size_t pad;
p = xdr_encode_fhandle(p, args->fromfh);
p = xdr_encode_array(p, args->fromname, args->fromlen);
*p++ = htonl(args->pathlen);
sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen);
/*
* xdr_encode_pages may have added a few bytes to ensure the
* pathname ends on a 4-byte boundary. Start encoding the
* attributes after the pad bytes.
*/
pad = sndbuf->tail->iov_len;
if (pad > 0)
p++;
p = xdr_encode_sattr(p, args->sattr);
sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad;
return 0;
}
/*
* Encode arguments to readdir call
*/
static int
nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
{
struct rpc_task *task = req->rq_task;
struct rpc_auth *auth = task->tk_msg.rpc_cred->cr_auth;
unsigned int replen;
u32 count = args->count;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(args->cookie);
*p++ = htonl(count); /* reply buffer size */
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
return 0;
}
/*
* Decode the result of a readdir call.
* We're not really decoding anymore, we just leave the buffer untouched
* and only check that it is syntactically correct.
* The real decoding happens in nfs_decode_entry below, called directly
* from nfs_readdir for each entry.
*/
static int
nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
struct page **page;
size_t hdrlen;
unsigned int pglen, recvd;
u32 len;
int status, nr = 0;
__be32 *end, *entry, *kaddr;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READDIR reply header overflowed:"
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
pglen = rcvbuf->page_len;
recvd = rcvbuf->len - hdrlen;
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
kaddr = p = kmap_atomic(*page, KM_USER0);
end = (__be32 *)((char *)p + pglen);
entry = p;
/* Make sure the packet actually has a value_follows and EOF entry */
if ((entry + 1) > end)
goto short_pkt;
for (; *p++; nr++) {
if (p + 2 > end)
goto short_pkt;
p++; /* fileid */
len = ntohl(*p++);
p += XDR_QUADLEN(len) + 1; /* name plus cookie */
if (len > NFS2_MAXNAMLEN) {
dprintk("NFS: giant filename in readdir (len 0x%x)!\n",
len);
goto err_unmap;
}
if (p + 2 > end)
goto short_pkt;
entry = p;
}
/*
* Apparently some servers send responses that are a valid size, but
* contain no entries, and have value_follows==0 and EOF==0. For
* those, just set the EOF marker.
*/
if (!nr && entry[1] == 0) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
out:
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
/*
* When we get a short packet there are 2 possibilities. We can
* return an error, or fix up the response to look like a valid
* response and return what we have so far. If there are no
* entries and the packet was short, then return -EIO. If there
* are valid entries in the response, return them and pretend that
* the call was successful, but incomplete. The caller can retry the
* readdir starting at the last cookie.
*/
entry[0] = entry[1] = 0;
if (!nr)
nr = -errno_NFSERR_IO;
goto out;
err_unmap:
nr = -errno_NFSERR_IO;
goto out;
}
__be32 *
nfs_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
{
if (!*p++) {
if (!*p)
return ERR_PTR(-EAGAIN);
entry->eof = 1;
return ERR_PTR(-EBADCOOKIE);
}
entry->ino = ntohl(*p++);
entry->len = ntohl(*p++);
entry->name = (const char *) p;
p += XDR_QUADLEN(entry->len);
entry->prev_cookie = entry->cookie;
entry->cookie = ntohl(*p++);
entry->eof = !p[0] && p[1];
return p;
}
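/*
 * Wire layout walked by nfs_decode_dirent() above (annotation): each
 * entry is value_follows (non-zero while entries remain), fileid,
 * name length, the name padded to a word boundary, then the cookie
 * for resuming the READDIR; a value_follows of zero is followed by
 * the EOF word.
 */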
/*
* NFS XDR decode functions
*/
/*
* Decode simple status reply
*/
static int
nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
{
int status;
if ((status = ntohl(*p++)) != 0)
status = nfs_stat_to_errno(status);
return status;
}
/*
* Decode attrstat reply
* GETATTR, SETATTR, WRITE
*/
static int
nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
{
int status;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
xdr_decode_fattr(p, fattr);
return 0;
}
/*
* Decode diropres reply
* LOOKUP, CREATE, MKDIR
*/
static int
nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
{
int status;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
p = xdr_decode_fhandle(p, res->fh);
xdr_decode_fattr(p, res->fattr);
return 0;
}
/*
* Encode READLINK args
*/
static int
nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
{
struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
unsigned int replen;
p = xdr_encode_fhandle(p, args->fh);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
return 0;
}
/*
* Decode READLINK reply
*/
static int
nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
size_t hdrlen;
u32 len, recvd;
char *kaddr;
int status;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
/* Convert length of symlink */
len = ntohl(*p++);
if (len >= rcvbuf->page_len) {
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
}
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READLINK reply header overflowed:"
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
recvd = req->rq_rcv_buf.len - hdrlen;
if (recvd < len) {
dprintk("NFS: server cheating in readlink reply: "
"count %u > recvd %u\n", len, recvd);
return -EIO;
}
/* NULL terminate the string we got */
kaddr = (char *)kmap_atomic(rcvbuf->pages[0], KM_USER0);
kaddr[len+rcvbuf->page_base] = '\0';
kunmap_atomic(kaddr, KM_USER0);
return 0;
}
/*
* Decode WRITE reply
*/
static int
nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
{
res->verf->committed = NFS_FILE_SYNC;
return nfs_xdr_attrstat(req, p, res->fattr);
}
/*
* Decode STATFS reply
*/
static int
nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
{
int status;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
res->tsize = ntohl(*p++);
res->bsize = ntohl(*p++);
res->blocks = ntohl(*p++);
res->bfree = ntohl(*p++);
res->bavail = ntohl(*p++);
return 0;
}
/*
* We need to translate between nfs status return values and
* the local errno values which may not be the same.
*/
static struct {
int stat;
int errno;
} nfs_errtbl[] = {
{ NFS_OK, 0 },
{ NFSERR_PERM, -EPERM },
{ NFSERR_NOENT, -ENOENT },
{ NFSERR_IO, -errno_NFSERR_IO},
{ NFSERR_NXIO, -ENXIO },
/* { NFSERR_EAGAIN, -EAGAIN }, */
{ NFSERR_ACCES, -EACCES },
{ NFSERR_EXIST, -EEXIST },
{ NFSERR_XDEV, -EXDEV },
{ NFSERR_NODEV, -ENODEV },
{ NFSERR_NOTDIR, -ENOTDIR },
{ NFSERR_ISDIR, -EISDIR },
{ NFSERR_INVAL, -EINVAL },
{ NFSERR_FBIG, -EFBIG },
{ NFSERR_NOSPC, -ENOSPC },
{ NFSERR_ROFS, -EROFS },
{ NFSERR_MLINK, -EMLINK },
{ NFSERR_NAMETOOLONG, -ENAMETOOLONG },
{ NFSERR_NOTEMPTY, -ENOTEMPTY },
{ NFSERR_DQUOT, -EDQUOT },
{ NFSERR_STALE, -ESTALE },
{ NFSERR_REMOTE, -EREMOTE },
#ifdef EWFLUSH
{ NFSERR_WFLUSH, -EWFLUSH },
#endif
{ NFSERR_BADHANDLE, -EBADHANDLE },
{ NFSERR_NOT_SYNC, -ENOTSYNC },
{ NFSERR_BAD_COOKIE, -EBADCOOKIE },
{ NFSERR_NOTSUPP, -ENOTSUPP },
{ NFSERR_TOOSMALL, -ETOOSMALL },
{ NFSERR_SERVERFAULT, -EREMOTEIO },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
{ -1, -EIO }
};
/*
* Convert an NFS error code to a local one.
* This one is used jointly by NFSv2 and NFSv3.
*/
int
nfs_stat_to_errno(int stat)
{
int i;
for (i = 0; nfs_errtbl[i].stat != -1; i++) {
if (nfs_errtbl[i].stat == stat)
return nfs_errtbl[i].errno;
}
dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat);
return nfs_errtbl[i].errno;
}
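/*
 * Minimal usage sketch (mirrors the pattern in the decoders above):
 *
 *	if ((status = ntohl(*p++)))
 *		return nfs_stat_to_errno(status);
 */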
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
.p_encode = (kxdrproc_t) nfs_xdr_##argtype, \
.p_decode = (kxdrproc_t) nfs_xdr_##restype, \
.p_arglen = NFS_##argtype##_sz, \
.p_replen = NFS_##restype##_sz, \
.p_timer = timer, \
.p_statidx = NFSPROC_##proc, \
.p_name = #proc, \
}
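/*
 * For illustration, PROC(GETATTR, fhandle, attrstat, 1) below expands
 * roughly to:
 *
 *	[NFSPROC_GETATTR] = {
 *		.p_proc   = NFSPROC_GETATTR,
 *		.p_encode = (kxdrproc_t) nfs_xdr_fhandle,
 *		.p_decode = (kxdrproc_t) nfs_xdr_attrstat,
 *		.p_arglen = NFS_fhandle_sz,
 *		.p_replen = NFS_attrstat_sz,
 *		.p_timer  = 1,
 *		...
 *	},
 */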
struct rpc_procinfo nfs_procedures[] = {
PROC(GETATTR, fhandle, attrstat, 1),
PROC(SETATTR, sattrargs, attrstat, 0),
PROC(LOOKUP, diropargs, diropres, 2),
PROC(READLINK, readlinkargs, readlinkres, 3),
PROC(READ, readargs, readres, 3),
PROC(WRITE, writeargs, writeres, 4),
PROC(CREATE, createargs, diropres, 0),
PROC(REMOVE, removeargs, stat, 0),
PROC(RENAME, renameargs, stat, 0),
PROC(LINK, linkargs, stat, 0),
PROC(SYMLINK, symlinkargs, stat, 0),
PROC(MKDIR, createargs, diropres, 0),
PROC(RMDIR, diropargs, stat, 0),
PROC(READDIR, readdirargs, readdirres, 3),
PROC(STATFS, fhandle, statfsres, 0),
};
struct rpc_version nfs_version2 = {
.number = 2,
.nrprocs = ARRAY_SIZE(nfs_procedures),
.procs = nfs_procedures
};
| gpl-2.0 |
eugene373/Streamline | crypto/vmac.c | 1058 | 18297 | /*
* Modified to interface to the Linux kernel
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
/* --------------------------------------------------------------------------
* VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
* This implementation is hereby placed in the public domain.
* The authors offer no warranty. Use at your own risk.
* Please send bug reports to the authors.
* Last modified: 17 APR 08, 1700 PDT
* ----------------------------------------------------------------------- */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif
/*
* The following routines are used in this implementation. They are
* written via macros to simulate zero-overhead call-by-reference.
*
* MUL64: 64x64->128-bit multiplication
* PMUL64: assumes top bits cleared on inputs
* ADD128: 128x128->128-bit addition
*/
#define ADD128(rh, rl, ih, il) \
do { \
u64 _il = (il); \
(rl) += (_il); \
if ((rl) < (_il)) \
(rh)++; \
(rh) += (ih); \
} while (0)
#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m >> 32), (m << 32)); \
} while (0)
#define MUL64(rh, rl, i1, i2) \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m1 = MUL32(_i1, _i2>>32); \
u64 m2 = MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
} while (0)
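/*
 * Annotation: MUL64 forms the full 128-bit product portably by
 * splitting each operand into 32-bit halves, taking the four partial
 * products with MUL32, and carrying the shifted cross terms into
 * (rh,rl) via ADD128. For example, i1 = i2 = 1ULL << 32 yields
 * rh = 1, rl = 0, i.e. 2^64.
 */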
/*
* For highest performance the L1 NH and L2 polynomial hashes should be
* carefully implemented to take advantage of one's target architecture.
* Here these two hash functions are defined multiple times: once for
* 64-bit architectures, once for 32-bit SSE2 architectures, and once
* for the rest (32-bit) architectures.
* For each, nh_16 *must* be defined (works on multiples of 16 bytes).
* Optionally, nh_vmac_nhbytes can be defined (for multiples of
* VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
* NH computations at once).
*/
#ifdef CONFIG_64BIT
#define nh_16(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#endif
#define poly_step(ah, al, kh, kl, mh, ml) \
do { \
u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
/* compute ab*cd, put bd into result registers */ \
PMUL64(t3h, t3l, al, kh); \
PMUL64(t2h, t2l, ah, kl); \
PMUL64(t1h, t1l, ah, 2*kh); \
PMUL64(ah, al, al, kl); \
/* add 2 * ac to result */ \
ADD128(ah, al, t1h, t1l); \
/* add together ad + bc */ \
ADD128(t2h, t2l, t3h, t3l); \
/* now (ah,al), (t2l,2*t2h) need summing */ \
/* first add the high registers, carrying into t2h */ \
ADD128(t2h, ah, z, t2l); \
/* double t2h and add top bit of ah */ \
t2h = 2 * t2h + (ah >> 63); \
ah &= m63; \
/* now add the low registers */ \
ADD128(ah, al, mh, ml); \
ADD128(ah, al, z, t2h); \
} while (0)
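/*
 * Annotation (my reading, not from the original authors): poly_step
 * performs one Horner step of the L2 polynomial hash, i.e.
 * (ah,al) = (ah,al)*(kh,kl) + (mh,ml), partially reduced modulo
 * 2^127 - 1: since 2^128 == 2 (mod 2^127 - 1), the overflow word is
 * folded back in doubled ("2 * t2h") and ah is masked with m63.
 */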
#else /* ! CONFIG_64BIT */
#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl) \
do { \
u64 t1, t2, m1, m2, t; \
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
t1 = pe64_to_cpup(mp+i) + kp[i]; \
t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
MUL32(t1, t2)); \
rh += (u64)(u32)(m1 >> 32) \
+ (u32)(m2 >> 32); \
t += (u64)(u32)m1 + (u32)m2; \
} \
ADD128(rh, rl, (t >> 32), (t << 32)); \
} while (0)
#endif
static void poly_step_func(u64 *ahi, u64 *alo,
const u64 *kh, const u64 *kl,
const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))
u64 p, q, t;
u32 t2;
p = MUL32(a3, k3);
p += p;
p += *(u64 *)mh;
p += MUL32(a0, k2);
p += MUL32(a1, k1);
p += MUL32(a2, k0);
t = (u32)(p);
p >>= 32;
p += MUL32(a0, k3);
p += MUL32(a1, k2);
p += MUL32(a2, k1);
p += MUL32(a3, k0);
t |= ((u64)((u32)p & 0x7fffffff)) << 32;
p >>= 31;
p += (u64)(((u32 *)ml)[INDEX_LOW]);
p += MUL32(a0, k0);
q = MUL32(a1, k3);
q += MUL32(a2, k2);
q += MUL32(a3, k1);
q += q;
p += q;
t2 = (u32)(p);
p >>= 32;
p += (u64)(((u32 *)ml)[INDEX_HIGH]);
p += MUL32(a0, k1);
p += MUL32(a1, k0);
q = MUL32(a2, k3);
q += MUL32(a3, k2);
q += q;
p += q;
*(u64 *)(alo) = (p << 32) | t2;
p >>= 32;
*(u64 *)(ahi) = p + t;
#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}
#define poly_step(ah, al, kh, kl, mh, ml) \
poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
#endif /* end of specialized NH and poly definitions */
/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_16(mp, kp, nw, rh, rl); \
nh_16(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
static void vhash_abort(struct vmac_ctx *ctx)
{
ctx->polytmp[0] = ctx->polykey[0];
ctx->polytmp[1] = ctx->polykey[1];
ctx->first_block_processed = 0;
}
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
/* fully reduce (p1,p2)+(len,0) mod p127 */
t = p1 >> 63;
p1 &= m63;
ADD128(p1, p2, len, t);
/* At this point, (p1,p2) is at most 2^127+(len<<64) */
t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
ADD128(p1, p2, z, t);
p1 &= m63;
/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
t = p1 + (p2 >> 32);
t += (t >> 32);
t += (u32)t > 0xfffffffeu;
p1 += (t >> 32);
p2 += (p1 << 32);
/* compute (p1+k1)%p64 and (p2+k2)%p64 */
p1 += k1;
p1 += (0 - (p1 < k1)) & 257;
p2 += k2;
p2 += (0 - (p2 < k2)) & 257;
/* compute (p1+k1)*(p2+k2)%p64 */
MUL64(rh, rl, p1, p2);
t = rh >> 56;
ADD128(t, rl, z, rh);
rh <<= 8;
ADD128(t, rl, z, rh);
t += t << 8;
rl += t;
rl += (0 - (rl < t)) & 257;
rl += (0 - (rl > p64-1)) & 257;
return rl;
}
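/*
 * Annotation (assumption based on the constants above): l3hash is the
 * final L3 stage of VHASH. After reducing the 127-bit accumulator it
 * adds the l3 keys mod p64 = 2^64 - 257, multiplies the two halves,
 * and reduces the product mod p64 using 2^64 mod p64 = 257, which is
 * why the folding steps add multiples of 257.
 */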
static void vhash_update(const unsigned char *m,
unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
struct vmac_ctx *ctx)
{
u64 rh, rl, *mptr;
const u64 *kptr = (u64 *)ctx->nhkey;
int i;
u64 ch, cl;
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
ch = ctx->polytmp[0];
cl = ctx->polytmp[1];
if (!ctx->first_block_processed) {
ctx->first_block_processed = 1;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
i--;
}
while (i--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
ctx->polytmp[0] = ch;
ctx->polytmp[1] = cl;
}
static u64 vhash(unsigned char m[], unsigned int mbytes,
u64 *tagl, struct vmac_ctx *ctx)
{
u64 rh, rl, *mptr;
const u64 *kptr = (u64 *)ctx->nhkey;
int i, remaining;
u64 ch, cl;
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES;
remaining = mbytes % VMAC_NHBYTES;
if (ctx->first_block_processed) {
ch = ctx->polytmp[0];
cl = ctx->polytmp[1];
} else if (i) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
ch &= m62;
ADD128(ch, cl, pkh, pkl);
mptr += (VMAC_NHBYTES/sizeof(u64));
i--;
} else if (remaining) {
nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
ch &= m62;
ADD128(ch, cl, pkh, pkl);
mptr += (VMAC_NHBYTES/sizeof(u64));
goto do_l3;
} else { /* Empty string */
ch = pkh; cl = pkl;
goto do_l3;
}
while (i--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
if (remaining) {
nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
}
do_l3:
vhash_abort(ctx);
remaining *= 8;
return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
}
static u64 vmac(unsigned char m[], unsigned int mbytes,
unsigned char n[16], u64 *tagl,
struct vmac_ctx_t *ctx)
{
u64 *in_n, *out_p;
u64 p, h;
int i;
in_n = ctx->__vmac_ctx.cached_nonce;
out_p = ctx->__vmac_ctx.cached_aes;
i = n[15] & 1;
if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
in_n[0] = *(u64 *)(n);
in_n[1] = *(u64 *)(n+8);
((unsigned char *)in_n)[15] &= 0xFE;
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out_p, (unsigned char *)in_n);
((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
return le64_to_cpu(p + h);
}
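/*
 * Big-picture annotation: the tag is VHASH(m) + AES_K(nonce),
 * truncated to 64 bits. The nonce's low bit merely selects which half
 * of the cached 128-bit AES output is used, which is why the cache
 * key clears bit 0 before encrypting.
 */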
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
{
u64 in[2] = {0}, out[2];
unsigned i;
int err = 0;
err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
if (err)
return err;
/* Fill nh key */
((unsigned char *)in)[0] = 0x80;
for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
((unsigned char *)in)[15] += 1;
}
/* Fill poly key */
((unsigned char *)in)[0] = 0xC0;
in[1] = 0;
for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.polytmp[i] =
ctx->__vmac_ctx.polykey[i] =
be64_to_cpup(out) & mpoly;
ctx->__vmac_ctx.polytmp[i+1] =
ctx->__vmac_ctx.polykey[i+1] =
be64_to_cpup(out+1) & mpoly;
((unsigned char *)in)[15] += 1;
}
/* Fill ip key */
((unsigned char *)in)[0] = 0xE0;
in[1] = 0;
for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
do {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
((unsigned char *)in)[15] += 1;
} while (ctx->__vmac_ctx.l3key[i] >= p64
|| ctx->__vmac_ctx.l3key[i+1] >= p64);
}
/* Invalidate nonce/aes cache and reset other elements */
ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
ctx->__vmac_ctx.first_block_processed = 0;
return err;
}
static int vmac_setkey(struct crypto_shash *parent,
const u8 *key, unsigned int keylen)
{
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
if (keylen != VMAC_KEY_LEN) {
crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return vmac_set_key((u8 *)key, ctx);
}
static int vmac_init(struct shash_desc *pdesc)
{
return 0;
}
static int vmac_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
vhash_update(p, len, &ctx->__vmac_ctx);
return 0;
}
static int vmac_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
vmac_t mac;
u8 nonce[16] = {};
mac = vmac(NULL, 0, nonce, NULL, ctx);
memcpy(out, &mac, sizeof(vmac_t));
memset(&mac, 0, sizeof(vmac_t));
memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return PTR_ERR(alg);
inst = shash_alloc_instance("vmac", alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
err = crypto_init_spawn(shash_instance_ctx(inst), alg,
shash_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
if (err)
goto out_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.digestsize = sizeof(vmac_t);
inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
inst->alg.setkey = vmac_setkey;
err = shash_register_instance(tmpl, inst);
if (err) {
out_free_inst:
shash_free_instance(shash_crypto_instance(inst));
}
out_put_alg:
crypto_mod_put(alg);
return err;
}
static struct crypto_template vmac_tmpl = {
.name = "vmac",
.create = vmac_create,
.free = shash_free_instance,
.module = THIS_MODULE,
};
static int __init vmac_module_init(void)
{
return crypto_register_template(&vmac_tmpl);
}
static void __exit vmac_module_exit(void)
{
crypto_unregister_template(&vmac_tmpl);
}
module_init(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
| gpl-2.0 |
nerdyblonde/N80XX_Kernel | drivers/infiniband/ulp/iser/iser_initiator.c | 1570 | 16280 | /*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
* iser_task->data[ISER_DIR_IN].data_len
*/
static int iser_prepare_read_cmd(struct iscsi_task *task,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
err = iser_dma_map_task_data(iser_task,
buf_in,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
iser_err("Total data length: %ld, less than EDTL: "
"%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
iser_task->data[ISER_DIR_IN].data_len, edtl,
task->itt, iser_task->iser_conn);
return -EINVAL;
}
err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
}
regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
hdr->flags |= ISER_RSV;
hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
hdr->read_va = cpu_to_be64(regd_buf->reg.va);
iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
task->itt, regd_buf->reg.rkey,
(unsigned long long)regd_buf->reg.va);
return 0;
}
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
* task->data[ISER_DIR_OUT].data_len
*/
static int
iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int imm_sz,
unsigned int unsol_sz,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
buf_out,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Total data length: %ld, less than EDTL: %d, "
"in WRITE cmd BHS itt: %d, conn: 0x%p\n",
iser_task->data[ISER_DIR_OUT].data_len,
edtl, task->itt, task->conn);
return -EINVAL;
}
err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
}
regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz);
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
task->itt, regd_buf->reg.rkey,
(unsigned long long)regd_buf->reg.va, unsol_sz);
}
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
tx_dsg->addr = regd_buf->reg.va;
tx_dsg->length = imm_sz;
tx_dsg->lkey = regd_buf->reg.lkey;
iser_task->desc.num_sge = 2;
}
return 0;
}
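/*
 * Annotation: unsol_sz < edtl means part of the payload will be
 * solicited via R2T, so the iSER header advertises an RDMA-write
 * stag/va starting just past the unsolicited region; immediate data,
 * if any, rides along as a second sge of the command PDU itself.
 */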
/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *ib_conn,
struct iser_tx_desc *tx_desc)
{
struct iser_device *device = ib_conn->device;
ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
tx_desc->iser_header.flags = ISER_VER;
tx_desc->num_sge = 1;
if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
}
}
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
int i, j;
u64 dma_addr;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
struct iser_device *device = ib_conn->device;
ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!ib_conn->rx_descs)
goto rx_desc_alloc_fail;
rx_desc = ib_conn->rx_descs;
for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
goto rx_desc_dma_map_failed;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->mr->lkey;
}
ib_conn->rx_desc_head = 0;
return 0;
rx_desc_dma_map_failed:
rx_desc = ib_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs);
ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
}
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
int i;
struct iser_rx_desc *rx_desc;
struct iser_device *device = ib_conn->device;
if (ib_conn->login_buf) {
ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->login_buf);
}
if (!ib_conn->rx_descs)
return;
rx_desc = ib_conn->rx_descs;
for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs);
}
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
return 0;
/*
* Check that there is one posted recv buffer (for the last login
* response) and no posted send buffers left - they must have been
* consumed during previous login phases.
*/
WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
/* Initial post receive buffers */
if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
return -ENOMEM;
return 0;
}
/**
* iser_send_command - send command PDU
*/
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
unsigned long edtl;
int err;
struct iser_data_buf *data_buf;
struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
tx_desc->type = ISCSI_TX_SCSI_COMMAND;
iser_create_send_desc(iser_conn->ib_conn, tx_desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ)
data_buf = &iser_task->data[ISER_DIR_IN];
else
data_buf = &iser_task->data[ISER_DIR_OUT];
if (scsi_sg_count(sc)) { /* using a scatter list */
data_buf->buf = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
data_buf->data_len = scsi_bufflen(sc);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
err = iser_prepare_read_cmd(task, edtl);
if (err)
goto send_command_error;
}
if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
err = iser_prepare_write_cmd(task,
task->imm_count,
task->imm_count +
task->unsol_r2t.data_length,
edtl);
if (err)
goto send_command_error;
}
iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_command_error:
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
/**
* iser_send_data_out - send data out PDU
*/
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL;
struct iser_regd_buf *regd_buf;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
int err = 0;
struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
data_seg_len = ntoh24(hdr->dlength);
buf_offset = ntohl(hdr->offset);
iser_dbg("%s itt %d dseg_len %d offset %d\n",
__func__, (int)itt, (int)data_seg_len, (int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
if (tx_desc == NULL) {
iser_err("Failed to alloc desc for post dataout\n");
return -ENOMEM;
}
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
/* build the tx desc */
iser_initialize_task_headers(task, tx_desc);
regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
tx_dsg->addr = regd_buf->reg.va + buf_offset;
tx_dsg->length = data_seg_len;
tx_dsg->lkey = regd_buf->reg.lkey;
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out "
"inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
goto send_data_out_error;
}
iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
itt, buf_offset, data_seg_len);
err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_data_out_error:
kmem_cache_free(ig.desc_cache, tx_desc);
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
struct iser_device *device;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
iser_create_send_desc(iser_conn->ib_conn, mdesc);
device = iser_conn->ib_conn->device;
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
if (task != conn->login_task) {
iser_err("data present on non login task!!!\n");
goto send_control_error;
}
memcpy(iser_conn->ib_conn->login_buf, task->data,
task->data_count);
tx_dsg->addr = iser_conn->ib_conn->login_dma;
tx_dsg->length = data_seg_len;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
}
if (task == conn->login_task) {
err = iser_post_recvl(iser_conn->ib_conn);
if (err)
goto send_control_error;
err = iser_post_rx_bufs(conn, task->hdr);
if (err)
goto send_control_error;
}
err = iser_post_send(iser_conn->ib_conn, mdesc);
if (!err)
return 0;
send_control_error:
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
/**
* iser_rcv_dto_completion - recv DTO completion
*/
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
unsigned long rx_xfer_len,
struct iser_conn *ib_conn)
{
struct iscsi_iser_conn *conn = ib_conn->iser_conn;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding, count, err;
/* differentiate between login to all other PDUs */
if ((char *)rx_desc == ib_conn->login_buf) {
rx_dma = ib_conn->login_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
} else {
rx_dma = rx_desc->dma_addr;
rx_buflen = ISER_RX_PAYLOAD_SIZE;
}
ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE);
hdr = &rx_desc->iscsi_header;
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
iscsi_iser_recv(conn->iscsi_conn, hdr,
rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
conn->ib_conn->post_recv_buf_count--;
if (rx_dma == ib_conn->login_dma)
return;
outstanding = ib_conn->post_recv_buf_count;
if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
ISER_MIN_POSTED_RX);
err = iser_post_recvm(ib_conn, count);
if (err)
iser_err("posting %d rx bufs err %d\n", count, err);
}
}
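/*
 * Replenish policy annotation: receive buffers are reposted in fixed
 * batches of ISER_MIN_POSTED_RX, and only once enough headroom exists
 * that the outstanding count stays within ISER_QP_MAX_RECV_DTOS.
 */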
void iser_snd_completion(struct iser_tx_desc *tx_desc,
struct iser_conn *ib_conn)
{
struct iscsi_task *task;
struct iser_device *device = ib_conn->device;
if (tx_desc->type == ISCSI_TX_DATAOUT) {
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
}
atomic_dec(&ib_conn->post_send_buf_count);
if (tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
if (task->hdr->itt == RESERVED_ITT)
iscsi_put_task(task);
}
}
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
iser_task->status = ISER_TASK_STATUS_INIT;
iser_task->dir[ISER_DIR_IN] = 0;
iser_task->dir[ISER_DIR_OUT] = 0;
iser_task->data[ISER_DIR_IN].data_len = 0;
iser_task->data[ISER_DIR_OUT].data_len = 0;
memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
sizeof(struct iser_regd_buf));
memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
sizeof(struct iser_regd_buf));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
int is_rdma_aligned = 1;
struct iser_regd_buf *regd;
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
is_rdma_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
}
if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
is_rdma_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
}
if (iser_task->dir[ISER_DIR_IN]) {
regd = &iser_task->rdma_regd[ISER_DIR_IN];
if (regd->reg.is_fmr)
iser_unreg_mem(®d->reg);
}
if (iser_task->dir[ISER_DIR_OUT]) {
regd = &iser_task->rdma_regd[ISER_DIR_OUT];
if (regd->reg.is_fmr)
iser_unreg_mem(®d->reg);
}
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
iser_dma_unmap_task_data(iser_task);
}
| gpl-2.0 |
ManhIT-CMB/SpaceX-Kernel-Exynos5433 | drivers/rtc/rtc-msm6242.c | 2082 | 7209 | /*
* Oki MSM6242 RTC Driver
*
* Copyright 2009 Geert Uytterhoeven
*
* Based on the A2000 TOD code in arch/m68k/amiga/config.c
* Copyright (C) 1993 Hamish Macdonald
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
enum {
MSM6242_SECOND1 = 0x0, /* 1-second digit register */
MSM6242_SECOND10 = 0x1, /* 10-second digit register */
MSM6242_MINUTE1 = 0x2, /* 1-minute digit register */
MSM6242_MINUTE10 = 0x3, /* 10-minute digit register */
MSM6242_HOUR1 = 0x4, /* 1-hour digit register */
MSM6242_HOUR10 = 0x5, /* PM/AM, 10-hour digit register */
MSM6242_DAY1 = 0x6, /* 1-day digit register */
MSM6242_DAY10 = 0x7, /* 10-day digit register */
MSM6242_MONTH1 = 0x8, /* 1-month digit register */
MSM6242_MONTH10 = 0x9, /* 10-month digit register */
MSM6242_YEAR1 = 0xa, /* 1-year digit register */
MSM6242_YEAR10 = 0xb, /* 10-year digit register */
MSM6242_WEEK = 0xc, /* Week register */
MSM6242_CD = 0xd, /* Control Register D */
MSM6242_CE = 0xe, /* Control Register E */
MSM6242_CF = 0xf, /* Control Register F */
};
#define MSM6242_HOUR10_AM (0 << 2)
#define MSM6242_HOUR10_PM (1 << 2)
#define MSM6242_HOUR10_HR_MASK (3 << 0)
#define MSM6242_WEEK_SUNDAY 0
#define MSM6242_WEEK_MONDAY 1
#define MSM6242_WEEK_TUESDAY 2
#define MSM6242_WEEK_WEDNESDAY 3
#define MSM6242_WEEK_THURSDAY 4
#define MSM6242_WEEK_FRIDAY 5
#define MSM6242_WEEK_SATURDAY 6
#define MSM6242_CD_30_S_ADJ (1 << 3) /* 30-second adjustment */
#define MSM6242_CD_IRQ_FLAG (1 << 2)
#define MSM6242_CD_BUSY (1 << 1)
#define MSM6242_CD_HOLD (1 << 0)
#define MSM6242_CE_T_MASK (3 << 2)
#define MSM6242_CE_T_64HZ (0 << 2) /* period 1/64 second */
#define MSM6242_CE_T_1HZ (1 << 2) /* period 1 second */
#define MSM6242_CE_T_1MINUTE (2 << 2) /* period 1 minute */
#define MSM6242_CE_T_1HOUR (3 << 2) /* period 1 hour */
#define MSM6242_CE_ITRPT_STND (1 << 1)
#define MSM6242_CE_MASK (1 << 0) /* STD.P output control */
#define MSM6242_CF_TEST (1 << 3)
#define MSM6242_CF_12H (0 << 2)
#define MSM6242_CF_24H (1 << 2)
#define MSM6242_CF_STOP (1 << 1)
#define MSM6242_CF_REST (1 << 0) /* reset */
struct msm6242_priv {
u32 __iomem *regs;
struct rtc_device *rtc;
};
static inline unsigned int msm6242_read(struct msm6242_priv *priv,
unsigned int reg)
{
return __raw_readl(&priv->regs[reg]) & 0xf;
}
static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
unsigned int reg)
{
__raw_writel(val, &priv->regs[reg]);
}
static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
unsigned int reg)
{
msm6242_write(priv, msm6242_read(priv, reg) | val, reg);
}
static inline void msm6242_clear(struct msm6242_priv *priv, unsigned int val,
unsigned int reg)
{
msm6242_write(priv, msm6242_read(priv, reg) & ~val, reg);
}
static void msm6242_lock(struct msm6242_priv *priv)
{
int cnt = 5;
msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
while ((msm6242_read(priv, MSM6242_CD) & MSM6242_CD_BUSY) && cnt) {
msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
udelay(70);
msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
cnt--;
}
if (!cnt)
pr_warning("msm6242: timed out waiting for RTC (0x%x)\n",
msm6242_read(priv, MSM6242_CD));
}
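/*
 * Handshake annotation: HOLD freezes the time registers for a
 * consistent read, but only takes effect when the chip is not mid
 * update; if BUSY is set the driver releases HOLD, waits 70us for the
 * update to complete, and retries, giving up after five attempts.
 */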
static void msm6242_unlock(struct msm6242_priv *priv)
{
msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
}
static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
{
struct msm6242_priv *priv = dev_get_drvdata(dev);
msm6242_lock(priv);
tm->tm_sec = msm6242_read(priv, MSM6242_SECOND10) * 10 +
msm6242_read(priv, MSM6242_SECOND1);
tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
msm6242_read(priv, MSM6242_MINUTE1);
/* mask off the AM/PM bit; only the low two bits hold the 10-hour digit */
tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
MSM6242_HOUR10_HR_MASK) * 10 +
msm6242_read(priv, MSM6242_HOUR1);
tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
msm6242_read(priv, MSM6242_DAY1);
tm->tm_wday = msm6242_read(priv, MSM6242_WEEK);
tm->tm_mon = msm6242_read(priv, MSM6242_MONTH10) * 10 +
msm6242_read(priv, MSM6242_MONTH1) - 1;
tm->tm_year = msm6242_read(priv, MSM6242_YEAR10) * 10 +
msm6242_read(priv, MSM6242_YEAR1);
if (tm->tm_year <= 69)
tm->tm_year += 100;
if (!(msm6242_read(priv, MSM6242_CF) & MSM6242_CF_24H)) {
unsigned int pm = msm6242_read(priv, MSM6242_HOUR10) &
MSM6242_HOUR10_PM;
if (!pm && tm->tm_hour == 12)
tm->tm_hour = 0;
else if (pm && tm->tm_hour != 12)
tm->tm_hour += 12;
}
msm6242_unlock(priv);
return rtc_valid_tm(tm);
}
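/*
 * 12-hour decoding example (annotation): in 12-hour mode, 12:xx AM
 * reads back as hour 12 with PM clear and maps to 0, 1:00 PM reads as
 * hour 1 with PM set and maps to 13, and only 12:xx PM passes through
 * unchanged.
 */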
static int msm6242_set_time(struct device *dev, struct rtc_time *tm)
{
struct msm6242_priv *priv = dev_get_drvdata(dev);
msm6242_lock(priv);
msm6242_write(priv, tm->tm_sec / 10, MSM6242_SECOND10);
msm6242_write(priv, tm->tm_sec % 10, MSM6242_SECOND1);
msm6242_write(priv, tm->tm_min / 10, MSM6242_MINUTE10);
msm6242_write(priv, tm->tm_min % 10, MSM6242_MINUTE1);
if (msm6242_read(priv, MSM6242_CF) & MSM6242_CF_24H)
msm6242_write(priv, tm->tm_hour / 10, MSM6242_HOUR10);
else if (tm->tm_hour >= 12)
msm6242_write(priv, MSM6242_HOUR10_PM + (tm->tm_hour - 12) / 10,
MSM6242_HOUR10);
else
msm6242_write(priv, tm->tm_hour / 10, MSM6242_HOUR10);
msm6242_write(priv, tm->tm_hour % 10, MSM6242_HOUR1);
msm6242_write(priv, tm->tm_mday / 10, MSM6242_DAY10);
msm6242_write(priv, tm->tm_mday % 10, MSM6242_DAY1);
if (tm->tm_wday != -1)
msm6242_write(priv, tm->tm_wday, MSM6242_WEEK);
msm6242_write(priv, (tm->tm_mon + 1) / 10, MSM6242_MONTH10);
msm6242_write(priv, (tm->tm_mon + 1) % 10, MSM6242_MONTH1);
if (tm->tm_year >= 100)
tm->tm_year -= 100;
msm6242_write(priv, tm->tm_year / 10, MSM6242_YEAR10);
msm6242_write(priv, tm->tm_year % 10, MSM6242_YEAR1);
msm6242_unlock(priv);
return 0;
}
static const struct rtc_class_ops msm6242_rtc_ops = {
.read_time = msm6242_read_time,
.set_time = msm6242_set_time,
};
static int __init msm6242_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct msm6242_priv *priv;
struct rtc_device *rtc;
int error;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!priv->regs)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
rtc = devm_rtc_device_register(&pdev->dev, "rtc-msm6242",
&msm6242_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
error = PTR_ERR(rtc);
goto out_unmap;
}
priv->rtc = rtc;
return 0;
out_unmap:
platform_set_drvdata(pdev, NULL);
return error;
}
static int __exit msm6242_rtc_remove(struct platform_device *pdev)
{
return 0;
}
static struct platform_driver msm6242_rtc_driver = {
.driver = {
.name = "rtc-msm6242",
.owner = THIS_MODULE,
},
.remove = __exit_p(msm6242_rtc_remove),
};
module_platform_driver_probe(msm6242_rtc_driver, msm6242_rtc_probe);
MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Oki MSM6242 RTC driver");
MODULE_ALIAS("platform:rtc-msm6242");
| gpl-2.0 |
CODEG3EK/Odin | drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c | 2082 | 108209 | /*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nvc0.h"
static void
nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x400208, 0x80000000);
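/*
 * Annotation (assumption): writing 0x80000000 to 0x400208 appears to
 * arm PGRAPH's method-injection path so the nv_icmd() writes below
 * seed the default method state of a fresh context image; the long
 * register/value list that follows is data, not logic.
 */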
nv_icmd(priv, 0x001000, 0x00000004);
nv_icmd(priv, 0x000039, 0x00000000);
nv_icmd(priv, 0x00003a, 0x00000000);
nv_icmd(priv, 0x00003b, 0x00000000);
nv_icmd(priv, 0x0000a9, 0x0000ffff);
nv_icmd(priv, 0x000038, 0x0fac6881);
nv_icmd(priv, 0x00003d, 0x00000001);
nv_icmd(priv, 0x0000e8, 0x00000400);
nv_icmd(priv, 0x0000e9, 0x00000400);
nv_icmd(priv, 0x0000ea, 0x00000400);
nv_icmd(priv, 0x0000eb, 0x00000400);
nv_icmd(priv, 0x0000ec, 0x00000400);
nv_icmd(priv, 0x0000ed, 0x00000400);
nv_icmd(priv, 0x0000ee, 0x00000400);
nv_icmd(priv, 0x0000ef, 0x00000400);
nv_icmd(priv, 0x000078, 0x00000300);
nv_icmd(priv, 0x000079, 0x00000300);
nv_icmd(priv, 0x00007a, 0x00000300);
nv_icmd(priv, 0x00007b, 0x00000300);
nv_icmd(priv, 0x00007c, 0x00000300);
nv_icmd(priv, 0x00007d, 0x00000300);
nv_icmd(priv, 0x00007e, 0x00000300);
nv_icmd(priv, 0x00007f, 0x00000300);
nv_icmd(priv, 0x000050, 0x00000011);
nv_icmd(priv, 0x000058, 0x00000008);
nv_icmd(priv, 0x000059, 0x00000008);
nv_icmd(priv, 0x00005a, 0x00000008);
nv_icmd(priv, 0x00005b, 0x00000008);
nv_icmd(priv, 0x00005c, 0x00000008);
nv_icmd(priv, 0x00005d, 0x00000008);
nv_icmd(priv, 0x00005e, 0x00000008);
nv_icmd(priv, 0x00005f, 0x00000008);
nv_icmd(priv, 0x000208, 0x00000001);
nv_icmd(priv, 0x000209, 0x00000001);
nv_icmd(priv, 0x00020a, 0x00000001);
nv_icmd(priv, 0x00020b, 0x00000001);
nv_icmd(priv, 0x00020c, 0x00000001);
nv_icmd(priv, 0x00020d, 0x00000001);
nv_icmd(priv, 0x00020e, 0x00000001);
nv_icmd(priv, 0x00020f, 0x00000001);
nv_icmd(priv, 0x000081, 0x00000001);
nv_icmd(priv, 0x000085, 0x00000004);
nv_icmd(priv, 0x000088, 0x00000400);
nv_icmd(priv, 0x000090, 0x00000300);
nv_icmd(priv, 0x000098, 0x00001001);
nv_icmd(priv, 0x0000e3, 0x00000001);
nv_icmd(priv, 0x0000da, 0x00000001);
nv_icmd(priv, 0x0000f8, 0x00000003);
nv_icmd(priv, 0x0000fa, 0x00000001);
nv_icmd(priv, 0x00009f, 0x0000ffff);
nv_icmd(priv, 0x0000a0, 0x0000ffff);
nv_icmd(priv, 0x0000a1, 0x0000ffff);
nv_icmd(priv, 0x0000a2, 0x0000ffff);
nv_icmd(priv, 0x0000b1, 0x00000001);
nv_icmd(priv, 0x0000ad, 0x0000013e);
nv_icmd(priv, 0x0000e1, 0x00000010);
nv_icmd(priv, 0x000290, 0x00000000);
nv_icmd(priv, 0x000291, 0x00000000);
nv_icmd(priv, 0x000292, 0x00000000);
nv_icmd(priv, 0x000293, 0x00000000);
nv_icmd(priv, 0x000294, 0x00000000);
nv_icmd(priv, 0x000295, 0x00000000);
nv_icmd(priv, 0x000296, 0x00000000);
nv_icmd(priv, 0x000297, 0x00000000);
nv_icmd(priv, 0x000298, 0x00000000);
nv_icmd(priv, 0x000299, 0x00000000);
nv_icmd(priv, 0x00029a, 0x00000000);
nv_icmd(priv, 0x00029b, 0x00000000);
nv_icmd(priv, 0x00029c, 0x00000000);
nv_icmd(priv, 0x00029d, 0x00000000);
nv_icmd(priv, 0x00029e, 0x00000000);
nv_icmd(priv, 0x00029f, 0x00000000);
nv_icmd(priv, 0x0003b0, 0x00000000);
nv_icmd(priv, 0x0003b1, 0x00000000);
nv_icmd(priv, 0x0003b2, 0x00000000);
nv_icmd(priv, 0x0003b3, 0x00000000);
nv_icmd(priv, 0x0003b4, 0x00000000);
nv_icmd(priv, 0x0003b5, 0x00000000);
nv_icmd(priv, 0x0003b6, 0x00000000);
nv_icmd(priv, 0x0003b7, 0x00000000);
nv_icmd(priv, 0x0003b8, 0x00000000);
nv_icmd(priv, 0x0003b9, 0x00000000);
nv_icmd(priv, 0x0003ba, 0x00000000);
nv_icmd(priv, 0x0003bb, 0x00000000);
nv_icmd(priv, 0x0003bc, 0x00000000);
nv_icmd(priv, 0x0003bd, 0x00000000);
nv_icmd(priv, 0x0003be, 0x00000000);
nv_icmd(priv, 0x0003bf, 0x00000000);
nv_icmd(priv, 0x0002a0, 0x00000000);
nv_icmd(priv, 0x0002a1, 0x00000000);
nv_icmd(priv, 0x0002a2, 0x00000000);
nv_icmd(priv, 0x0002a3, 0x00000000);
nv_icmd(priv, 0x0002a4, 0x00000000);
nv_icmd(priv, 0x0002a5, 0x00000000);
nv_icmd(priv, 0x0002a6, 0x00000000);
nv_icmd(priv, 0x0002a7, 0x00000000);
nv_icmd(priv, 0x0002a8, 0x00000000);
nv_icmd(priv, 0x0002a9, 0x00000000);
nv_icmd(priv, 0x0002aa, 0x00000000);
nv_icmd(priv, 0x0002ab, 0x00000000);
nv_icmd(priv, 0x0002ac, 0x00000000);
nv_icmd(priv, 0x0002ad, 0x00000000);
nv_icmd(priv, 0x0002ae, 0x00000000);
nv_icmd(priv, 0x0002af, 0x00000000);
nv_icmd(priv, 0x000420, 0x00000000);
nv_icmd(priv, 0x000421, 0x00000000);
nv_icmd(priv, 0x000422, 0x00000000);
nv_icmd(priv, 0x000423, 0x00000000);
nv_icmd(priv, 0x000424, 0x00000000);
nv_icmd(priv, 0x000425, 0x00000000);
nv_icmd(priv, 0x000426, 0x00000000);
nv_icmd(priv, 0x000427, 0x00000000);
nv_icmd(priv, 0x000428, 0x00000000);
nv_icmd(priv, 0x000429, 0x00000000);
nv_icmd(priv, 0x00042a, 0x00000000);
nv_icmd(priv, 0x00042b, 0x00000000);
nv_icmd(priv, 0x00042c, 0x00000000);
nv_icmd(priv, 0x00042d, 0x00000000);
nv_icmd(priv, 0x00042e, 0x00000000);
nv_icmd(priv, 0x00042f, 0x00000000);
nv_icmd(priv, 0x0002b0, 0x00000000);
nv_icmd(priv, 0x0002b1, 0x00000000);
nv_icmd(priv, 0x0002b2, 0x00000000);
nv_icmd(priv, 0x0002b3, 0x00000000);
nv_icmd(priv, 0x0002b4, 0x00000000);
nv_icmd(priv, 0x0002b5, 0x00000000);
nv_icmd(priv, 0x0002b6, 0x00000000);
nv_icmd(priv, 0x0002b7, 0x00000000);
nv_icmd(priv, 0x0002b8, 0x00000000);
nv_icmd(priv, 0x0002b9, 0x00000000);
nv_icmd(priv, 0x0002ba, 0x00000000);
nv_icmd(priv, 0x0002bb, 0x00000000);
nv_icmd(priv, 0x0002bc, 0x00000000);
nv_icmd(priv, 0x0002bd, 0x00000000);
nv_icmd(priv, 0x0002be, 0x00000000);
nv_icmd(priv, 0x0002bf, 0x00000000);
nv_icmd(priv, 0x000430, 0x00000000);
nv_icmd(priv, 0x000431, 0x00000000);
nv_icmd(priv, 0x000432, 0x00000000);
nv_icmd(priv, 0x000433, 0x00000000);
nv_icmd(priv, 0x000434, 0x00000000);
nv_icmd(priv, 0x000435, 0x00000000);
nv_icmd(priv, 0x000436, 0x00000000);
nv_icmd(priv, 0x000437, 0x00000000);
nv_icmd(priv, 0x000438, 0x00000000);
nv_icmd(priv, 0x000439, 0x00000000);
nv_icmd(priv, 0x00043a, 0x00000000);
nv_icmd(priv, 0x00043b, 0x00000000);
nv_icmd(priv, 0x00043c, 0x00000000);
nv_icmd(priv, 0x00043d, 0x00000000);
nv_icmd(priv, 0x00043e, 0x00000000);
nv_icmd(priv, 0x00043f, 0x00000000);
nv_icmd(priv, 0x0002c0, 0x00000000);
nv_icmd(priv, 0x0002c1, 0x00000000);
nv_icmd(priv, 0x0002c2, 0x00000000);
nv_icmd(priv, 0x0002c3, 0x00000000);
nv_icmd(priv, 0x0002c4, 0x00000000);
nv_icmd(priv, 0x0002c5, 0x00000000);
nv_icmd(priv, 0x0002c6, 0x00000000);
nv_icmd(priv, 0x0002c7, 0x00000000);
nv_icmd(priv, 0x0002c8, 0x00000000);
nv_icmd(priv, 0x0002c9, 0x00000000);
nv_icmd(priv, 0x0002ca, 0x00000000);
nv_icmd(priv, 0x0002cb, 0x00000000);
nv_icmd(priv, 0x0002cc, 0x00000000);
nv_icmd(priv, 0x0002cd, 0x00000000);
nv_icmd(priv, 0x0002ce, 0x00000000);
nv_icmd(priv, 0x0002cf, 0x00000000);
nv_icmd(priv, 0x0004d0, 0x00000000);
nv_icmd(priv, 0x0004d1, 0x00000000);
nv_icmd(priv, 0x0004d2, 0x00000000);
nv_icmd(priv, 0x0004d3, 0x00000000);
nv_icmd(priv, 0x0004d4, 0x00000000);
nv_icmd(priv, 0x0004d5, 0x00000000);
nv_icmd(priv, 0x0004d6, 0x00000000);
nv_icmd(priv, 0x0004d7, 0x00000000);
nv_icmd(priv, 0x0004d8, 0x00000000);
nv_icmd(priv, 0x0004d9, 0x00000000);
nv_icmd(priv, 0x0004da, 0x00000000);
nv_icmd(priv, 0x0004db, 0x00000000);
nv_icmd(priv, 0x0004dc, 0x00000000);
nv_icmd(priv, 0x0004dd, 0x00000000);
nv_icmd(priv, 0x0004de, 0x00000000);
nv_icmd(priv, 0x0004df, 0x00000000);
nv_icmd(priv, 0x000720, 0x00000000);
nv_icmd(priv, 0x000721, 0x00000000);
nv_icmd(priv, 0x000722, 0x00000000);
nv_icmd(priv, 0x000723, 0x00000000);
nv_icmd(priv, 0x000724, 0x00000000);
nv_icmd(priv, 0x000725, 0x00000000);
nv_icmd(priv, 0x000726, 0x00000000);
nv_icmd(priv, 0x000727, 0x00000000);
nv_icmd(priv, 0x000728, 0x00000000);
nv_icmd(priv, 0x000729, 0x00000000);
nv_icmd(priv, 0x00072a, 0x00000000);
nv_icmd(priv, 0x00072b, 0x00000000);
nv_icmd(priv, 0x00072c, 0x00000000);
nv_icmd(priv, 0x00072d, 0x00000000);
nv_icmd(priv, 0x00072e, 0x00000000);
nv_icmd(priv, 0x00072f, 0x00000000);
nv_icmd(priv, 0x0008c0, 0x00000000);
nv_icmd(priv, 0x0008c1, 0x00000000);
nv_icmd(priv, 0x0008c2, 0x00000000);
nv_icmd(priv, 0x0008c3, 0x00000000);
nv_icmd(priv, 0x0008c4, 0x00000000);
nv_icmd(priv, 0x0008c5, 0x00000000);
nv_icmd(priv, 0x0008c6, 0x00000000);
nv_icmd(priv, 0x0008c7, 0x00000000);
nv_icmd(priv, 0x0008c8, 0x00000000);
nv_icmd(priv, 0x0008c9, 0x00000000);
nv_icmd(priv, 0x0008ca, 0x00000000);
nv_icmd(priv, 0x0008cb, 0x00000000);
nv_icmd(priv, 0x0008cc, 0x00000000);
nv_icmd(priv, 0x0008cd, 0x00000000);
nv_icmd(priv, 0x0008ce, 0x00000000);
nv_icmd(priv, 0x0008cf, 0x00000000);
nv_icmd(priv, 0x000890, 0x00000000);
nv_icmd(priv, 0x000891, 0x00000000);
nv_icmd(priv, 0x000892, 0x00000000);
nv_icmd(priv, 0x000893, 0x00000000);
nv_icmd(priv, 0x000894, 0x00000000);
nv_icmd(priv, 0x000895, 0x00000000);
nv_icmd(priv, 0x000896, 0x00000000);
nv_icmd(priv, 0x000897, 0x00000000);
nv_icmd(priv, 0x000898, 0x00000000);
nv_icmd(priv, 0x000899, 0x00000000);
nv_icmd(priv, 0x00089a, 0x00000000);
nv_icmd(priv, 0x00089b, 0x00000000);
nv_icmd(priv, 0x00089c, 0x00000000);
nv_icmd(priv, 0x00089d, 0x00000000);
nv_icmd(priv, 0x00089e, 0x00000000);
nv_icmd(priv, 0x00089f, 0x00000000);
nv_icmd(priv, 0x0008e0, 0x00000000);
nv_icmd(priv, 0x0008e1, 0x00000000);
nv_icmd(priv, 0x0008e2, 0x00000000);
nv_icmd(priv, 0x0008e3, 0x00000000);
nv_icmd(priv, 0x0008e4, 0x00000000);
nv_icmd(priv, 0x0008e5, 0x00000000);
nv_icmd(priv, 0x0008e6, 0x00000000);
nv_icmd(priv, 0x0008e7, 0x00000000);
nv_icmd(priv, 0x0008e8, 0x00000000);
nv_icmd(priv, 0x0008e9, 0x00000000);
nv_icmd(priv, 0x0008ea, 0x00000000);
nv_icmd(priv, 0x0008eb, 0x00000000);
nv_icmd(priv, 0x0008ec, 0x00000000);
nv_icmd(priv, 0x0008ed, 0x00000000);
nv_icmd(priv, 0x0008ee, 0x00000000);
nv_icmd(priv, 0x0008ef, 0x00000000);
nv_icmd(priv, 0x0008a0, 0x00000000);
nv_icmd(priv, 0x0008a1, 0x00000000);
nv_icmd(priv, 0x0008a2, 0x00000000);
nv_icmd(priv, 0x0008a3, 0x00000000);
nv_icmd(priv, 0x0008a4, 0x00000000);
nv_icmd(priv, 0x0008a5, 0x00000000);
nv_icmd(priv, 0x0008a6, 0x00000000);
nv_icmd(priv, 0x0008a7, 0x00000000);
nv_icmd(priv, 0x0008a8, 0x00000000);
nv_icmd(priv, 0x0008a9, 0x00000000);
nv_icmd(priv, 0x0008aa, 0x00000000);
nv_icmd(priv, 0x0008ab, 0x00000000);
nv_icmd(priv, 0x0008ac, 0x00000000);
nv_icmd(priv, 0x0008ad, 0x00000000);
nv_icmd(priv, 0x0008ae, 0x00000000);
nv_icmd(priv, 0x0008af, 0x00000000);
nv_icmd(priv, 0x0008f0, 0x00000000);
nv_icmd(priv, 0x0008f1, 0x00000000);
nv_icmd(priv, 0x0008f2, 0x00000000);
nv_icmd(priv, 0x0008f3, 0x00000000);
nv_icmd(priv, 0x0008f4, 0x00000000);
nv_icmd(priv, 0x0008f5, 0x00000000);
nv_icmd(priv, 0x0008f6, 0x00000000);
nv_icmd(priv, 0x0008f7, 0x00000000);
nv_icmd(priv, 0x0008f8, 0x00000000);
nv_icmd(priv, 0x0008f9, 0x00000000);
nv_icmd(priv, 0x0008fa, 0x00000000);
nv_icmd(priv, 0x0008fb, 0x00000000);
nv_icmd(priv, 0x0008fc, 0x00000000);
nv_icmd(priv, 0x0008fd, 0x00000000);
nv_icmd(priv, 0x0008fe, 0x00000000);
nv_icmd(priv, 0x0008ff, 0x00000000);
nv_icmd(priv, 0x00094c, 0x000000ff);
nv_icmd(priv, 0x00094d, 0xffffffff);
nv_icmd(priv, 0x00094e, 0x00000002);
nv_icmd(priv, 0x0002ec, 0x00000001);
nv_icmd(priv, 0x000303, 0x00000001);
nv_icmd(priv, 0x0002e6, 0x00000001);
nv_icmd(priv, 0x000466, 0x00000052);
nv_icmd(priv, 0x000301, 0x3f800000);
nv_icmd(priv, 0x000304, 0x30201000);
nv_icmd(priv, 0x000305, 0x70605040);
nv_icmd(priv, 0x000306, 0xb8a89888);
nv_icmd(priv, 0x000307, 0xf8e8d8c8);
nv_icmd(priv, 0x00030a, 0x00ffff00);
nv_icmd(priv, 0x00030b, 0x0000001a);
nv_icmd(priv, 0x00030c, 0x00000001);
nv_icmd(priv, 0x000318, 0x00000001);
nv_icmd(priv, 0x000340, 0x00000000);
nv_icmd(priv, 0x000375, 0x00000001);
nv_icmd(priv, 0x00037d, 0x00000006);
nv_icmd(priv, 0x0003a0, 0x00000002);
nv_icmd(priv, 0x0003aa, 0x00000001);
nv_icmd(priv, 0x0003a9, 0x00000001);
nv_icmd(priv, 0x000380, 0x00000001);
nv_icmd(priv, 0x000383, 0x00000011);
nv_icmd(priv, 0x000360, 0x00000040);
nv_icmd(priv, 0x000366, 0x00000000);
nv_icmd(priv, 0x000367, 0x00000000);
nv_icmd(priv, 0x000368, 0x00000fff);
nv_icmd(priv, 0x000370, 0x00000000);
nv_icmd(priv, 0x000371, 0x00000000);
nv_icmd(priv, 0x000372, 0x000fffff);
nv_icmd(priv, 0x00037a, 0x00000012);
nv_icmd(priv, 0x000619, 0x00000003);
nv_icmd(priv, 0x000811, 0x00000003);
nv_icmd(priv, 0x000812, 0x00000004);
nv_icmd(priv, 0x000813, 0x00000006);
nv_icmd(priv, 0x000814, 0x00000008);
nv_icmd(priv, 0x000815, 0x0000000b);
nv_icmd(priv, 0x000800, 0x00000001);
nv_icmd(priv, 0x000801, 0x00000001);
nv_icmd(priv, 0x000802, 0x00000001);
nv_icmd(priv, 0x000803, 0x00000001);
nv_icmd(priv, 0x000804, 0x00000001);
nv_icmd(priv, 0x000805, 0x00000001);
nv_icmd(priv, 0x000632, 0x00000001);
nv_icmd(priv, 0x000633, 0x00000002);
nv_icmd(priv, 0x000634, 0x00000003);
nv_icmd(priv, 0x000635, 0x00000004);
nv_icmd(priv, 0x000654, 0x3f800000);
nv_icmd(priv, 0x000657, 0x3f800000);
nv_icmd(priv, 0x000655, 0x3f800000);
nv_icmd(priv, 0x000656, 0x3f800000);
nv_icmd(priv, 0x0006cd, 0x3f800000);
nv_icmd(priv, 0x0007f5, 0x3f800000);
nv_icmd(priv, 0x0007dc, 0x39291909);
nv_icmd(priv, 0x0007dd, 0x79695949);
nv_icmd(priv, 0x0007de, 0xb9a99989);
nv_icmd(priv, 0x0007df, 0xf9e9d9c9);
nv_icmd(priv, 0x0007e8, 0x00003210);
nv_icmd(priv, 0x0007e9, 0x00007654);
nv_icmd(priv, 0x0007ea, 0x00000098);
nv_icmd(priv, 0x0007ec, 0x39291909);
nv_icmd(priv, 0x0007ed, 0x79695949);
nv_icmd(priv, 0x0007ee, 0xb9a99989);
nv_icmd(priv, 0x0007ef, 0xf9e9d9c9);
nv_icmd(priv, 0x0007f0, 0x00003210);
nv_icmd(priv, 0x0007f1, 0x00007654);
nv_icmd(priv, 0x0007f2, 0x00000098);
nv_icmd(priv, 0x0005a5, 0x00000001);
nv_icmd(priv, 0x000980, 0x00000000);
nv_icmd(priv, 0x000981, 0x00000000);
nv_icmd(priv, 0x000982, 0x00000000);
nv_icmd(priv, 0x000983, 0x00000000);
nv_icmd(priv, 0x000984, 0x00000000);
nv_icmd(priv, 0x000985, 0x00000000);
nv_icmd(priv, 0x000986, 0x00000000);
nv_icmd(priv, 0x000987, 0x00000000);
nv_icmd(priv, 0x000988, 0x00000000);
nv_icmd(priv, 0x000989, 0x00000000);
nv_icmd(priv, 0x00098a, 0x00000000);
nv_icmd(priv, 0x00098b, 0x00000000);
nv_icmd(priv, 0x00098c, 0x00000000);
nv_icmd(priv, 0x00098d, 0x00000000);
nv_icmd(priv, 0x00098e, 0x00000000);
nv_icmd(priv, 0x00098f, 0x00000000);
nv_icmd(priv, 0x000990, 0x00000000);
nv_icmd(priv, 0x000991, 0x00000000);
nv_icmd(priv, 0x000992, 0x00000000);
nv_icmd(priv, 0x000993, 0x00000000);
nv_icmd(priv, 0x000994, 0x00000000);
nv_icmd(priv, 0x000995, 0x00000000);
nv_icmd(priv, 0x000996, 0x00000000);
nv_icmd(priv, 0x000997, 0x00000000);
nv_icmd(priv, 0x000998, 0x00000000);
nv_icmd(priv, 0x000999, 0x00000000);
nv_icmd(priv, 0x00099a, 0x00000000);
nv_icmd(priv, 0x00099b, 0x00000000);
nv_icmd(priv, 0x00099c, 0x00000000);
nv_icmd(priv, 0x00099d, 0x00000000);
nv_icmd(priv, 0x00099e, 0x00000000);
nv_icmd(priv, 0x00099f, 0x00000000);
nv_icmd(priv, 0x0009a0, 0x00000000);
nv_icmd(priv, 0x0009a1, 0x00000000);
nv_icmd(priv, 0x0009a2, 0x00000000);
nv_icmd(priv, 0x0009a3, 0x00000000);
nv_icmd(priv, 0x0009a4, 0x00000000);
nv_icmd(priv, 0x0009a5, 0x00000000);
nv_icmd(priv, 0x0009a6, 0x00000000);
nv_icmd(priv, 0x0009a7, 0x00000000);
nv_icmd(priv, 0x0009a8, 0x00000000);
nv_icmd(priv, 0x0009a9, 0x00000000);
nv_icmd(priv, 0x0009aa, 0x00000000);
nv_icmd(priv, 0x0009ab, 0x00000000);
nv_icmd(priv, 0x0009ac, 0x00000000);
nv_icmd(priv, 0x0009ad, 0x00000000);
nv_icmd(priv, 0x0009ae, 0x00000000);
nv_icmd(priv, 0x0009af, 0x00000000);
nv_icmd(priv, 0x0009b0, 0x00000000);
nv_icmd(priv, 0x0009b1, 0x00000000);
nv_icmd(priv, 0x0009b2, 0x00000000);
nv_icmd(priv, 0x0009b3, 0x00000000);
nv_icmd(priv, 0x0009b4, 0x00000000);
nv_icmd(priv, 0x0009b5, 0x00000000);
nv_icmd(priv, 0x0009b6, 0x00000000);
nv_icmd(priv, 0x0009b7, 0x00000000);
nv_icmd(priv, 0x0009b8, 0x00000000);
nv_icmd(priv, 0x0009b9, 0x00000000);
nv_icmd(priv, 0x0009ba, 0x00000000);
nv_icmd(priv, 0x0009bb, 0x00000000);
nv_icmd(priv, 0x0009bc, 0x00000000);
nv_icmd(priv, 0x0009bd, 0x00000000);
nv_icmd(priv, 0x0009be, 0x00000000);
nv_icmd(priv, 0x0009bf, 0x00000000);
nv_icmd(priv, 0x0009c0, 0x00000000);
nv_icmd(priv, 0x0009c1, 0x00000000);
nv_icmd(priv, 0x0009c2, 0x00000000);
nv_icmd(priv, 0x0009c3, 0x00000000);
nv_icmd(priv, 0x0009c4, 0x00000000);
nv_icmd(priv, 0x0009c5, 0x00000000);
nv_icmd(priv, 0x0009c6, 0x00000000);
nv_icmd(priv, 0x0009c7, 0x00000000);
nv_icmd(priv, 0x0009c8, 0x00000000);
nv_icmd(priv, 0x0009c9, 0x00000000);
nv_icmd(priv, 0x0009ca, 0x00000000);
nv_icmd(priv, 0x0009cb, 0x00000000);
nv_icmd(priv, 0x0009cc, 0x00000000);
nv_icmd(priv, 0x0009cd, 0x00000000);
nv_icmd(priv, 0x0009ce, 0x00000000);
nv_icmd(priv, 0x0009cf, 0x00000000);
nv_icmd(priv, 0x0009d0, 0x00000000);
nv_icmd(priv, 0x0009d1, 0x00000000);
nv_icmd(priv, 0x0009d2, 0x00000000);
nv_icmd(priv, 0x0009d3, 0x00000000);
nv_icmd(priv, 0x0009d4, 0x00000000);
nv_icmd(priv, 0x0009d5, 0x00000000);
nv_icmd(priv, 0x0009d6, 0x00000000);
nv_icmd(priv, 0x0009d7, 0x00000000);
nv_icmd(priv, 0x0009d8, 0x00000000);
nv_icmd(priv, 0x0009d9, 0x00000000);
nv_icmd(priv, 0x0009da, 0x00000000);
nv_icmd(priv, 0x0009db, 0x00000000);
nv_icmd(priv, 0x0009dc, 0x00000000);
nv_icmd(priv, 0x0009dd, 0x00000000);
nv_icmd(priv, 0x0009de, 0x00000000);
nv_icmd(priv, 0x0009df, 0x00000000);
nv_icmd(priv, 0x0009e0, 0x00000000);
nv_icmd(priv, 0x0009e1, 0x00000000);
nv_icmd(priv, 0x0009e2, 0x00000000);
nv_icmd(priv, 0x0009e3, 0x00000000);
nv_icmd(priv, 0x0009e4, 0x00000000);
nv_icmd(priv, 0x0009e5, 0x00000000);
nv_icmd(priv, 0x0009e6, 0x00000000);
nv_icmd(priv, 0x0009e7, 0x00000000);
nv_icmd(priv, 0x0009e8, 0x00000000);
nv_icmd(priv, 0x0009e9, 0x00000000);
nv_icmd(priv, 0x0009ea, 0x00000000);
nv_icmd(priv, 0x0009eb, 0x00000000);
nv_icmd(priv, 0x0009ec, 0x00000000);
nv_icmd(priv, 0x0009ed, 0x00000000);
nv_icmd(priv, 0x0009ee, 0x00000000);
nv_icmd(priv, 0x0009ef, 0x00000000);
nv_icmd(priv, 0x0009f0, 0x00000000);
nv_icmd(priv, 0x0009f1, 0x00000000);
nv_icmd(priv, 0x0009f2, 0x00000000);
nv_icmd(priv, 0x0009f3, 0x00000000);
nv_icmd(priv, 0x0009f4, 0x00000000);
nv_icmd(priv, 0x0009f5, 0x00000000);
nv_icmd(priv, 0x0009f6, 0x00000000);
nv_icmd(priv, 0x0009f7, 0x00000000);
nv_icmd(priv, 0x0009f8, 0x00000000);
nv_icmd(priv, 0x0009f9, 0x00000000);
nv_icmd(priv, 0x0009fa, 0x00000000);
nv_icmd(priv, 0x0009fb, 0x00000000);
nv_icmd(priv, 0x0009fc, 0x00000000);
nv_icmd(priv, 0x0009fd, 0x00000000);
nv_icmd(priv, 0x0009fe, 0x00000000);
nv_icmd(priv, 0x0009ff, 0x00000000);
nv_icmd(priv, 0x000468, 0x00000004);
nv_icmd(priv, 0x00046c, 0x00000001);
nv_icmd(priv, 0x000470, 0x00000000);
nv_icmd(priv, 0x000471, 0x00000000);
nv_icmd(priv, 0x000472, 0x00000000);
nv_icmd(priv, 0x000473, 0x00000000);
nv_icmd(priv, 0x000474, 0x00000000);
nv_icmd(priv, 0x000475, 0x00000000);
nv_icmd(priv, 0x000476, 0x00000000);
nv_icmd(priv, 0x000477, 0x00000000);
nv_icmd(priv, 0x000478, 0x00000000);
nv_icmd(priv, 0x000479, 0x00000000);
nv_icmd(priv, 0x00047a, 0x00000000);
nv_icmd(priv, 0x00047b, 0x00000000);
nv_icmd(priv, 0x00047c, 0x00000000);
nv_icmd(priv, 0x00047d, 0x00000000);
nv_icmd(priv, 0x00047e, 0x00000000);
nv_icmd(priv, 0x00047f, 0x00000000);
nv_icmd(priv, 0x000480, 0x00000000);
nv_icmd(priv, 0x000481, 0x00000000);
nv_icmd(priv, 0x000482, 0x00000000);
nv_icmd(priv, 0x000483, 0x00000000);
nv_icmd(priv, 0x000484, 0x00000000);
nv_icmd(priv, 0x000485, 0x00000000);
nv_icmd(priv, 0x000486, 0x00000000);
nv_icmd(priv, 0x000487, 0x00000000);
nv_icmd(priv, 0x000488, 0x00000000);
nv_icmd(priv, 0x000489, 0x00000000);
nv_icmd(priv, 0x00048a, 0x00000000);
nv_icmd(priv, 0x00048b, 0x00000000);
nv_icmd(priv, 0x00048c, 0x00000000);
nv_icmd(priv, 0x00048d, 0x00000000);
nv_icmd(priv, 0x00048e, 0x00000000);
nv_icmd(priv, 0x00048f, 0x00000000);
nv_icmd(priv, 0x000490, 0x00000000);
nv_icmd(priv, 0x000491, 0x00000000);
nv_icmd(priv, 0x000492, 0x00000000);
nv_icmd(priv, 0x000493, 0x00000000);
nv_icmd(priv, 0x000494, 0x00000000);
nv_icmd(priv, 0x000495, 0x00000000);
nv_icmd(priv, 0x000496, 0x00000000);
nv_icmd(priv, 0x000497, 0x00000000);
nv_icmd(priv, 0x000498, 0x00000000);
nv_icmd(priv, 0x000499, 0x00000000);
nv_icmd(priv, 0x00049a, 0x00000000);
nv_icmd(priv, 0x00049b, 0x00000000);
nv_icmd(priv, 0x00049c, 0x00000000);
nv_icmd(priv, 0x00049d, 0x00000000);
nv_icmd(priv, 0x00049e, 0x00000000);
nv_icmd(priv, 0x00049f, 0x00000000);
nv_icmd(priv, 0x0004a0, 0x00000000);
nv_icmd(priv, 0x0004a1, 0x00000000);
nv_icmd(priv, 0x0004a2, 0x00000000);
nv_icmd(priv, 0x0004a3, 0x00000000);
nv_icmd(priv, 0x0004a4, 0x00000000);
nv_icmd(priv, 0x0004a5, 0x00000000);
nv_icmd(priv, 0x0004a6, 0x00000000);
nv_icmd(priv, 0x0004a7, 0x00000000);
nv_icmd(priv, 0x0004a8, 0x00000000);
nv_icmd(priv, 0x0004a9, 0x00000000);
nv_icmd(priv, 0x0004aa, 0x00000000);
nv_icmd(priv, 0x0004ab, 0x00000000);
nv_icmd(priv, 0x0004ac, 0x00000000);
nv_icmd(priv, 0x0004ad, 0x00000000);
nv_icmd(priv, 0x0004ae, 0x00000000);
nv_icmd(priv, 0x0004af, 0x00000000);
nv_icmd(priv, 0x0004b0, 0x00000000);
nv_icmd(priv, 0x0004b1, 0x00000000);
nv_icmd(priv, 0x0004b2, 0x00000000);
nv_icmd(priv, 0x0004b3, 0x00000000);
nv_icmd(priv, 0x0004b4, 0x00000000);
nv_icmd(priv, 0x0004b5, 0x00000000);
nv_icmd(priv, 0x0004b6, 0x00000000);
nv_icmd(priv, 0x0004b7, 0x00000000);
nv_icmd(priv, 0x0004b8, 0x00000000);
nv_icmd(priv, 0x0004b9, 0x00000000);
nv_icmd(priv, 0x0004ba, 0x00000000);
nv_icmd(priv, 0x0004bb, 0x00000000);
nv_icmd(priv, 0x0004bc, 0x00000000);
nv_icmd(priv, 0x0004bd, 0x00000000);
nv_icmd(priv, 0x0004be, 0x00000000);
nv_icmd(priv, 0x0004bf, 0x00000000);
nv_icmd(priv, 0x0004c0, 0x00000000);
nv_icmd(priv, 0x0004c1, 0x00000000);
nv_icmd(priv, 0x0004c2, 0x00000000);
nv_icmd(priv, 0x0004c3, 0x00000000);
nv_icmd(priv, 0x0004c4, 0x00000000);
nv_icmd(priv, 0x0004c5, 0x00000000);
nv_icmd(priv, 0x0004c6, 0x00000000);
nv_icmd(priv, 0x0004c7, 0x00000000);
nv_icmd(priv, 0x0004c8, 0x00000000);
nv_icmd(priv, 0x0004c9, 0x00000000);
nv_icmd(priv, 0x0004ca, 0x00000000);
nv_icmd(priv, 0x0004cb, 0x00000000);
nv_icmd(priv, 0x0004cc, 0x00000000);
nv_icmd(priv, 0x0004cd, 0x00000000);
nv_icmd(priv, 0x0004ce, 0x00000000);
nv_icmd(priv, 0x0004cf, 0x00000000);
nv_icmd(priv, 0x000510, 0x3f800000);
nv_icmd(priv, 0x000511, 0x3f800000);
nv_icmd(priv, 0x000512, 0x3f800000);
nv_icmd(priv, 0x000513, 0x3f800000);
nv_icmd(priv, 0x000514, 0x3f800000);
nv_icmd(priv, 0x000515, 0x3f800000);
nv_icmd(priv, 0x000516, 0x3f800000);
nv_icmd(priv, 0x000517, 0x3f800000);
nv_icmd(priv, 0x000518, 0x3f800000);
nv_icmd(priv, 0x000519, 0x3f800000);
nv_icmd(priv, 0x00051a, 0x3f800000);
nv_icmd(priv, 0x00051b, 0x3f800000);
nv_icmd(priv, 0x00051c, 0x3f800000);
nv_icmd(priv, 0x00051d, 0x3f800000);
nv_icmd(priv, 0x00051e, 0x3f800000);
nv_icmd(priv, 0x00051f, 0x3f800000);
nv_icmd(priv, 0x000520, 0x000002b6);
nv_icmd(priv, 0x000529, 0x00000001);
nv_icmd(priv, 0x000530, 0xffff0000);
nv_icmd(priv, 0x000531, 0xffff0000);
nv_icmd(priv, 0x000532, 0xffff0000);
nv_icmd(priv, 0x000533, 0xffff0000);
nv_icmd(priv, 0x000534, 0xffff0000);
nv_icmd(priv, 0x000535, 0xffff0000);
nv_icmd(priv, 0x000536, 0xffff0000);
nv_icmd(priv, 0x000537, 0xffff0000);
nv_icmd(priv, 0x000538, 0xffff0000);
nv_icmd(priv, 0x000539, 0xffff0000);
nv_icmd(priv, 0x00053a, 0xffff0000);
nv_icmd(priv, 0x00053b, 0xffff0000);
nv_icmd(priv, 0x00053c, 0xffff0000);
nv_icmd(priv, 0x00053d, 0xffff0000);
nv_icmd(priv, 0x00053e, 0xffff0000);
nv_icmd(priv, 0x00053f, 0xffff0000);
nv_icmd(priv, 0x000585, 0x0000003f);
nv_icmd(priv, 0x000576, 0x00000003);
nv_icmd(priv, 0x00057b, 0x00000059);
nv_icmd(priv, 0x000586, 0x00000040);
nv_icmd(priv, 0x000582, 0x00000080);
nv_icmd(priv, 0x000583, 0x00000080);
nv_icmd(priv, 0x0005c2, 0x00000001);
nv_icmd(priv, 0x000638, 0x00000001);
nv_icmd(priv, 0x000639, 0x00000001);
nv_icmd(priv, 0x00063a, 0x00000002);
nv_icmd(priv, 0x00063b, 0x00000001);
nv_icmd(priv, 0x00063c, 0x00000001);
nv_icmd(priv, 0x00063d, 0x00000002);
nv_icmd(priv, 0x00063e, 0x00000001);
nv_icmd(priv, 0x0008b8, 0x00000001);
nv_icmd(priv, 0x0008b9, 0x00000001);
nv_icmd(priv, 0x0008ba, 0x00000001);
nv_icmd(priv, 0x0008bb, 0x00000001);
nv_icmd(priv, 0x0008bc, 0x00000001);
nv_icmd(priv, 0x0008bd, 0x00000001);
nv_icmd(priv, 0x0008be, 0x00000001);
nv_icmd(priv, 0x0008bf, 0x00000001);
nv_icmd(priv, 0x000900, 0x00000001);
nv_icmd(priv, 0x000901, 0x00000001);
nv_icmd(priv, 0x000902, 0x00000001);
nv_icmd(priv, 0x000903, 0x00000001);
nv_icmd(priv, 0x000904, 0x00000001);
nv_icmd(priv, 0x000905, 0x00000001);
nv_icmd(priv, 0x000906, 0x00000001);
nv_icmd(priv, 0x000907, 0x00000001);
nv_icmd(priv, 0x000908, 0x00000002);
nv_icmd(priv, 0x000909, 0x00000002);
nv_icmd(priv, 0x00090a, 0x00000002);
nv_icmd(priv, 0x00090b, 0x00000002);
nv_icmd(priv, 0x00090c, 0x00000002);
nv_icmd(priv, 0x00090d, 0x00000002);
nv_icmd(priv, 0x00090e, 0x00000002);
nv_icmd(priv, 0x00090f, 0x00000002);
nv_icmd(priv, 0x000910, 0x00000001);
nv_icmd(priv, 0x000911, 0x00000001);
nv_icmd(priv, 0x000912, 0x00000001);
nv_icmd(priv, 0x000913, 0x00000001);
nv_icmd(priv, 0x000914, 0x00000001);
nv_icmd(priv, 0x000915, 0x00000001);
nv_icmd(priv, 0x000916, 0x00000001);
nv_icmd(priv, 0x000917, 0x00000001);
nv_icmd(priv, 0x000918, 0x00000001);
nv_icmd(priv, 0x000919, 0x00000001);
nv_icmd(priv, 0x00091a, 0x00000001);
nv_icmd(priv, 0x00091b, 0x00000001);
nv_icmd(priv, 0x00091c, 0x00000001);
nv_icmd(priv, 0x00091d, 0x00000001);
nv_icmd(priv, 0x00091e, 0x00000001);
nv_icmd(priv, 0x00091f, 0x00000001);
nv_icmd(priv, 0x000920, 0x00000002);
nv_icmd(priv, 0x000921, 0x00000002);
nv_icmd(priv, 0x000922, 0x00000002);
nv_icmd(priv, 0x000923, 0x00000002);
nv_icmd(priv, 0x000924, 0x00000002);
nv_icmd(priv, 0x000925, 0x00000002);
nv_icmd(priv, 0x000926, 0x00000002);
nv_icmd(priv, 0x000927, 0x00000002);
nv_icmd(priv, 0x000928, 0x00000001);
nv_icmd(priv, 0x000929, 0x00000001);
nv_icmd(priv, 0x00092a, 0x00000001);
nv_icmd(priv, 0x00092b, 0x00000001);
nv_icmd(priv, 0x00092c, 0x00000001);
nv_icmd(priv, 0x00092d, 0x00000001);
nv_icmd(priv, 0x00092e, 0x00000001);
nv_icmd(priv, 0x00092f, 0x00000001);
nv_icmd(priv, 0x000648, 0x00000001);
nv_icmd(priv, 0x000649, 0x00000001);
nv_icmd(priv, 0x00064a, 0x00000001);
nv_icmd(priv, 0x00064b, 0x00000001);
nv_icmd(priv, 0x00064c, 0x00000001);
nv_icmd(priv, 0x00064d, 0x00000001);
nv_icmd(priv, 0x00064e, 0x00000001);
nv_icmd(priv, 0x00064f, 0x00000001);
nv_icmd(priv, 0x000650, 0x00000001);
nv_icmd(priv, 0x000658, 0x0000000f);
nv_icmd(priv, 0x0007ff, 0x0000000a);
nv_icmd(priv, 0x00066a, 0x40000000);
nv_icmd(priv, 0x00066b, 0x10000000);
nv_icmd(priv, 0x00066c, 0xffff0000);
nv_icmd(priv, 0x00066d, 0xffff0000);
nv_icmd(priv, 0x0007af, 0x00000008);
nv_icmd(priv, 0x0007b0, 0x00000008);
nv_icmd(priv, 0x0007f6, 0x00000001);
nv_icmd(priv, 0x0006b2, 0x00000055);
nv_icmd(priv, 0x0007ad, 0x00000003);
nv_icmd(priv, 0x000937, 0x00000001);
nv_icmd(priv, 0x000971, 0x00000008);
nv_icmd(priv, 0x000972, 0x00000040);
nv_icmd(priv, 0x000973, 0x0000012c);
nv_icmd(priv, 0x00097c, 0x00000040);
nv_icmd(priv, 0x000979, 0x00000003);
nv_icmd(priv, 0x000975, 0x00000020);
nv_icmd(priv, 0x000976, 0x00000001);
nv_icmd(priv, 0x000977, 0x00000020);
nv_icmd(priv, 0x000978, 0x00000001);
nv_icmd(priv, 0x000957, 0x00000003);
nv_icmd(priv, 0x00095e, 0x20164010);
nv_icmd(priv, 0x00095f, 0x00000020);
nv_icmd(priv, 0x00097d, 0x00000020);
nv_icmd(priv, 0x000683, 0x00000006);
nv_icmd(priv, 0x000685, 0x003fffff);
nv_icmd(priv, 0x000687, 0x003fffff);
nv_icmd(priv, 0x0006a0, 0x00000005);
nv_icmd(priv, 0x000840, 0x00400008);
nv_icmd(priv, 0x000841, 0x08000080);
nv_icmd(priv, 0x000842, 0x00400008);
nv_icmd(priv, 0x000843, 0x08000080);
nv_icmd(priv, 0x000818, 0x00000000);
nv_icmd(priv, 0x000819, 0x00000000);
nv_icmd(priv, 0x00081a, 0x00000000);
nv_icmd(priv, 0x00081b, 0x00000000);
nv_icmd(priv, 0x00081c, 0x00000000);
nv_icmd(priv, 0x00081d, 0x00000000);
nv_icmd(priv, 0x00081e, 0x00000000);
nv_icmd(priv, 0x00081f, 0x00000000);
nv_icmd(priv, 0x000848, 0x00000000);
nv_icmd(priv, 0x000849, 0x00000000);
nv_icmd(priv, 0x00084a, 0x00000000);
nv_icmd(priv, 0x00084b, 0x00000000);
nv_icmd(priv, 0x00084c, 0x00000000);
nv_icmd(priv, 0x00084d, 0x00000000);
nv_icmd(priv, 0x00084e, 0x00000000);
nv_icmd(priv, 0x00084f, 0x00000000);
nv_icmd(priv, 0x000850, 0x00000000);
nv_icmd(priv, 0x000851, 0x00000000);
nv_icmd(priv, 0x000852, 0x00000000);
nv_icmd(priv, 0x000853, 0x00000000);
nv_icmd(priv, 0x000854, 0x00000000);
nv_icmd(priv, 0x000855, 0x00000000);
nv_icmd(priv, 0x000856, 0x00000000);
nv_icmd(priv, 0x000857, 0x00000000);
nv_icmd(priv, 0x000738, 0x00000000);
nv_icmd(priv, 0x0006aa, 0x00000001);
nv_icmd(priv, 0x0006ab, 0x00000002);
nv_icmd(priv, 0x0006ac, 0x00000080);
nv_icmd(priv, 0x0006ad, 0x00000100);
nv_icmd(priv, 0x0006ae, 0x00000100);
nv_icmd(priv, 0x0006b1, 0x00000011);
nv_icmd(priv, 0x0006bb, 0x000000cf);
nv_icmd(priv, 0x0006ce, 0x2a712488);
nv_icmd(priv, 0x000739, 0x4085c000);
nv_icmd(priv, 0x00073a, 0x00000080);
nv_icmd(priv, 0x000786, 0x80000100);
nv_icmd(priv, 0x00073c, 0x00010100);
nv_icmd(priv, 0x00073d, 0x02800000);
nv_icmd(priv, 0x000787, 0x000000cf);
nv_icmd(priv, 0x00078c, 0x00000008);
nv_icmd(priv, 0x000792, 0x00000001);
nv_icmd(priv, 0x000794, 0x00000001);
nv_icmd(priv, 0x000795, 0x00000001);
nv_icmd(priv, 0x000796, 0x00000001);
nv_icmd(priv, 0x000797, 0x000000cf);
nv_icmd(priv, 0x000836, 0x00000001);
nv_icmd(priv, 0x00079a, 0x00000002);
nv_icmd(priv, 0x000833, 0x04444480);
nv_icmd(priv, 0x0007a1, 0x00000001);
nv_icmd(priv, 0x0007a3, 0x00000001);
nv_icmd(priv, 0x0007a4, 0x00000001);
nv_icmd(priv, 0x0007a5, 0x00000001);
nv_icmd(priv, 0x000831, 0x00000004);
nv_icmd(priv, 0x000b07, 0x00000002);
nv_icmd(priv, 0x000b08, 0x00000100);
nv_icmd(priv, 0x000b09, 0x00000100);
nv_icmd(priv, 0x000b0a, 0x00000001);
nv_icmd(priv, 0x000a04, 0x000000ff);
nv_icmd(priv, 0x000a0b, 0x00000040);
nv_icmd(priv, 0x00097f, 0x00000100);
nv_icmd(priv, 0x000a02, 0x00000001);
nv_icmd(priv, 0x000809, 0x00000007);
nv_icmd(priv, 0x00c221, 0x00000040);
nv_icmd(priv, 0x00c1b0, 0x0000000f);
nv_icmd(priv, 0x00c1b1, 0x0000000f);
nv_icmd(priv, 0x00c1b2, 0x0000000f);
nv_icmd(priv, 0x00c1b3, 0x0000000f);
nv_icmd(priv, 0x00c1b4, 0x0000000f);
nv_icmd(priv, 0x00c1b5, 0x0000000f);
nv_icmd(priv, 0x00c1b6, 0x0000000f);
nv_icmd(priv, 0x00c1b7, 0x0000000f);
nv_icmd(priv, 0x00c1b8, 0x0fac6881);
nv_icmd(priv, 0x00c1b9, 0x00fac688);
nv_icmd(priv, 0x00c401, 0x00000001);
nv_icmd(priv, 0x00c402, 0x00010001);
nv_icmd(priv, 0x00c403, 0x00000001);
nv_icmd(priv, 0x00c404, 0x00000001);
nv_icmd(priv, 0x00c40e, 0x00000020);
nv_icmd(priv, 0x00c500, 0x00000003);
nv_icmd(priv, 0x01e100, 0x00000001);
nv_icmd(priv, 0x001000, 0x00000002);
nv_icmd(priv, 0x0006aa, 0x00000001);
nv_icmd(priv, 0x0006ad, 0x00000100);
nv_icmd(priv, 0x0006ae, 0x00000100);
nv_icmd(priv, 0x0006b1, 0x00000011);
nv_icmd(priv, 0x00078c, 0x00000008);
nv_icmd(priv, 0x000792, 0x00000001);
nv_icmd(priv, 0x000794, 0x00000001);
nv_icmd(priv, 0x000795, 0x00000001);
nv_icmd(priv, 0x000796, 0x00000001);
nv_icmd(priv, 0x000797, 0x000000cf);
nv_icmd(priv, 0x00079a, 0x00000002);
nv_icmd(priv, 0x000833, 0x04444480);
nv_icmd(priv, 0x0007a1, 0x00000001);
nv_icmd(priv, 0x0007a3, 0x00000001);
nv_icmd(priv, 0x0007a4, 0x00000001);
nv_icmd(priv, 0x0007a5, 0x00000001);
nv_icmd(priv, 0x000831, 0x00000004);
nv_icmd(priv, 0x01e100, 0x00000001);
nv_icmd(priv, 0x001000, 0x00000008);
nv_icmd(priv, 0x000039, 0x00000000);
nv_icmd(priv, 0x00003a, 0x00000000);
nv_icmd(priv, 0x00003b, 0x00000000);
nv_icmd(priv, 0x000380, 0x00000001);
nv_icmd(priv, 0x000366, 0x00000000);
nv_icmd(priv, 0x000367, 0x00000000);
nv_icmd(priv, 0x000368, 0x00000fff);
nv_icmd(priv, 0x000370, 0x00000000);
nv_icmd(priv, 0x000371, 0x00000000);
nv_icmd(priv, 0x000372, 0x000fffff);
nv_icmd(priv, 0x000813, 0x00000006);
nv_icmd(priv, 0x000814, 0x00000008);
nv_icmd(priv, 0x000957, 0x00000003);
nv_icmd(priv, 0x000818, 0x00000000);
nv_icmd(priv, 0x000819, 0x00000000);
nv_icmd(priv, 0x00081a, 0x00000000);
nv_icmd(priv, 0x00081b, 0x00000000);
nv_icmd(priv, 0x00081c, 0x00000000);
nv_icmd(priv, 0x00081d, 0x00000000);
nv_icmd(priv, 0x00081e, 0x00000000);
nv_icmd(priv, 0x00081f, 0x00000000);
nv_icmd(priv, 0x000848, 0x00000000);
nv_icmd(priv, 0x000849, 0x00000000);
nv_icmd(priv, 0x00084a, 0x00000000);
nv_icmd(priv, 0x00084b, 0x00000000);
nv_icmd(priv, 0x00084c, 0x00000000);
nv_icmd(priv, 0x00084d, 0x00000000);
nv_icmd(priv, 0x00084e, 0x00000000);
nv_icmd(priv, 0x00084f, 0x00000000);
nv_icmd(priv, 0x000850, 0x00000000);
nv_icmd(priv, 0x000851, 0x00000000);
nv_icmd(priv, 0x000852, 0x00000000);
nv_icmd(priv, 0x000853, 0x00000000);
nv_icmd(priv, 0x000854, 0x00000000);
nv_icmd(priv, 0x000855, 0x00000000);
nv_icmd(priv, 0x000856, 0x00000000);
nv_icmd(priv, 0x000857, 0x00000000);
nv_icmd(priv, 0x000738, 0x00000000);
nv_icmd(priv, 0x000b07, 0x00000002);
nv_icmd(priv, 0x000b08, 0x00000100);
nv_icmd(priv, 0x000b09, 0x00000100);
nv_icmd(priv, 0x000b0a, 0x00000001);
nv_icmd(priv, 0x000a04, 0x000000ff);
nv_icmd(priv, 0x00097f, 0x00000100);
nv_icmd(priv, 0x000a02, 0x00000001);
nv_icmd(priv, 0x000809, 0x00000007);
nv_icmd(priv, 0x00c221, 0x00000040);
nv_icmd(priv, 0x00c401, 0x00000001);
nv_icmd(priv, 0x00c402, 0x00010001);
nv_icmd(priv, 0x00c403, 0x00000001);
nv_icmd(priv, 0x00c404, 0x00000001);
nv_icmd(priv, 0x00c40e, 0x00000020);
nv_icmd(priv, 0x00c500, 0x00000003);
nv_icmd(priv, 0x01e100, 0x00000001);
nv_icmd(priv, 0x001000, 0x00000001);
nv_icmd(priv, 0x000b07, 0x00000002);
nv_icmd(priv, 0x000b08, 0x00000100);
nv_icmd(priv, 0x000b09, 0x00000100);
nv_icmd(priv, 0x000b0a, 0x00000001);
nv_icmd(priv, 0x01e100, 0x00000001);
nv_wr32(priv, 0x400208, 0x00000000);
}
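/*
 * Editor's sketch, not part of the original file: long unrolled
 * nv_icmd() sequences like the function above are easier to audit and
 * diff when expressed as a table of {address, count, stride, data}
 * packs walked by a tiny replay loop.  The struct and helper names
 * below (nve0_icmd_pack, nve0_icmd_replay, nve0_icmd_head) are invented
 * for illustration; the only assumption is that nv_icmd(priv, addr,
 * data) behaves as used above.  Later nouveau code moved to a similar
 * table-driven scheme.
 */
struct nve0_icmd_pack {
	u32 addr;	/* first immediate-command address */
	u32 count;	/* number of consecutive writes */
	u32 stride;	/* address increment between writes */
	u32 data;	/* value stored at each address */
};

static void
nve0_icmd_replay(struct nvc0_graph_priv *priv,
		 const struct nve0_icmd_pack *pack, int npack)
{
	int i;
	u32 j;

	for (i = 0; i < npack; i++)
		for (j = 0; j < pack[i].count; j++)
			nv_icmd(priv, pack[i].addr + j * pack[i].stride,
				pack[i].data);
}

/* For example, the first few writes in the function above would become: */
static const struct nve0_icmd_pack nve0_icmd_head[] = {
	{ 0x001000, 1, 0, 0x00000004 },
	{ 0x000039, 3, 1, 0x00000000 },	/* covers 0x000039..0x00003b */
	{ 0x0000a9, 1, 0, 0x0000ffff },
};
/* ...replayed as: nve0_icmd_replay(priv, nve0_icmd_head, ARRAY_SIZE(nve0_icmd_head)); */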
static void
nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
{
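	/*
	 * Editor's note: 0xa097 is the Kepler-generation 3D engine class
	 * in nouveau's naming, so each nv_mthd() call below records the
	 * post-reset default value for one method of that class into the
	 * context state being generated.
	 */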
nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
nv_mthd(priv, 0xa097, 0x0860, 0x00000000);
nv_mthd(priv, 0xa097, 0x08a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x08e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0920, 0x00000000);
nv_mthd(priv, 0xa097, 0x0960, 0x00000000);
nv_mthd(priv, 0xa097, 0x09a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x09e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c00, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c10, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c20, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c30, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c40, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c50, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c60, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c70, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c80, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c90, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c04, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c14, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c24, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c34, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c44, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c54, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c64, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c74, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c84, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c94, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c08, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c18, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c28, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c38, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c48, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c58, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c68, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c78, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c88, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c98, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cac, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cec, 0x00000000);
nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d00, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d10, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d20, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d30, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d40, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d50, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d60, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d70, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d80, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d90, 0x00000000);
nv_mthd(priv, 0xa097, 0x1da0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1db0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1de0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1df0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d04, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d14, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d24, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d34, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d44, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d54, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d64, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d74, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d84, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d94, 0x00000000);
nv_mthd(priv, 0xa097, 0x1da4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1db4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1de4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1df4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d08, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d18, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d28, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d38, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d48, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d58, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d68, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d78, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d88, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d98, 0x00000000);
nv_mthd(priv, 0xa097, 0x1da8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1db8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1de8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1df8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dac, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dec, 0x00000000);
nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f00, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f08, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f10, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f18, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f20, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f28, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f30, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f38, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f40, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f48, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f50, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f58, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f60, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f68, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f70, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f78, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f04, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f14, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f24, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f34, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f3c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f44, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f54, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f64, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f74, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f80, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f88, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f90, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f98, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f84, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f94, 0x00000000);
nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fac, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1fec, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000);
nv_mthd(priv, 0xa097, 0x2000, 0x00000000);
nv_mthd(priv, 0xa097, 0x2040, 0x00000011);
nv_mthd(priv, 0xa097, 0x2080, 0x00000020);
nv_mthd(priv, 0xa097, 0x20c0, 0x00000030);
nv_mthd(priv, 0xa097, 0x2100, 0x00000040);
nv_mthd(priv, 0xa097, 0x2140, 0x00000051);
nv_mthd(priv, 0xa097, 0x200c, 0x00000001);
nv_mthd(priv, 0xa097, 0x204c, 0x00000001);
nv_mthd(priv, 0xa097, 0x208c, 0x00000001);
nv_mthd(priv, 0xa097, 0x20cc, 0x00000001);
nv_mthd(priv, 0xa097, 0x210c, 0x00000001);
nv_mthd(priv, 0xa097, 0x214c, 0x00000001);
nv_mthd(priv, 0xa097, 0x2010, 0x00000000);
nv_mthd(priv, 0xa097, 0x2050, 0x00000000);
nv_mthd(priv, 0xa097, 0x2090, 0x00000001);
nv_mthd(priv, 0xa097, 0x20d0, 0x00000002);
nv_mthd(priv, 0xa097, 0x2110, 0x00000003);
nv_mthd(priv, 0xa097, 0x2150, 0x00000004);
nv_mthd(priv, 0xa097, 0x0380, 0x00000000);
nv_mthd(priv, 0xa097, 0x03a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x03c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x03e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0384, 0x00000000);
nv_mthd(priv, 0xa097, 0x03a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x03c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x03e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0388, 0x00000000);
nv_mthd(priv, 0xa097, 0x03a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x03c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x03e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x038c, 0x00000000);
nv_mthd(priv, 0xa097, 0x03ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x03cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x03ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x0700, 0x00000000);
nv_mthd(priv, 0xa097, 0x0710, 0x00000000);
nv_mthd(priv, 0xa097, 0x0720, 0x00000000);
nv_mthd(priv, 0xa097, 0x0730, 0x00000000);
nv_mthd(priv, 0xa097, 0x0704, 0x00000000);
nv_mthd(priv, 0xa097, 0x0714, 0x00000000);
nv_mthd(priv, 0xa097, 0x0724, 0x00000000);
nv_mthd(priv, 0xa097, 0x0734, 0x00000000);
nv_mthd(priv, 0xa097, 0x0708, 0x00000000);
nv_mthd(priv, 0xa097, 0x0718, 0x00000000);
nv_mthd(priv, 0xa097, 0x0728, 0x00000000);
nv_mthd(priv, 0xa097, 0x0738, 0x00000000);
nv_mthd(priv, 0xa097, 0x2800, 0x00000000);
nv_mthd(priv, 0xa097, 0x2804, 0x00000000);
nv_mthd(priv, 0xa097, 0x2808, 0x00000000);
nv_mthd(priv, 0xa097, 0x280c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2810, 0x00000000);
nv_mthd(priv, 0xa097, 0x2814, 0x00000000);
nv_mthd(priv, 0xa097, 0x2818, 0x00000000);
nv_mthd(priv, 0xa097, 0x281c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2820, 0x00000000);
nv_mthd(priv, 0xa097, 0x2824, 0x00000000);
nv_mthd(priv, 0xa097, 0x2828, 0x00000000);
nv_mthd(priv, 0xa097, 0x282c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2830, 0x00000000);
nv_mthd(priv, 0xa097, 0x2834, 0x00000000);
nv_mthd(priv, 0xa097, 0x2838, 0x00000000);
nv_mthd(priv, 0xa097, 0x283c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2840, 0x00000000);
nv_mthd(priv, 0xa097, 0x2844, 0x00000000);
nv_mthd(priv, 0xa097, 0x2848, 0x00000000);
nv_mthd(priv, 0xa097, 0x284c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2850, 0x00000000);
nv_mthd(priv, 0xa097, 0x2854, 0x00000000);
nv_mthd(priv, 0xa097, 0x2858, 0x00000000);
nv_mthd(priv, 0xa097, 0x285c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2860, 0x00000000);
nv_mthd(priv, 0xa097, 0x2864, 0x00000000);
nv_mthd(priv, 0xa097, 0x2868, 0x00000000);
nv_mthd(priv, 0xa097, 0x286c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2870, 0x00000000);
nv_mthd(priv, 0xa097, 0x2874, 0x00000000);
nv_mthd(priv, 0xa097, 0x2878, 0x00000000);
nv_mthd(priv, 0xa097, 0x287c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2880, 0x00000000);
nv_mthd(priv, 0xa097, 0x2884, 0x00000000);
nv_mthd(priv, 0xa097, 0x2888, 0x00000000);
nv_mthd(priv, 0xa097, 0x288c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2890, 0x00000000);
nv_mthd(priv, 0xa097, 0x2894, 0x00000000);
nv_mthd(priv, 0xa097, 0x2898, 0x00000000);
nv_mthd(priv, 0xa097, 0x289c, 0x00000000);
nv_mthd(priv, 0xa097, 0x28a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x28a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x28a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x28ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x28b0, 0x00000000);
nv_mthd(priv, 0xa097, 0x28b4, 0x00000000);
nv_mthd(priv, 0xa097, 0x28b8, 0x00000000);
nv_mthd(priv, 0xa097, 0x28bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x28c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x28c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x28c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x28cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x28d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x28d4, 0x00000000);
nv_mthd(priv, 0xa097, 0x28d8, 0x00000000);
nv_mthd(priv, 0xa097, 0x28dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x28e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x28e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x28e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x28ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x28f0, 0x00000000);
nv_mthd(priv, 0xa097, 0x28f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x28f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x28fc, 0x00000000);
nv_mthd(priv, 0xa097, 0x2900, 0x00000000);
nv_mthd(priv, 0xa097, 0x2904, 0x00000000);
nv_mthd(priv, 0xa097, 0x2908, 0x00000000);
nv_mthd(priv, 0xa097, 0x290c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2910, 0x00000000);
nv_mthd(priv, 0xa097, 0x2914, 0x00000000);
nv_mthd(priv, 0xa097, 0x2918, 0x00000000);
nv_mthd(priv, 0xa097, 0x291c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2920, 0x00000000);
nv_mthd(priv, 0xa097, 0x2924, 0x00000000);
nv_mthd(priv, 0xa097, 0x2928, 0x00000000);
nv_mthd(priv, 0xa097, 0x292c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2930, 0x00000000);
nv_mthd(priv, 0xa097, 0x2934, 0x00000000);
nv_mthd(priv, 0xa097, 0x2938, 0x00000000);
nv_mthd(priv, 0xa097, 0x293c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2940, 0x00000000);
nv_mthd(priv, 0xa097, 0x2944, 0x00000000);
nv_mthd(priv, 0xa097, 0x2948, 0x00000000);
nv_mthd(priv, 0xa097, 0x294c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2950, 0x00000000);
nv_mthd(priv, 0xa097, 0x2954, 0x00000000);
nv_mthd(priv, 0xa097, 0x2958, 0x00000000);
nv_mthd(priv, 0xa097, 0x295c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2960, 0x00000000);
nv_mthd(priv, 0xa097, 0x2964, 0x00000000);
nv_mthd(priv, 0xa097, 0x2968, 0x00000000);
nv_mthd(priv, 0xa097, 0x296c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2970, 0x00000000);
nv_mthd(priv, 0xa097, 0x2974, 0x00000000);
nv_mthd(priv, 0xa097, 0x2978, 0x00000000);
nv_mthd(priv, 0xa097, 0x297c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2980, 0x00000000);
nv_mthd(priv, 0xa097, 0x2984, 0x00000000);
nv_mthd(priv, 0xa097, 0x2988, 0x00000000);
nv_mthd(priv, 0xa097, 0x298c, 0x00000000);
nv_mthd(priv, 0xa097, 0x2990, 0x00000000);
nv_mthd(priv, 0xa097, 0x2994, 0x00000000);
nv_mthd(priv, 0xa097, 0x2998, 0x00000000);
nv_mthd(priv, 0xa097, 0x299c, 0x00000000);
nv_mthd(priv, 0xa097, 0x29a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x29a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x29a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x29ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x29b0, 0x00000000);
nv_mthd(priv, 0xa097, 0x29b4, 0x00000000);
nv_mthd(priv, 0xa097, 0x29b8, 0x00000000);
nv_mthd(priv, 0xa097, 0x29bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x29c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x29c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x29c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x29cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x29d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x29d4, 0x00000000);
nv_mthd(priv, 0xa097, 0x29d8, 0x00000000);
nv_mthd(priv, 0xa097, 0x29dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x29e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x29e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x29e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x29ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x29f0, 0x00000000);
nv_mthd(priv, 0xa097, 0x29f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x29f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x29fc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a00, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a20, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a40, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a60, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a80, 0x00000000);
nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b00, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b20, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b40, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b60, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b80, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0be0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a04, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a24, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a44, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a64, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a84, 0x00000000);
nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ac4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b04, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b24, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b44, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b64, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b84, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0be4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a08, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a28, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a48, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a68, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a88, 0x00000000);
nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b08, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b28, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b48, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b68, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b88, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0be8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0aac, 0x00000000);
nv_mthd(priv, 0xa097, 0x0acc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0aec, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bac, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bec, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a10, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a30, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a50, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a70, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a90, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0af0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b10, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b30, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b50, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b70, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b90, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a14, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a34, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a54, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a74, 0x00000000);
nv_mthd(priv, 0xa097, 0x0a94, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0af4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b14, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b34, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b54, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b74, 0x00000000);
nv_mthd(priv, 0xa097, 0x0b94, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c00, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c10, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c20, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c30, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c40, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c50, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c60, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c70, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c80, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c90, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c04, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c14, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c24, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c34, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c44, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c54, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c64, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c74, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c84, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c94, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c08, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c18, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c28, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c38, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c48, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c58, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c68, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c78, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c88, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c98, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e00, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e10, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e20, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e30, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e40, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e50, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e60, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e70, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e80, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e90, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d40, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d48, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d50, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d58, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d44, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d54, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1e00, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e20, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e40, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e60, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e80, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e04, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e24, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e44, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e64, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e84, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e08, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e28, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e48, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e68, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e88, 0x00000002);
nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002);
nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002);
nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e6c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1eac, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001);
nv_mthd(priv, 0xa097, 0x1eec, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e10, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e30, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e50, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e70, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e90, 0x00000001);
nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e14, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e34, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e54, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e74, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e94, 0x00000002);
nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002);
nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002);
nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002);
nv_mthd(priv, 0xa097, 0x1e18, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e38, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e58, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e78, 0x00000001);
nv_mthd(priv, 0xa097, 0x1e98, 0x00000001);
nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001);
nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001);
nv_mthd(priv, 0xa097, 0x3400, 0x00000000);
nv_mthd(priv, 0xa097, 0x3404, 0x00000000);
nv_mthd(priv, 0xa097, 0x3408, 0x00000000);
nv_mthd(priv, 0xa097, 0x340c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3410, 0x00000000);
nv_mthd(priv, 0xa097, 0x3414, 0x00000000);
nv_mthd(priv, 0xa097, 0x3418, 0x00000000);
nv_mthd(priv, 0xa097, 0x341c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3420, 0x00000000);
nv_mthd(priv, 0xa097, 0x3424, 0x00000000);
nv_mthd(priv, 0xa097, 0x3428, 0x00000000);
nv_mthd(priv, 0xa097, 0x342c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3430, 0x00000000);
nv_mthd(priv, 0xa097, 0x3434, 0x00000000);
nv_mthd(priv, 0xa097, 0x3438, 0x00000000);
nv_mthd(priv, 0xa097, 0x343c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3440, 0x00000000);
nv_mthd(priv, 0xa097, 0x3444, 0x00000000);
nv_mthd(priv, 0xa097, 0x3448, 0x00000000);
nv_mthd(priv, 0xa097, 0x344c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3450, 0x00000000);
nv_mthd(priv, 0xa097, 0x3454, 0x00000000);
nv_mthd(priv, 0xa097, 0x3458, 0x00000000);
nv_mthd(priv, 0xa097, 0x345c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3460, 0x00000000);
nv_mthd(priv, 0xa097, 0x3464, 0x00000000);
nv_mthd(priv, 0xa097, 0x3468, 0x00000000);
nv_mthd(priv, 0xa097, 0x346c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3470, 0x00000000);
nv_mthd(priv, 0xa097, 0x3474, 0x00000000);
nv_mthd(priv, 0xa097, 0x3478, 0x00000000);
nv_mthd(priv, 0xa097, 0x347c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3480, 0x00000000);
nv_mthd(priv, 0xa097, 0x3484, 0x00000000);
nv_mthd(priv, 0xa097, 0x3488, 0x00000000);
nv_mthd(priv, 0xa097, 0x348c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3490, 0x00000000);
nv_mthd(priv, 0xa097, 0x3494, 0x00000000);
nv_mthd(priv, 0xa097, 0x3498, 0x00000000);
nv_mthd(priv, 0xa097, 0x349c, 0x00000000);
nv_mthd(priv, 0xa097, 0x34a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x34a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x34a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x34ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x34b0, 0x00000000);
nv_mthd(priv, 0xa097, 0x34b4, 0x00000000);
nv_mthd(priv, 0xa097, 0x34b8, 0x00000000);
nv_mthd(priv, 0xa097, 0x34bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x34c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x34c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x34c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x34cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x34d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x34d4, 0x00000000);
nv_mthd(priv, 0xa097, 0x34d8, 0x00000000);
nv_mthd(priv, 0xa097, 0x34dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x34e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x34e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x34e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x34ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x34f0, 0x00000000);
nv_mthd(priv, 0xa097, 0x34f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x34f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x34fc, 0x00000000);
nv_mthd(priv, 0xa097, 0x3500, 0x00000000);
nv_mthd(priv, 0xa097, 0x3504, 0x00000000);
nv_mthd(priv, 0xa097, 0x3508, 0x00000000);
nv_mthd(priv, 0xa097, 0x350c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3510, 0x00000000);
nv_mthd(priv, 0xa097, 0x3514, 0x00000000);
nv_mthd(priv, 0xa097, 0x3518, 0x00000000);
nv_mthd(priv, 0xa097, 0x351c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3520, 0x00000000);
nv_mthd(priv, 0xa097, 0x3524, 0x00000000);
nv_mthd(priv, 0xa097, 0x3528, 0x00000000);
nv_mthd(priv, 0xa097, 0x352c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3530, 0x00000000);
nv_mthd(priv, 0xa097, 0x3534, 0x00000000);
nv_mthd(priv, 0xa097, 0x3538, 0x00000000);
nv_mthd(priv, 0xa097, 0x353c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3540, 0x00000000);
nv_mthd(priv, 0xa097, 0x3544, 0x00000000);
nv_mthd(priv, 0xa097, 0x3548, 0x00000000);
nv_mthd(priv, 0xa097, 0x354c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3550, 0x00000000);
nv_mthd(priv, 0xa097, 0x3554, 0x00000000);
nv_mthd(priv, 0xa097, 0x3558, 0x00000000);
nv_mthd(priv, 0xa097, 0x355c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3560, 0x00000000);
nv_mthd(priv, 0xa097, 0x3564, 0x00000000);
nv_mthd(priv, 0xa097, 0x3568, 0x00000000);
nv_mthd(priv, 0xa097, 0x356c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3570, 0x00000000);
nv_mthd(priv, 0xa097, 0x3574, 0x00000000);
nv_mthd(priv, 0xa097, 0x3578, 0x00000000);
nv_mthd(priv, 0xa097, 0x357c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3580, 0x00000000);
nv_mthd(priv, 0xa097, 0x3584, 0x00000000);
nv_mthd(priv, 0xa097, 0x3588, 0x00000000);
nv_mthd(priv, 0xa097, 0x358c, 0x00000000);
nv_mthd(priv, 0xa097, 0x3590, 0x00000000);
nv_mthd(priv, 0xa097, 0x3594, 0x00000000);
nv_mthd(priv, 0xa097, 0x3598, 0x00000000);
nv_mthd(priv, 0xa097, 0x359c, 0x00000000);
nv_mthd(priv, 0xa097, 0x35a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x35a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x35a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x35ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x35b0, 0x00000000);
nv_mthd(priv, 0xa097, 0x35b4, 0x00000000);
nv_mthd(priv, 0xa097, 0x35b8, 0x00000000);
nv_mthd(priv, 0xa097, 0x35bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x35c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x35c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x35c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x35cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x35d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x35d4, 0x00000000);
nv_mthd(priv, 0xa097, 0x35d8, 0x00000000);
nv_mthd(priv, 0xa097, 0x35dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x35e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x35e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x35e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x35ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x35f0, 0x00000000);
nv_mthd(priv, 0xa097, 0x35f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x35f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x35fc, 0x00000000);
nv_mthd(priv, 0xa097, 0x030c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1944, 0x00000000);
nv_mthd(priv, 0xa097, 0x1514, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff);
nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881);
nv_mthd(priv, 0xa097, 0x0fac, 0x00000001);
nv_mthd(priv, 0xa097, 0x1538, 0x00000001);
nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014);
nv_mthd(priv, 0xa097, 0x0fec, 0x00000040);
nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000);
nv_mthd(priv, 0xa097, 0x179c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1228, 0x00000400);
nv_mthd(priv, 0xa097, 0x122c, 0x00000300);
nv_mthd(priv, 0xa097, 0x1230, 0x00010001);
nv_mthd(priv, 0xa097, 0x07f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x15b4, 0x00000001);
nv_mthd(priv, 0xa097, 0x15cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1534, 0x00000000);
nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000);
nv_mthd(priv, 0xa097, 0x15d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x153c, 0x00000000);
nv_mthd(priv, 0xa097, 0x16b4, 0x00000003);
nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff);
nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff);
nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff);
nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff);
nv_mthd(priv, 0xa097, 0x0df8, 0x00000000);
nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1948, 0x00000000);
nv_mthd(priv, 0xa097, 0x1970, 0x00000001);
nv_mthd(priv, 0xa097, 0x161c, 0x000009f0);
nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010);
nv_mthd(priv, 0xa097, 0x163c, 0x00000000);
nv_mthd(priv, 0xa097, 0x15e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1160, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1164, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1168, 0x25e00040);
nv_mthd(priv, 0xa097, 0x116c, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1170, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1174, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1178, 0x25e00040);
nv_mthd(priv, 0xa097, 0x117c, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1180, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1184, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1188, 0x25e00040);
nv_mthd(priv, 0xa097, 0x118c, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1190, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1194, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1198, 0x25e00040);
nv_mthd(priv, 0xa097, 0x119c, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040);
nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040);
nv_mthd(priv, 0xa097, 0x1880, 0x00000000);
nv_mthd(priv, 0xa097, 0x1884, 0x00000000);
nv_mthd(priv, 0xa097, 0x1888, 0x00000000);
nv_mthd(priv, 0xa097, 0x188c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1890, 0x00000000);
nv_mthd(priv, 0xa097, 0x1894, 0x00000000);
nv_mthd(priv, 0xa097, 0x1898, 0x00000000);
nv_mthd(priv, 0xa097, 0x189c, 0x00000000);
nv_mthd(priv, 0xa097, 0x18a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x18a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x18a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x18ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x18b0, 0x00000000);
nv_mthd(priv, 0xa097, 0x18b4, 0x00000000);
nv_mthd(priv, 0xa097, 0x18b8, 0x00000000);
nv_mthd(priv, 0xa097, 0x18bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x18c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x18c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x18c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x18cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x18d0, 0x00000000);
nv_mthd(priv, 0xa097, 0x18d4, 0x00000000);
nv_mthd(priv, 0xa097, 0x18d8, 0x00000000);
nv_mthd(priv, 0xa097, 0x18dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x18e0, 0x00000000);
nv_mthd(priv, 0xa097, 0x18e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x18e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x18ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x18f0, 0x00000000);
nv_mthd(priv, 0xa097, 0x18f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x18f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x18fc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f84, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f88, 0x00000000);
nv_mthd(priv, 0xa097, 0x17c8, 0x00000000);
nv_mthd(priv, 0xa097, 0x17cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff);
nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff);
nv_mthd(priv, 0xa097, 0x17d8, 0x00000002);
nv_mthd(priv, 0xa097, 0x17dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x15f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x15f8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1434, 0x00000000);
nv_mthd(priv, 0xa097, 0x1438, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d74, 0x00000000);
nv_mthd(priv, 0xa097, 0x0dec, 0x00000001);
nv_mthd(priv, 0xa097, 0x13a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1318, 0x00000001);
nv_mthd(priv, 0xa097, 0x1644, 0x00000000);
nv_mthd(priv, 0xa097, 0x0748, 0x00000000);
nv_mthd(priv, 0xa097, 0x0de8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1648, 0x00000000);
nv_mthd(priv, 0xa097, 0x12a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x1120, 0x00000000);
nv_mthd(priv, 0xa097, 0x1124, 0x00000000);
nv_mthd(priv, 0xa097, 0x1128, 0x00000000);
nv_mthd(priv, 0xa097, 0x112c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1118, 0x00000000);
nv_mthd(priv, 0xa097, 0x164c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1658, 0x00000000);
nv_mthd(priv, 0xa097, 0x1910, 0x00000290);
nv_mthd(priv, 0xa097, 0x1518, 0x00000000);
nv_mthd(priv, 0xa097, 0x165c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1520, 0x00000000);
nv_mthd(priv, 0xa097, 0x1604, 0x00000000);
nv_mthd(priv, 0xa097, 0x1570, 0x00000000);
nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000);
nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000);
nv_mthd(priv, 0xa097, 0x020c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1670, 0x30201000);
nv_mthd(priv, 0xa097, 0x1674, 0x70605040);
nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888);
nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8);
nv_mthd(priv, 0xa097, 0x166c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00);
nv_mthd(priv, 0xa097, 0x12d0, 0x00000003);
nv_mthd(priv, 0xa097, 0x12d4, 0x00000002);
nv_mthd(priv, 0xa097, 0x1684, 0x00000000);
nv_mthd(priv, 0xa097, 0x1688, 0x00000000);
nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02);
nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02);
nv_mthd(priv, 0xa097, 0x0db4, 0x00000000);
nv_mthd(priv, 0xa097, 0x168c, 0x00000000);
nv_mthd(priv, 0xa097, 0x15bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x156c, 0x00000000);
nv_mthd(priv, 0xa097, 0x187c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1110, 0x00000001);
nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000);
nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1234, 0x00000000);
nv_mthd(priv, 0xa097, 0x1690, 0x00000000);
nv_mthd(priv, 0xa097, 0x12ac, 0x00000001);
nv_mthd(priv, 0xa097, 0x0790, 0x00000000);
nv_mthd(priv, 0xa097, 0x0794, 0x00000000);
nv_mthd(priv, 0xa097, 0x0798, 0x00000000);
nv_mthd(priv, 0xa097, 0x079c, 0x00000000);
nv_mthd(priv, 0xa097, 0x07a0, 0x00000000);
nv_mthd(priv, 0xa097, 0x077c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1000, 0x00000010);
nv_mthd(priv, 0xa097, 0x10fc, 0x00000000);
nv_mthd(priv, 0xa097, 0x1290, 0x00000000);
nv_mthd(priv, 0xa097, 0x0218, 0x00000010);
nv_mthd(priv, 0xa097, 0x12d8, 0x00000000);
nv_mthd(priv, 0xa097, 0x12dc, 0x00000010);
nv_mthd(priv, 0xa097, 0x0d94, 0x00000001);
nv_mthd(priv, 0xa097, 0x155c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1560, 0x00000000);
nv_mthd(priv, 0xa097, 0x1564, 0x00000fff);
nv_mthd(priv, 0xa097, 0x1574, 0x00000000);
nv_mthd(priv, 0xa097, 0x1578, 0x00000000);
nv_mthd(priv, 0xa097, 0x157c, 0x000fffff);
nv_mthd(priv, 0xa097, 0x1354, 0x00000000);
nv_mthd(priv, 0xa097, 0x1610, 0x00000012);
nv_mthd(priv, 0xa097, 0x1608, 0x00000000);
nv_mthd(priv, 0xa097, 0x160c, 0x00000000);
nv_mthd(priv, 0xa097, 0x260c, 0x00000000);
nv_mthd(priv, 0xa097, 0x07ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x162c, 0x00000003);
nv_mthd(priv, 0xa097, 0x0210, 0x00000000);
nv_mthd(priv, 0xa097, 0x0320, 0x00000000);
nv_mthd(priv, 0xa097, 0x0324, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0328, 0x3f800000);
nv_mthd(priv, 0xa097, 0x032c, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0330, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0334, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0338, 0x3f800000);
nv_mthd(priv, 0xa097, 0x0750, 0x00000000);
nv_mthd(priv, 0xa097, 0x0760, 0x39291909);
nv_mthd(priv, 0xa097, 0x0764, 0x79695949);
nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989);
nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9);
nv_mthd(priv, 0xa097, 0x0770, 0x30201000);
nv_mthd(priv, 0xa097, 0x0774, 0x70605040);
nv_mthd(priv, 0xa097, 0x0778, 0x00009080);
nv_mthd(priv, 0xa097, 0x0780, 0x39291909);
nv_mthd(priv, 0xa097, 0x0784, 0x79695949);
nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989);
nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9);
nv_mthd(priv, 0xa097, 0x07d0, 0x30201000);
nv_mthd(priv, 0xa097, 0x07d4, 0x70605040);
nv_mthd(priv, 0xa097, 0x07d8, 0x00009080);
nv_mthd(priv, 0xa097, 0x037c, 0x00000001);
nv_mthd(priv, 0xa097, 0x0740, 0x00000000);
nv_mthd(priv, 0xa097, 0x0744, 0x00000000);
nv_mthd(priv, 0xa097, 0x2600, 0x00000000);
nv_mthd(priv, 0xa097, 0x1918, 0x00000000);
nv_mthd(priv, 0xa097, 0x191c, 0x00000900);
nv_mthd(priv, 0xa097, 0x1920, 0x00000405);
nv_mthd(priv, 0xa097, 0x1308, 0x00000001);
nv_mthd(priv, 0xa097, 0x1924, 0x00000000);
nv_mthd(priv, 0xa097, 0x13ac, 0x00000000);
nv_mthd(priv, 0xa097, 0x192c, 0x00000001);
nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c);
nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x02c0, 0x00000001);
nv_mthd(priv, 0xa097, 0x1510, 0x00000000);
nv_mthd(priv, 0xa097, 0x1940, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000);
nv_mthd(priv, 0xa097, 0x194c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1950, 0x00000000);
nv_mthd(priv, 0xa097, 0x1968, 0x00000000);
nv_mthd(priv, 0xa097, 0x1590, 0x0000003f);
nv_mthd(priv, 0xa097, 0x07e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x07ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x07f0, 0x00000000);
nv_mthd(priv, 0xa097, 0x07f4, 0x00000000);
nv_mthd(priv, 0xa097, 0x196c, 0x00000011);
nv_mthd(priv, 0xa097, 0x02e4, 0x0000b001);
nv_mthd(priv, 0xa097, 0x036c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0370, 0x00000000);
nv_mthd(priv, 0xa097, 0x197c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000);
nv_mthd(priv, 0xa097, 0x02d8, 0x00000040);
nv_mthd(priv, 0xa097, 0x1980, 0x00000080);
nv_mthd(priv, 0xa097, 0x1504, 0x00000080);
nv_mthd(priv, 0xa097, 0x1984, 0x00000000);
nv_mthd(priv, 0xa097, 0x0300, 0x00000001);
nv_mthd(priv, 0xa097, 0x13a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x12ec, 0x00000000);
nv_mthd(priv, 0xa097, 0x1310, 0x00000000);
nv_mthd(priv, 0xa097, 0x1314, 0x00000001);
nv_mthd(priv, 0xa097, 0x1380, 0x00000000);
nv_mthd(priv, 0xa097, 0x1384, 0x00000001);
nv_mthd(priv, 0xa097, 0x1388, 0x00000001);
nv_mthd(priv, 0xa097, 0x138c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1390, 0x00000001);
nv_mthd(priv, 0xa097, 0x1394, 0x00000000);
nv_mthd(priv, 0xa097, 0x139c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1398, 0x00000000);
nv_mthd(priv, 0xa097, 0x1594, 0x00000000);
nv_mthd(priv, 0xa097, 0x1598, 0x00000001);
nv_mthd(priv, 0xa097, 0x159c, 0x00000001);
nv_mthd(priv, 0xa097, 0x15a0, 0x00000001);
nv_mthd(priv, 0xa097, 0x15a4, 0x00000001);
nv_mthd(priv, 0xa097, 0x0f54, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f58, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000);
nv_mthd(priv, 0xa097, 0x19bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000);
nv_mthd(priv, 0xa097, 0x12cc, 0x00000000);
nv_mthd(priv, 0xa097, 0x12e8, 0x00000000);
nv_mthd(priv, 0xa097, 0x130c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1360, 0x00000000);
nv_mthd(priv, 0xa097, 0x1364, 0x00000000);
nv_mthd(priv, 0xa097, 0x1368, 0x00000000);
nv_mthd(priv, 0xa097, 0x136c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1370, 0x00000000);
nv_mthd(priv, 0xa097, 0x1374, 0x00000000);
nv_mthd(priv, 0xa097, 0x1378, 0x00000000);
nv_mthd(priv, 0xa097, 0x137c, 0x00000000);
nv_mthd(priv, 0xa097, 0x133c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1340, 0x00000001);
nv_mthd(priv, 0xa097, 0x1344, 0x00000002);
nv_mthd(priv, 0xa097, 0x1348, 0x00000001);
nv_mthd(priv, 0xa097, 0x134c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1350, 0x00000002);
nv_mthd(priv, 0xa097, 0x1358, 0x00000001);
nv_mthd(priv, 0xa097, 0x12e4, 0x00000000);
nv_mthd(priv, 0xa097, 0x131c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1320, 0x00000000);
nv_mthd(priv, 0xa097, 0x1324, 0x00000000);
nv_mthd(priv, 0xa097, 0x1328, 0x00000000);
nv_mthd(priv, 0xa097, 0x19c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x1140, 0x00000000);
nv_mthd(priv, 0xa097, 0x19c4, 0x00000000);
nv_mthd(priv, 0xa097, 0x19c8, 0x00001500);
nv_mthd(priv, 0xa097, 0x135c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f90, 0x00000000);
nv_mthd(priv, 0xa097, 0x19e0, 0x00000001);
nv_mthd(priv, 0xa097, 0x19e4, 0x00000001);
nv_mthd(priv, 0xa097, 0x19e8, 0x00000001);
nv_mthd(priv, 0xa097, 0x19ec, 0x00000001);
nv_mthd(priv, 0xa097, 0x19f0, 0x00000001);
nv_mthd(priv, 0xa097, 0x19f4, 0x00000001);
nv_mthd(priv, 0xa097, 0x19f8, 0x00000001);
nv_mthd(priv, 0xa097, 0x19fc, 0x00000001);
nv_mthd(priv, 0xa097, 0x19cc, 0x00000001);
nv_mthd(priv, 0xa097, 0x15b8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a00, 0x00001111);
nv_mthd(priv, 0xa097, 0x1a04, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a08, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a10, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a14, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a18, 0x00000000);
nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000);
nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000);
nv_mthd(priv, 0xa097, 0x10f8, 0x00001010);
nv_mthd(priv, 0xa097, 0x0d80, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d84, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d88, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0d90, 0x00000000);
nv_mthd(priv, 0xa097, 0x0da0, 0x00000000);
nv_mthd(priv, 0xa097, 0x07a4, 0x00000000);
nv_mthd(priv, 0xa097, 0x07a8, 0x00000000);
nv_mthd(priv, 0xa097, 0x1508, 0x80000000);
nv_mthd(priv, 0xa097, 0x150c, 0x40000000);
nv_mthd(priv, 0xa097, 0x1668, 0x00000000);
nv_mthd(priv, 0xa097, 0x0318, 0x00000008);
nv_mthd(priv, 0xa097, 0x031c, 0x00000008);
nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001);
nv_mthd(priv, 0xa097, 0x0374, 0x00000000);
nv_mthd(priv, 0xa097, 0x0378, 0x00000020);
nv_mthd(priv, 0xa097, 0x07dc, 0x00000000);
nv_mthd(priv, 0xa097, 0x074c, 0x00000055);
nv_mthd(priv, 0xa097, 0x1420, 0x00000003);
nv_mthd(priv, 0xa097, 0x17bc, 0x00000000);
nv_mthd(priv, 0xa097, 0x17c0, 0x00000000);
nv_mthd(priv, 0xa097, 0x17c4, 0x00000001);
nv_mthd(priv, 0xa097, 0x1008, 0x00000008);
nv_mthd(priv, 0xa097, 0x100c, 0x00000040);
nv_mthd(priv, 0xa097, 0x1010, 0x0000012c);
nv_mthd(priv, 0xa097, 0x0d60, 0x00000040);
nv_mthd(priv, 0xa097, 0x075c, 0x00000003);
nv_mthd(priv, 0xa097, 0x1018, 0x00000020);
nv_mthd(priv, 0xa097, 0x101c, 0x00000001);
nv_mthd(priv, 0xa097, 0x1020, 0x00000020);
nv_mthd(priv, 0xa097, 0x1024, 0x00000001);
nv_mthd(priv, 0xa097, 0x1444, 0x00000000);
nv_mthd(priv, 0xa097, 0x1448, 0x00000000);
nv_mthd(priv, 0xa097, 0x144c, 0x00000000);
nv_mthd(priv, 0xa097, 0x0360, 0x20164010);
nv_mthd(priv, 0xa097, 0x0364, 0x00000020);
nv_mthd(priv, 0xa097, 0x0368, 0x00000000);
nv_mthd(priv, 0xa097, 0x0de4, 0x00000000);
nv_mthd(priv, 0xa097, 0x0204, 0x00000006);
nv_mthd(priv, 0xa097, 0x0208, 0x00000000);
nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff);
nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff);
nv_mthd(priv, 0xa097, 0x1220, 0x00000005);
nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000);
nv_mthd(priv, 0xa097, 0x0f98, 0x00400008);
nv_mthd(priv, 0xa097, 0x1284, 0x08000080);
nv_mthd(priv, 0xa097, 0x1450, 0x00400008);
nv_mthd(priv, 0xa097, 0x1454, 0x08000080);
nv_mthd(priv, 0xa097, 0x0214, 0x00000000);
}
static void
nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
nv_mthd(priv, 0x902d, 0x3410, 0x00000000);
}
static void
nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404010, 0x0);
nv_wr32(priv, 0x404014, 0x0);
nv_wr32(priv, 0x404018, 0x0);
nv_wr32(priv, 0x40401c, 0x0);
nv_wr32(priv, 0x404020, 0x0);
nv_wr32(priv, 0x404024, 0xe000);
nv_wr32(priv, 0x404028, 0x0);
nv_wr32(priv, 0x4040a8, 0x0);
nv_wr32(priv, 0x4040ac, 0x0);
nv_wr32(priv, 0x4040b0, 0x0);
nv_wr32(priv, 0x4040b4, 0x0);
nv_wr32(priv, 0x4040b8, 0x0);
nv_wr32(priv, 0x4040bc, 0x0);
nv_wr32(priv, 0x4040c0, 0x0);
nv_wr32(priv, 0x4040c4, 0x0);
nv_wr32(priv, 0x4040c8, 0xf800008f);
nv_wr32(priv, 0x4040d0, 0x0);
nv_wr32(priv, 0x4040d4, 0x0);
nv_wr32(priv, 0x4040d8, 0x0);
nv_wr32(priv, 0x4040dc, 0x0);
nv_wr32(priv, 0x4040e0, 0x0);
nv_wr32(priv, 0x4040e4, 0x0);
nv_wr32(priv, 0x4040e8, 0x1000);
nv_wr32(priv, 0x4040f8, 0x0);
nv_wr32(priv, 0x404130, 0x0);
nv_wr32(priv, 0x404134, 0x0);
nv_wr32(priv, 0x404138, 0x20000040);
nv_wr32(priv, 0x404150, 0x2e);
nv_wr32(priv, 0x404154, 0x400);
nv_wr32(priv, 0x404158, 0x200);
nv_wr32(priv, 0x404164, 0x55);
nv_wr32(priv, 0x4041a0, 0x0);
nv_wr32(priv, 0x4041a4, 0x0);
nv_wr32(priv, 0x4041a8, 0x0);
nv_wr32(priv, 0x4041ac, 0x0);
nv_wr32(priv, 0x404200, 0x0);
nv_wr32(priv, 0x404204, 0x0);
nv_wr32(priv, 0x404208, 0x0);
nv_wr32(priv, 0x40420c, 0x0);
}
static void
nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404404, 0x0);
nv_wr32(priv, 0x404408, 0x0);
nv_wr32(priv, 0x40440c, 0x0);
nv_wr32(priv, 0x404410, 0x0);
nv_wr32(priv, 0x404414, 0x0);
nv_wr32(priv, 0x404418, 0x0);
nv_wr32(priv, 0x40441c, 0x0);
nv_wr32(priv, 0x404420, 0x0);
nv_wr32(priv, 0x404424, 0x0);
nv_wr32(priv, 0x404428, 0x0);
nv_wr32(priv, 0x40442c, 0x0);
nv_wr32(priv, 0x404430, 0x0);
nv_wr32(priv, 0x404434, 0x0);
nv_wr32(priv, 0x404438, 0x0);
nv_wr32(priv, 0x404460, 0x0);
nv_wr32(priv, 0x404464, 0x0);
nv_wr32(priv, 0x404468, 0xffffff);
nv_wr32(priv, 0x40446c, 0x0);
nv_wr32(priv, 0x404480, 0x1);
nv_wr32(priv, 0x404498, 0x1);
}
static void
nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404604, 0x14);
nv_wr32(priv, 0x404608, 0x0);
nv_wr32(priv, 0x40460c, 0x3fff);
nv_wr32(priv, 0x404610, 0x100);
nv_wr32(priv, 0x404618, 0x0);
nv_wr32(priv, 0x40461c, 0x0);
nv_wr32(priv, 0x404620, 0x0);
nv_wr32(priv, 0x404624, 0x0);
nv_wr32(priv, 0x40462c, 0x0);
nv_wr32(priv, 0x404630, 0x0);
nv_wr32(priv, 0x404640, 0x0);
nv_wr32(priv, 0x404654, 0x0);
nv_wr32(priv, 0x404660, 0x0);
nv_wr32(priv, 0x404678, 0x0);
nv_wr32(priv, 0x40467c, 0x2);
nv_wr32(priv, 0x404680, 0x0);
nv_wr32(priv, 0x404684, 0x0);
nv_wr32(priv, 0x404688, 0x0);
nv_wr32(priv, 0x40468c, 0x0);
nv_wr32(priv, 0x404690, 0x0);
nv_wr32(priv, 0x404694, 0x0);
nv_wr32(priv, 0x404698, 0x0);
nv_wr32(priv, 0x40469c, 0x0);
nv_wr32(priv, 0x4046a0, 0x7f0080);
nv_wr32(priv, 0x4046a4, 0x0);
nv_wr32(priv, 0x4046a8, 0x0);
nv_wr32(priv, 0x4046ac, 0x0);
nv_wr32(priv, 0x4046b0, 0x0);
nv_wr32(priv, 0x4046b4, 0x0);
nv_wr32(priv, 0x4046b8, 0x0);
nv_wr32(priv, 0x4046bc, 0x0);
nv_wr32(priv, 0x4046c0, 0x0);
nv_wr32(priv, 0x4046c8, 0x0);
nv_wr32(priv, 0x4046cc, 0x0);
nv_wr32(priv, 0x4046d0, 0x0);
}
static void
nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404700, 0x0);
nv_wr32(priv, 0x404704, 0x0);
nv_wr32(priv, 0x404708, 0x0);
nv_wr32(priv, 0x404718, 0x0);
nv_wr32(priv, 0x40471c, 0x0);
nv_wr32(priv, 0x404720, 0x0);
nv_wr32(priv, 0x404724, 0x0);
nv_wr32(priv, 0x404728, 0x0);
nv_wr32(priv, 0x40472c, 0x0);
nv_wr32(priv, 0x404730, 0x0);
nv_wr32(priv, 0x404734, 0x100);
nv_wr32(priv, 0x404738, 0x0);
nv_wr32(priv, 0x40473c, 0x0);
nv_wr32(priv, 0x404744, 0x0);
nv_wr32(priv, 0x404748, 0x0);
nv_wr32(priv, 0x404754, 0x0);
}
static void
nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x405800, 0xf8000bf);
nv_wr32(priv, 0x405830, 0x2180648);
nv_wr32(priv, 0x405834, 0x8000000);
nv_wr32(priv, 0x405838, 0x0);
nv_wr32(priv, 0x405854, 0x0);
nv_wr32(priv, 0x405870, 0x1);
nv_wr32(priv, 0x405874, 0x1);
nv_wr32(priv, 0x405878, 0x1);
nv_wr32(priv, 0x40587c, 0x1);
nv_wr32(priv, 0x405a00, 0x0);
nv_wr32(priv, 0x405a04, 0x0);
nv_wr32(priv, 0x405a18, 0x0);
nv_wr32(priv, 0x405b00, 0x0);
nv_wr32(priv, 0x405b10, 0x1000);
}
static void
nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x406020, 0x4103c1);
nv_wr32(priv, 0x406028, 0x1);
nv_wr32(priv, 0x40602c, 0x1);
nv_wr32(priv, 0x406030, 0x1);
nv_wr32(priv, 0x406034, 0x1);
}
static void
nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x4064a8, 0x0);
nv_wr32(priv, 0x4064ac, 0x3fff);
nv_wr32(priv, 0x4064b4, 0x0);
nv_wr32(priv, 0x4064b8, 0x0);
nv_wr32(priv, 0x4064c0, 0x801a00f0);
nv_wr32(priv, 0x4064c4, 0x192ffff);
nv_wr32(priv, 0x4064c8, 0x1800600);
nv_wr32(priv, 0x4064cc, 0x0);
nv_wr32(priv, 0x4064d0, 0x0);
nv_wr32(priv, 0x4064d4, 0x0);
nv_wr32(priv, 0x4064d8, 0x0);
nv_wr32(priv, 0x4064dc, 0x0);
nv_wr32(priv, 0x4064e0, 0x0);
nv_wr32(priv, 0x4064e4, 0x0);
nv_wr32(priv, 0x4064e8, 0x0);
nv_wr32(priv, 0x4064ec, 0x0);
nv_wr32(priv, 0x4064fc, 0x22a);
}
static void
nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407040, 0x0);
}
static void
nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407804, 0x23);
nv_wr32(priv, 0x40780c, 0xa418820);
nv_wr32(priv, 0x407810, 0x62080e6);
nv_wr32(priv, 0x407814, 0x20398a4);
nv_wr32(priv, 0x407818, 0xe629062);
nv_wr32(priv, 0x40781c, 0xa418820);
nv_wr32(priv, 0x407820, 0xe6);
nv_wr32(priv, 0x4078bc, 0x103);
}
static void
nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408000, 0x0);
nv_wr32(priv, 0x408004, 0x0);
nv_wr32(priv, 0x408008, 0x30);
nv_wr32(priv, 0x40800c, 0x0);
nv_wr32(priv, 0x408010, 0x0);
nv_wr32(priv, 0x408014, 0x69);
nv_wr32(priv, 0x408018, 0xe100e100);
nv_wr32(priv, 0x408064, 0x0);
}
static void
nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408800, 0x2802a3c);
nv_wr32(priv, 0x408804, 0x40);
nv_wr32(priv, 0x408808, 0x1043e005);
nv_wr32(priv, 0x408840, 0xb);
nv_wr32(priv, 0x408900, 0x3080b801);
nv_wr32(priv, 0x408904, 0x62000001);
nv_wr32(priv, 0x408908, 0xc8102f);
nv_wr32(priv, 0x408980, 0x11d);
}
static void
nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x418380, 0x16);
nv_wr32(priv, 0x418400, 0x38004e00);
nv_wr32(priv, 0x418404, 0x71e0ffff);
nv_wr32(priv, 0x41840c, 0x1008);
nv_wr32(priv, 0x418410, 0xfff0fff);
nv_wr32(priv, 0x418414, 0x2200fff);
nv_wr32(priv, 0x418450, 0x0);
nv_wr32(priv, 0x418454, 0x0);
nv_wr32(priv, 0x418458, 0x0);
nv_wr32(priv, 0x41845c, 0x0);
nv_wr32(priv, 0x418460, 0x0);
nv_wr32(priv, 0x418464, 0x0);
nv_wr32(priv, 0x418468, 0x1);
nv_wr32(priv, 0x41846c, 0x0);
nv_wr32(priv, 0x418470, 0x0);
nv_wr32(priv, 0x418600, 0x1f);
nv_wr32(priv, 0x418684, 0xf);
nv_wr32(priv, 0x418700, 0x2);
nv_wr32(priv, 0x418704, 0x80);
nv_wr32(priv, 0x418708, 0x0);
nv_wr32(priv, 0x41870c, 0x0);
nv_wr32(priv, 0x418710, 0x0);
nv_wr32(priv, 0x418800, 0x7006860a);
nv_wr32(priv, 0x418808, 0x0);
nv_wr32(priv, 0x41880c, 0x0);
nv_wr32(priv, 0x418810, 0x0);
nv_wr32(priv, 0x418828, 0x44);
nv_wr32(priv, 0x418830, 0x10000001);
nv_wr32(priv, 0x4188d8, 0x8);
nv_wr32(priv, 0x4188e0, 0x1000000);
nv_wr32(priv, 0x4188e8, 0x0);
nv_wr32(priv, 0x4188ec, 0x0);
nv_wr32(priv, 0x4188f0, 0x0);
nv_wr32(priv, 0x4188f4, 0x0);
nv_wr32(priv, 0x4188f8, 0x0);
nv_wr32(priv, 0x4188fc, 0x20100018);
nv_wr32(priv, 0x41891c, 0xff00ff);
nv_wr32(priv, 0x418924, 0x0);
nv_wr32(priv, 0x418928, 0xffff00);
nv_wr32(priv, 0x41892c, 0xff00);
nv_wr32(priv, 0x418a00, 0x0);
nv_wr32(priv, 0x418a04, 0x0);
nv_wr32(priv, 0x418a08, 0x0);
nv_wr32(priv, 0x418a0c, 0x10000);
nv_wr32(priv, 0x418a10, 0x0);
nv_wr32(priv, 0x418a14, 0x0);
nv_wr32(priv, 0x418a18, 0x0);
nv_wr32(priv, 0x418a20, 0x0);
nv_wr32(priv, 0x418a24, 0x0);
nv_wr32(priv, 0x418a28, 0x0);
nv_wr32(priv, 0x418a2c, 0x10000);
nv_wr32(priv, 0x418a30, 0x0);
nv_wr32(priv, 0x418a34, 0x0);
nv_wr32(priv, 0x418a38, 0x0);
nv_wr32(priv, 0x418a40, 0x0);
nv_wr32(priv, 0x418a44, 0x0);
nv_wr32(priv, 0x418a48, 0x0);
nv_wr32(priv, 0x418a4c, 0x10000);
nv_wr32(priv, 0x418a50, 0x0);
nv_wr32(priv, 0x418a54, 0x0);
nv_wr32(priv, 0x418a58, 0x0);
nv_wr32(priv, 0x418a60, 0x0);
nv_wr32(priv, 0x418a64, 0x0);
nv_wr32(priv, 0x418a68, 0x0);
nv_wr32(priv, 0x418a6c, 0x10000);
nv_wr32(priv, 0x418a70, 0x0);
nv_wr32(priv, 0x418a74, 0x0);
nv_wr32(priv, 0x418a78, 0x0);
nv_wr32(priv, 0x418a80, 0x0);
nv_wr32(priv, 0x418a84, 0x0);
nv_wr32(priv, 0x418a88, 0x0);
nv_wr32(priv, 0x418a8c, 0x10000);
nv_wr32(priv, 0x418a90, 0x0);
nv_wr32(priv, 0x418a94, 0x0);
nv_wr32(priv, 0x418a98, 0x0);
nv_wr32(priv, 0x418aa0, 0x0);
nv_wr32(priv, 0x418aa4, 0x0);
nv_wr32(priv, 0x418aa8, 0x0);
nv_wr32(priv, 0x418aac, 0x10000);
nv_wr32(priv, 0x418ab0, 0x0);
nv_wr32(priv, 0x418ab4, 0x0);
nv_wr32(priv, 0x418ab8, 0x0);
nv_wr32(priv, 0x418ac0, 0x0);
nv_wr32(priv, 0x418ac4, 0x0);
nv_wr32(priv, 0x418ac8, 0x0);
nv_wr32(priv, 0x418acc, 0x10000);
nv_wr32(priv, 0x418ad0, 0x0);
nv_wr32(priv, 0x418ad4, 0x0);
nv_wr32(priv, 0x418ad8, 0x0);
nv_wr32(priv, 0x418ae0, 0x0);
nv_wr32(priv, 0x418ae4, 0x0);
nv_wr32(priv, 0x418ae8, 0x0);
nv_wr32(priv, 0x418aec, 0x10000);
nv_wr32(priv, 0x418af0, 0x0);
nv_wr32(priv, 0x418af4, 0x0);
nv_wr32(priv, 0x418af8, 0x0);
nv_wr32(priv, 0x418b00, 0x6);
nv_wr32(priv, 0x418b08, 0xa418820);
nv_wr32(priv, 0x418b0c, 0x62080e6);
nv_wr32(priv, 0x418b10, 0x20398a4);
nv_wr32(priv, 0x418b14, 0xe629062);
nv_wr32(priv, 0x418b18, 0xa418820);
nv_wr32(priv, 0x418b1c, 0xe6);
nv_wr32(priv, 0x418bb8, 0x103);
nv_wr32(priv, 0x418c08, 0x1);
nv_wr32(priv, 0x418c10, 0x0);
nv_wr32(priv, 0x418c14, 0x0);
nv_wr32(priv, 0x418c18, 0x0);
nv_wr32(priv, 0x418c1c, 0x0);
nv_wr32(priv, 0x418c20, 0x0);
nv_wr32(priv, 0x418c24, 0x0);
nv_wr32(priv, 0x418c28, 0x0);
nv_wr32(priv, 0x418c2c, 0x0);
nv_wr32(priv, 0x418c40, 0xffffffff);
nv_wr32(priv, 0x418c6c, 0x1);
nv_wr32(priv, 0x418c80, 0x20200004);
nv_wr32(priv, 0x418c8c, 0x1);
nv_wr32(priv, 0x419000, 0x780);
nv_wr32(priv, 0x419004, 0x0);
nv_wr32(priv, 0x419008, 0x0);
nv_wr32(priv, 0x419014, 0x4);
}
static void
nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x419848, 0x0);
nv_wr32(priv, 0x419864, 0x129);
nv_wr32(priv, 0x419888, 0x0);
nv_wr32(priv, 0x419a00, 0xf0);
nv_wr32(priv, 0x419a04, 0x1);
nv_wr32(priv, 0x419a08, 0x21);
nv_wr32(priv, 0x419a0c, 0x20000);
nv_wr32(priv, 0x419a10, 0x0);
nv_wr32(priv, 0x419a14, 0x200);
nv_wr32(priv, 0x419a1c, 0xc000);
nv_wr32(priv, 0x419a20, 0x800);
nv_wr32(priv, 0x419a30, 0x1);
nv_wr32(priv, 0x419ac4, 0x37f440);
nv_wr32(priv, 0x419c00, 0xa);
nv_wr32(priv, 0x419c04, 0x80000006);
nv_wr32(priv, 0x419c08, 0x2);
nv_wr32(priv, 0x419c20, 0x0);
nv_wr32(priv, 0x419c24, 0x84210);
nv_wr32(priv, 0x419c28, 0x3efbefbe);
nv_wr32(priv, 0x419ce8, 0x0);
nv_wr32(priv, 0x419cf4, 0x3203);
nv_wr32(priv, 0x419e04, 0x0);
nv_wr32(priv, 0x419e08, 0x0);
nv_wr32(priv, 0x419e0c, 0x0);
nv_wr32(priv, 0x419e10, 0x402);
nv_wr32(priv, 0x419e44, 0x13eff2);
nv_wr32(priv, 0x419e48, 0x0);
nv_wr32(priv, 0x419e4c, 0x7f);
nv_wr32(priv, 0x419e50, 0x0);
nv_wr32(priv, 0x419e54, 0x0);
nv_wr32(priv, 0x419e58, 0x0);
nv_wr32(priv, 0x419e5c, 0x0);
nv_wr32(priv, 0x419e60, 0x0);
nv_wr32(priv, 0x419e64, 0x0);
nv_wr32(priv, 0x419e68, 0x0);
nv_wr32(priv, 0x419e6c, 0x0);
nv_wr32(priv, 0x419e70, 0x0);
nv_wr32(priv, 0x419e74, 0x0);
nv_wr32(priv, 0x419e78, 0x0);
nv_wr32(priv, 0x419e7c, 0x0);
nv_wr32(priv, 0x419e80, 0x0);
nv_wr32(priv, 0x419e84, 0x0);
nv_wr32(priv, 0x419e88, 0x0);
nv_wr32(priv, 0x419e8c, 0x0);
nv_wr32(priv, 0x419e90, 0x0);
nv_wr32(priv, 0x419e94, 0x0);
nv_wr32(priv, 0x419e98, 0x0);
nv_wr32(priv, 0x419eac, 0x1fcf);
nv_wr32(priv, 0x419eb0, 0xd3f);
nv_wr32(priv, 0x419ec8, 0x1304f);
nv_wr32(priv, 0x419f30, 0x0);
nv_wr32(priv, 0x419f34, 0x0);
nv_wr32(priv, 0x419f38, 0x0);
nv_wr32(priv, 0x419f3c, 0x0);
nv_wr32(priv, 0x419f40, 0x0);
nv_wr32(priv, 0x419f44, 0x0);
nv_wr32(priv, 0x419f48, 0x0);
nv_wr32(priv, 0x419f4c, 0x0);
nv_wr32(priv, 0x419f58, 0x0);
nv_wr32(priv, 0x419f78, 0xb);
}
static void
nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x41be24, 0x6);
nv_wr32(priv, 0x41bec0, 0x12180000);
nv_wr32(priv, 0x41bec4, 0x37f7f);
nv_wr32(priv, 0x41bee4, 0x6480430);
nv_wr32(priv, 0x41bf00, 0xa418820);
nv_wr32(priv, 0x41bf04, 0x62080e6);
nv_wr32(priv, 0x41bf08, 0x20398a4);
nv_wr32(priv, 0x41bf0c, 0xe629062);
nv_wr32(priv, 0x41bf10, 0xa418820);
nv_wr32(priv, 0x41bf14, 0xe6);
nv_wr32(priv, 0x41bfd0, 0x900103);
nv_wr32(priv, 0x41bfe0, 0x400001);
nv_wr32(priv, 0x41bfe4, 0x0);
}
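/* build the initial ("golden") context image for NVE0/Kepler PGRAPH:
* program every unit with its (reverse-engineered, largely undocumented)
* power-on state, then record the per-context buffers and the MMIO
* writes that get patched with their addresses */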
int
nve0_grctx_generate(struct nvc0_graph_priv *priv)
{
struct nvc0_grctx info;
int ret, i, gpc, tpc, id;
u32 data[6] = {}, data2[2] = {}, tmp;
u32 tpc_set = 0, tpc_mask = 0;
u32 magic[GPC_MAX][2], offset;
u8 tpcnr[GPC_MAX], a, b;
u8 shift, ntpcv;
ret = nvc0_grctx_init(priv, &info);
if (ret)
return ret;
nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
nv_wr32(priv, 0x400204, 0x00000000);
nv_wr32(priv, 0x400208, 0x00000000);
nve0_graph_generate_unk40xx(priv);
nve0_graph_generate_unk44xx(priv);
nve0_graph_generate_unk46xx(priv);
nve0_graph_generate_unk47xx(priv);
nve0_graph_generate_unk58xx(priv);
nve0_graph_generate_unk60xx(priv);
nve0_graph_generate_unk64xx(priv);
nve0_graph_generate_unk70xx(priv);
nve0_graph_generate_unk78xx(priv);
nve0_graph_generate_unk80xx(priv);
nve0_graph_generate_unk88xx(priv);
nve0_graph_generate_gpc(priv);
nve0_graph_generate_tpc(priv);
nve0_graph_generate_tpcunk(priv);
nv_wr32(priv, 0x404154, 0x0);
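/* record per-context buffer allocations and the register writes that
* reference them; the (shift, buffer) arguments select which buffer's
* address is patched into the value when a channel context is built */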
mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
mmio_list(0x40800c, 0x00000000, 8, 1);
mmio_list(0x408010, 0x80000000, 0, 0);
mmio_list(0x419004, 0x00000000, 8, 1);
mmio_list(0x419008, 0x00000000, 0, 0);
mmio_list(0x4064cc, 0x80000000, 0, 0);
mmio_list(0x408004, 0x00000000, 8, 0);
mmio_list(0x408008, 0x80000030, 0, 0);
mmio_list(0x418808, 0x00000000, 8, 0);
mmio_list(0x41880c, 0x80000030, 0, 0);
mmio_list(0x4064c8, 0x01800600, 0, 0);
mmio_list(0x418810, 0x80000000, 12, 2);
mmio_list(0x419848, 0x10000000, 12, 2);
mmio_list(0x405830, 0x02180648, 0, 0);
mmio_list(0x4064c4, 0x0192ffff, 0, 0);
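/* per-GPC "magic" values: the 0x0218/0x0648 multipliers scale with each
* GPC's TPC count; the constants themselves are reverse-engineered and
* their exact hardware meaning is unconfirmed */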
for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
magic[gpc][1] = 0x00000000 | (magic1 << 16);
offset += 0x0324 * priv->tpc_nr[gpc];
}
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
offset += 0x07ff * priv->tpc_nr[gpc];
}
mmio_list(0x17e91c, 0x06060609, 0, 0);
mmio_list(0x17e920, 0x00090a05, 0, 0);
nv_wr32(priv, 0x418c6c, 0x1);
nv_wr32(priv, 0x41980c, 0x10);
nv_wr32(priv, 0x41be08, 0x4);
nv_wr32(priv, 0x4064c0, 0x801a00f0);
nv_wr32(priv, 0x405800, 0xf8000bf);
nv_wr32(priv, 0x419c00, 0xa);
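/* hand out sequential global ids to every TPC that exists, and mirror
* each GPC's TPC count into its two per-GPC count registers */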
for (tpc = 0, id = 0; tpc < 4; tpc++) {
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
if (tpc < priv->tpc_nr[gpc]) {
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
}
nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
}
}
tmp = 0;
for (i = 0; i < priv->gpc_nr; i++)
tmp |= priv->tpc_nr[i] << (i * 4);
nv_wr32(priv, 0x406028, tmp);
nv_wr32(priv, 0x405870, tmp);
nv_wr32(priv, 0x40602c, 0x0);
nv_wr32(priv, 0x405874, 0x0);
nv_wr32(priv, 0x406030, 0x0);
nv_wr32(priv, 0x405878, 0x0);
nv_wr32(priv, 0x406034, 0x0);
nv_wr32(priv, 0x40587c, 0x0);
/* calculate first set of magics */
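/* distribute TPCs across GPCs round-robin: each 32-bit word of data[]
* packs six 5-bit GPC indices, one per global TPC id; unused slots are
* filled with the out-of-range index 7 just below */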
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
gpc = -1;
for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
data[tpc / 6] |= gpc << ((tpc % 6) * 5);
}
for (; tpc < 32; tpc++)
data[tpc / 6] |= 7 << ((tpc % 6) * 5);
/* and the second... */
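/* left-shift the TPC count until bit 4 is set, remembering the shift;
* the packed words also carry (1 << n) % ntpcv remainder terms, whose
* hardware meaning is reverse-engineered rather than documented */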
shift = 0;
ntpcv = priv->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
}
data2[0] = ntpcv << 16;
data2[0] |= shift << 21;
data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
data2[0] |= priv->tpc_total << 8;
data2[0] |= priv->magic_not_rop_nr;
for (i = 1; i < 7; i++)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* and write it all to the various parts of PGRAPH */
nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
nv_wr32(priv, 0x41bfd0, data2[0]);
nv_wr32(priv, 0x41bfe4, data2[1]);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x40780c + (i * 4), data[i]);
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (gpc = 0; gpc < priv->gpc_nr; gpc++)
tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
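/* tpc_mask now holds one bit per present TPC (8 bits reserved per GPC);
* the 32 entries below enable TPCs one at a time at evenly spaced
* steps, each entry receiving the accumulated set and its complement */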
for (i = 0, gpc = -1, b = -1; i < 32; i++) {
a = (i * (priv->tpc_total - 1)) / 32;
if (a != b) {
b = a;
do {
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc_set |= 1 << ((gpc * 8) + tpc);
}
nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
}
for (i = 0; i < 8; i++)
nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
if (priv->gpc_nr == 1) {
nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
} else {
nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
}
nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
nve0_grctx_generate_icmd(priv);
nve0_grctx_generate_a097(priv);
nve0_grctx_generate_902d(priv);
nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
nv_wr32(priv, 0x418800, 0x7026860a); //XXX
nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX
return nvc0_grctx_fini(&info);
}
| gpl-2.0 |
BORETS24/Zenfone-2-500CL | linux/kernel/drivers/net/ethernet/freescale/ucc_geth.c | 2082 | 119982 | /*
* Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
*
* Description:
* QE UCC Gigabit Ethernet Driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
#include <asm/machdep.h>
#include "ucc_geth.h"
#undef DEBUG
#define ugeth_printk(level, format, arg...) \
printk(level format "\n", ## arg)
#define ugeth_dbg(format, arg...) \
ugeth_printk(KERN_DEBUG , format , ## arg)
#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif /* UGETH_VERBOSE_DEBUG */
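/* default message level: every NETIF_MSG_* flag up to and including IFUP */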
#define UGETH_MSG_DEFAULT ((NETIF_MSG_IFUP << 1) - 1)
static DEFINE_SPINLOCK(ugeth_lock);
static struct {
u32 msg_enable;
} debug = { -1 };
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
static struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
.bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
.max_rx_buf_length = 1536,
/* adjusted at startup if max-speed 1000 */
.urfs = UCC_GETH_URFS_INIT,
.urfet = UCC_GETH_URFET_INIT,
.urfset = UCC_GETH_URFSET_INIT,
.utfs = UCC_GETH_UTFS_INIT,
.utfet = UCC_GETH_UTFET_INIT,
.utftt = UCC_GETH_UTFTT_INIT,
.ufpt = 256,
.mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
.tenc = UCC_FAST_TX_ENCODING_NRZ,
.renc = UCC_FAST_RX_ENCODING_NRZ,
.tcrc = UCC_FAST_16_BIT_CRC,
.synl = UCC_FAST_SYNC_LEN_NOT_USED,
},
.numQueuesTx = 1,
.numQueuesRx = 1,
.extendedFilteringChainPointer = ((uint32_t) NULL),
.typeorlen = 3072 /*1536 */ ,
.nonBackToBackIfgPart1 = 0x40,
.nonBackToBackIfgPart2 = 0x60,
.miminumInterFrameGapEnforcement = 0x50,
.backToBackInterFrameGap = 0x60,
.mblinterval = 128,
.nortsrbytetime = 5,
.fracsiz = 1,
.strictpriorityq = 0xff,
.altBebTruncation = 0xa,
.excessDefer = 1,
.maxRetransmission = 0xf,
.collisionWindow = 0x37,
.receiveFlowControl = 1,
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
.prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
.maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
.vlantype = 0x8100,
.ecamptr = ((uint32_t) NULL),
.eventRegMask = UCCE_OTHER,
.pausePeriod = 0xf000,
.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
.bdRingLenTx = {
TX_BD_RING_LEN,
TX_BD_RING_LEN,
TX_BD_RING_LEN,
TX_BD_RING_LEN,
TX_BD_RING_LEN,
TX_BD_RING_LEN,
TX_BD_RING_LEN,
TX_BD_RING_LEN},
.bdRingLenRx = {
RX_BD_RING_LEN,
RX_BD_RING_LEN,
RX_BD_RING_LEN,
RX_BD_RING_LEN,
RX_BD_RING_LEN,
RX_BD_RING_LEN,
RX_BD_RING_LEN,
RX_BD_RING_LEN},
.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
.largestexternallookupkeysize =
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
static struct ucc_geth_info ugeth_info[8];
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
u8 *i;
int size16Align = (size >> 4) << 4;
int size4Align = (size >> 2) << 2;
int notAlign = 0;
if (size % 16)
notAlign = 1;
for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
printk("0x%08x: %08x %08x %08x %08x\r\n",
(u32) i,
*((u32 *) (i)),
*((u32 *) (i + 4)),
*((u32 *) (i + 8)), *((u32 *) (i + 12)));
if (notAlign == 1)
printk("0x%08x: ", (u32) i);
for (; (u32) i < (u32) addr + size4Align; i += 4)
printk("%08x ", *((u32 *) (i)));
for (; (u32) i < (u32) addr + size; i++)
printk("%02x", *((i)));
if (notAlign == 1)
printk("\r\n");
}
#endif /* DEBUG */
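/* atomically pop the first node off a list, or return NULL if empty */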
static struct list_head *dequeue(struct list_head *lh)
{
unsigned long flags;
spin_lock_irqsave(&ugeth_lock, flags);
if (!list_empty(lh)) {
struct list_head *node = lh->next;
list_del(node);
spin_unlock_irqrestore(&ugeth_lock, flags);
return node;
} else {
spin_unlock_irqrestore(&ugeth_lock, flags);
return NULL;
}
}
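/* allocate a receive skb sized for the largest frame plus alignment
* slack, align its data area, and attach it to the given BD */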
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
u8 __iomem *bd)
{
struct sk_buff *skb;
skb = netdev_alloc_skb(ugeth->ndev,
ugeth->ug_info->uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT);
if (!skb)
return NULL;
/* We need the data buffer to be aligned properly. We will reserve
* as many bytes as needed to align the data properly
*/
skb_reserve(skb,
UCC_GETH_RX_DATA_BUF_ALIGNMENT -
(((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
1)));
out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(ugeth->dev,
skb->data,
ugeth->ug_info->uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT,
DMA_FROM_DEVICE));
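/* mark the BD empty (R_E) with receive interrupt enabled (R_I),
* preserving only the existing wrap bit (R_W) */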
out_be32((u32 __iomem *)bd,
(R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
return skb;
}
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
u8 __iomem *bd;
u32 bd_status;
struct sk_buff *skb;
int i;
bd = ugeth->p_rx_bd_ring[rxQ];
i = 0;
do {
bd_status = in_be32((u32 __iomem *)bd);
skb = get_new_skb(ugeth, bd);
if (!skb) /* If we cannot allocate a data buffer, abort;
cleanup will happen elsewhere */
return -ENOMEM;
ugeth->rx_skbuff[rxQ][i] = skb;
/* advance the BD pointer */
bd += sizeof(struct qe_bd);
i++;
} while (!(bd_status & R_W));
return 0;
}
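/* for each virtual thread, allocate an SNUM and (except optionally for
* the first Rx entry, which has no page) a MURAM page, then pack snum,
* page offset and RISC allocation bits into the init enet entries */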
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
u32 *p_start,
u8 num_entries,
u32 thread_size,
u32 thread_alignment,
unsigned int risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
u8 i;
int snum;
for (i = 0; i < num_entries; i++) {
if ((snum = qe_get_snum()) < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not get SNUM\n");
return snum;
}
if ((i == 0) && skip_page_for_first_entry)
/* First entry of Rx does not have page */
init_enet_offset = 0;
else {
init_enet_offset =
qe_muram_alloc(thread_size, thread_alignment);
if (IS_ERR_VALUE(init_enet_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory\n");
qe_put_snum((u8) snum);
return -ENOMEM;
}
}
*(p_start++) =
((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
| risc;
}
return 0;
}
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
u32 *p_start,
u8 num_entries,
unsigned int risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
u8 i;
int snum;
for (i = 0; i < num_entries; i++) {
u32 val = *p_start;
/* Check that this entry was actually valid --
needed in case failed in allocations */
if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
snum =
(u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
ENET_INIT_PARAM_SNUM_SHIFT;
qe_put_snum((u8) snum);
if (!((i == 0) && skip_page_for_first_entry)) {
/* First entry of Rx does not have page */
init_enet_offset =
(val & ENET_INIT_PARAM_PTR_MASK);
qe_muram_free(init_enet_offset);
}
*p_start++ = 0;
}
}
return 0;
}
#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
u32 __iomem *p_start,
u8 num_entries,
u32 thread_size,
unsigned int risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
u8 i;
int snum;
for (i = 0; i < num_entries; i++) {
u32 val = in_be32(p_start);
/* Check that this entry was actually valid --
needed in case failed in allocations */
if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
snum =
(u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
ENET_INIT_PARAM_SNUM_SHIFT;
qe_put_snum((u8) snum);
if (!((i == 0) && skip_page_for_first_entry)) {
/* First entry of Rx does not have page */
init_enet_offset =
(in_be32(p_start) &
ENET_INIT_PARAM_PTR_MASK);
pr_info("Init enet entry %d:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)qe_muram_addr(init_enet_offset));
mem_disp(qe_muram_addr(init_enet_offset),
thread_size);
}
p_start++;
}
}
return 0;
}
#endif
static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
kfree(enet_addr_cont);
}
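/* load a six-byte MAC address into three 16-bit registers; the bytes
* are written in reverse order since the hardware takes the station
* address in little-endian byte order (see the hashing comment below) */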
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
if (paddr_num >= NUM_OF_PADDRS) {
pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
return -EINVAL;
}
p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
addressfiltering;
/* Writing address ff.ff.ff.ff.ff.ff disables address
recognition for this register */
out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
return 0;
}
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
u8 *p_enet_addr)
{
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
u32 cecr_subblock;
p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
addressfiltering;
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
/* Ethernet frames are defined in Little Endian mode; therefore, to
* insert the address into the hash (Big Endian mode), we reverse the
* bytes. */
set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
QE_CR_PROTOCOL_ETHERNET, 0);
}
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
return memcmp(addr1, addr2, ETH_ALEN);
}
#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
struct ucc_geth_tx_firmware_statistics *
tx_firmware_statistics,
struct ucc_geth_rx_firmware_statistics *
rx_firmware_statistics,
struct ucc_geth_hardware_statistics *hardware_statistics)
{
struct ucc_fast __iomem *uf_regs;
struct ucc_geth __iomem *ug_regs;
struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
ug_regs = ugeth->ug_regs;
uf_regs = (struct ucc_fast __iomem *) ug_regs;
p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
	/* Copy Tx firmware counters only if the user passed a pointer and
	   the driver actually gathers Tx firmware statistics */
if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
tx_firmware_statistics->sicoltx =
in_be32(&p_tx_fw_statistics_pram->sicoltx);
tx_firmware_statistics->mulcoltx =
in_be32(&p_tx_fw_statistics_pram->mulcoltx);
tx_firmware_statistics->latecoltxfr =
in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
tx_firmware_statistics->frabortduecol =
in_be32(&p_tx_fw_statistics_pram->frabortduecol);
tx_firmware_statistics->frlostinmactxer =
in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
tx_firmware_statistics->carriersenseertx =
in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
tx_firmware_statistics->frtxok =
in_be32(&p_tx_fw_statistics_pram->frtxok);
tx_firmware_statistics->txfrexcessivedefer =
in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
tx_firmware_statistics->txpkts256 =
in_be32(&p_tx_fw_statistics_pram->txpkts256);
tx_firmware_statistics->txpkts512 =
in_be32(&p_tx_fw_statistics_pram->txpkts512);
tx_firmware_statistics->txpkts1024 =
in_be32(&p_tx_fw_statistics_pram->txpkts1024);
tx_firmware_statistics->txpktsjumbo =
in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
}
	/* Copy Rx firmware counters only if the user passed a pointer and
	 * the driver actually gathers Rx firmware statistics */
if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
int i;
rx_firmware_statistics->frrxfcser =
in_be32(&p_rx_fw_statistics_pram->frrxfcser);
rx_firmware_statistics->fraligner =
in_be32(&p_rx_fw_statistics_pram->fraligner);
rx_firmware_statistics->inrangelenrxer =
in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
rx_firmware_statistics->outrangelenrxer =
in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
rx_firmware_statistics->frtoolong =
in_be32(&p_rx_fw_statistics_pram->frtoolong);
rx_firmware_statistics->runt =
in_be32(&p_rx_fw_statistics_pram->runt);
rx_firmware_statistics->verylongevent =
in_be32(&p_rx_fw_statistics_pram->verylongevent);
rx_firmware_statistics->symbolerror =
in_be32(&p_rx_fw_statistics_pram->symbolerror);
rx_firmware_statistics->dropbsy =
in_be32(&p_rx_fw_statistics_pram->dropbsy);
for (i = 0; i < 0x8; i++)
rx_firmware_statistics->res0[i] =
p_rx_fw_statistics_pram->res0[i];
rx_firmware_statistics->mismatchdrop =
in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
rx_firmware_statistics->underpkts =
in_be32(&p_rx_fw_statistics_pram->underpkts);
rx_firmware_statistics->pkts256 =
in_be32(&p_rx_fw_statistics_pram->pkts256);
rx_firmware_statistics->pkts512 =
in_be32(&p_rx_fw_statistics_pram->pkts512);
rx_firmware_statistics->pkts1024 =
in_be32(&p_rx_fw_statistics_pram->pkts1024);
rx_firmware_statistics->pktsjumbo =
in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
rx_firmware_statistics->frlossinmacer =
in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
rx_firmware_statistics->pausefr =
in_be32(&p_rx_fw_statistics_pram->pausefr);
for (i = 0; i < 0x4; i++)
rx_firmware_statistics->res1[i] =
p_rx_fw_statistics_pram->res1[i];
rx_firmware_statistics->removevlan =
in_be32(&p_rx_fw_statistics_pram->removevlan);
rx_firmware_statistics->replacevlan =
in_be32(&p_rx_fw_statistics_pram->replacevlan);
rx_firmware_statistics->insertvlan =
in_be32(&p_rx_fw_statistics_pram->insertvlan);
}
	/* Copy hardware counters only if the user passed a pointer and
	   hardware statistics gathering (UPSMR.HSE) is enabled */
if (hardware_statistics &&
(in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
hardware_statistics->txok = in_be32(&ug_regs->txok);
hardware_statistics->txcf = in_be16(&ug_regs->txcf);
hardware_statistics->tmca = in_be32(&ug_regs->tmca);
hardware_statistics->tbca = in_be32(&ug_regs->tbca);
hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
hardware_statistics->rmca = in_be32(&ug_regs->rmca);
hardware_statistics->rbca = in_be32(&ug_regs->rbca);
}
}
static void dump_bds(struct ucc_geth_private *ugeth)
{
int i;
int length;
for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
if (ugeth->p_tx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenTx[i] *
sizeof(struct qe_bd));
pr_info("TX BDs[%d]\n", i);
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
}
for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
if (ugeth->p_rx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenRx[i] *
sizeof(struct qe_bd));
pr_info("RX BDs[%d]\n", i);
mem_disp(ugeth->p_rx_bd_ring[i], length);
}
}
}
static void dump_regs(struct ucc_geth_private *ugeth)
{
int i;
pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->maccfg1,
in_be32(&ugeth->ug_regs->maccfg1));
pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->maccfg2,
in_be32(&ugeth->ug_regs->maccfg2));
pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->ipgifg,
in_be32(&ugeth->ug_regs->ipgifg));
pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->hafdup,
in_be32(&ugeth->ug_regs->hafdup));
pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->ifctl,
in_be32(&ugeth->ug_regs->ifctl));
pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->ifstat,
in_be32(&ugeth->ug_regs->ifstat));
pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->macstnaddr1,
in_be32(&ugeth->ug_regs->macstnaddr1));
pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->macstnaddr2,
in_be32(&ugeth->ug_regs->macstnaddr2));
pr_info("uempr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->uempr,
in_be32(&ugeth->ug_regs->uempr));
pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->utbipar,
in_be32(&ugeth->ug_regs->utbipar));
pr_info("uescr : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->ug_regs->uescr,
in_be16(&ugeth->ug_regs->uescr));
pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->tx64,
in_be32(&ugeth->ug_regs->tx64));
pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->tx127,
in_be32(&ugeth->ug_regs->tx127));
pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->tx255,
in_be32(&ugeth->ug_regs->tx255));
pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rx64,
in_be32(&ugeth->ug_regs->rx64));
pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rx127,
in_be32(&ugeth->ug_regs->rx127));
pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rx255,
in_be32(&ugeth->ug_regs->rx255));
pr_info("txok : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->txok,
in_be32(&ugeth->ug_regs->txok));
pr_info("txcf : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->ug_regs->txcf,
in_be16(&ugeth->ug_regs->txcf));
pr_info("tmca : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->tmca,
in_be32(&ugeth->ug_regs->tmca));
pr_info("tbca : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->tbca,
in_be32(&ugeth->ug_regs->tbca));
pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rxfok,
in_be32(&ugeth->ug_regs->rxfok));
pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rxbok,
in_be32(&ugeth->ug_regs->rxbok));
pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rbyt,
in_be32(&ugeth->ug_regs->rbyt));
pr_info("rmca : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rmca,
in_be32(&ugeth->ug_regs->rmca));
pr_info("rbca : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->rbca,
in_be32(&ugeth->ug_regs->rbca));
pr_info("scar : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->scar,
in_be32(&ugeth->ug_regs->scar));
pr_info("scam : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->ug_regs->scam,
in_be32(&ugeth->ug_regs->scam));
if (ugeth->p_thread_data_tx) {
int numThreadsTxNumerical;
switch (ugeth->ug_info->numThreadsTx) {
case UCC_GETH_NUM_OF_THREADS_1:
numThreadsTxNumerical = 1;
break;
case UCC_GETH_NUM_OF_THREADS_2:
numThreadsTxNumerical = 2;
break;
case UCC_GETH_NUM_OF_THREADS_4:
numThreadsTxNumerical = 4;
break;
case UCC_GETH_NUM_OF_THREADS_6:
numThreadsTxNumerical = 6;
break;
case UCC_GETH_NUM_OF_THREADS_8:
numThreadsTxNumerical = 8;
break;
default:
numThreadsTxNumerical = 0;
break;
}
pr_info("Thread data TXs:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_tx);
for (i = 0; i < numThreadsTxNumerical; i++) {
pr_info("Thread data TX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_tx[i]);
mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
sizeof(struct ucc_geth_thread_data_tx));
}
}
if (ugeth->p_thread_data_rx) {
int numThreadsRxNumerical;
switch (ugeth->ug_info->numThreadsRx) {
case UCC_GETH_NUM_OF_THREADS_1:
numThreadsRxNumerical = 1;
break;
case UCC_GETH_NUM_OF_THREADS_2:
numThreadsRxNumerical = 2;
break;
case UCC_GETH_NUM_OF_THREADS_4:
numThreadsRxNumerical = 4;
break;
case UCC_GETH_NUM_OF_THREADS_6:
numThreadsRxNumerical = 6;
break;
case UCC_GETH_NUM_OF_THREADS_8:
numThreadsRxNumerical = 8;
break;
default:
numThreadsRxNumerical = 0;
break;
}
pr_info("Thread data RX:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_rx);
for (i = 0; i < numThreadsRxNumerical; i++) {
pr_info("Thread data RX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_rx[i]);
mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
sizeof(struct ucc_geth_thread_data_rx));
}
}
if (ugeth->p_exf_glbl_param) {
pr_info("EXF global param:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_exf_glbl_param);
mem_disp((u8 *) ugeth->p_exf_glbl_param,
sizeof(*ugeth->p_exf_glbl_param));
}
if (ugeth->p_tx_glbl_pram) {
pr_info("TX global param:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
pr_info("temoder : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_tx_glbl_pram->temoder,
in_be16(&ugeth->p_tx_glbl_pram->temoder));
pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->sqptr,
in_be32(&ugeth->p_tx_glbl_pram->sqptr));
pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
pr_info("tstate : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->tstate,
in_be32(&ugeth->p_tx_glbl_pram->tstate));
pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
ugeth->p_tx_glbl_pram->iphoffset[0]);
pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
ugeth->p_tx_glbl_pram->iphoffset[1]);
pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
ugeth->p_tx_glbl_pram->iphoffset[2]);
pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
ugeth->p_tx_glbl_pram->iphoffset[3]);
pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
ugeth->p_tx_glbl_pram->iphoffset[4]);
pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
ugeth->p_tx_glbl_pram->iphoffset[5]);
pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
ugeth->p_tx_glbl_pram->iphoffset[6]);
pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
ugeth->p_tx_glbl_pram->iphoffset[7]);
pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_tx_glbl_pram->tqptr,
in_be32(&ugeth->p_tx_glbl_pram->tqptr));
}
if (ugeth->p_rx_glbl_pram) {
pr_info("RX global param:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
pr_info("remoder : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->remoder,
in_be32(&ugeth->p_rx_glbl_pram->remoder));
pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->rqptr,
in_be32(&ugeth->p_rx_glbl_pram->rqptr));
pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->typeorlen,
in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_rx_glbl_pram->rxgstpack,
ugeth->p_rx_glbl_pram->rxgstpack);
pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
pr_info("rstate : addr - 0x%08x, val - 0x%02x\n",
(u32)&ugeth->p_rx_glbl_pram->rstate,
ugeth->p_rx_glbl_pram->rstate);
pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->mrblr,
in_be16(&ugeth->p_rx_glbl_pram->mrblr));
pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->rbdqptr,
in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
pr_info("mflr : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->mflr,
in_be16(&ugeth->p_rx_glbl_pram->mflr));
pr_info("minflr : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->minflr,
in_be16(&ugeth->p_rx_glbl_pram->minflr));
pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->maxd1,
in_be16(&ugeth->p_rx_glbl_pram->maxd1));
pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->maxd2,
in_be16(&ugeth->p_rx_glbl_pram->maxd2));
pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->ecamptr,
in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l2qt,
in_be32(&ugeth->p_rx_glbl_pram->l2qt));
pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[0],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[1],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[2],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[3],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[4],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[5],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[6],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->l3qt[7],
in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->vlantype,
in_be16(&ugeth->p_rx_glbl_pram->vlantype));
pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n",
(u32)&ugeth->p_rx_glbl_pram->vlantci,
in_be16(&ugeth->p_rx_glbl_pram->vlantci));
for (i = 0; i < 64; i++)
pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
i,
(u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
ugeth->p_rx_glbl_pram->addressfiltering[i]);
pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
}
if (ugeth->p_send_q_mem_reg) {
pr_info("Send Q memory registers:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
pr_info("SQQD[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
sizeof(struct ucc_geth_send_queue_qd));
}
}
if (ugeth->p_scheduler) {
pr_info("Scheduler:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
mem_disp((u8 *) ugeth->p_scheduler,
sizeof(*ugeth->p_scheduler));
}
if (ugeth->p_tx_fw_statistics_pram) {
pr_info("TX FW statistics pram:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_tx_fw_statistics_pram);
mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
sizeof(*ugeth->p_tx_fw_statistics_pram));
}
if (ugeth->p_rx_fw_statistics_pram) {
pr_info("RX FW statistics pram:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_rx_fw_statistics_pram);
mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
sizeof(*ugeth->p_rx_fw_statistics_pram));
}
if (ugeth->p_rx_irq_coalescing_tbl) {
pr_info("RX IRQ coalescing tables:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_rx_irq_coalescing_tbl);
for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
pr_info("RX IRQ coalescing table entry[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
coalescingentry[i]);
pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
coalescingentry[i].interruptcoalescingmaxvalue,
in_be32(&ugeth->p_rx_irq_coalescing_tbl->
coalescingentry[i].
interruptcoalescingmaxvalue));
pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
coalescingentry[i].interruptcoalescingcounter,
in_be32(&ugeth->p_rx_irq_coalescing_tbl->
coalescingentry[i].
interruptcoalescingcounter));
}
}
if (ugeth->p_rx_bd_qs_tbl) {
pr_info("RX BD QS tables:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
pr_info("RX BD QS table[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i]);
pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
in_be32(&ugeth->p_rx_bd_qs_tbl[i].
externalbdbaseptr));
pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
pr_info("ucode RX Prefetched BDs:\n");
pr_info("Base address: 0x%08x\n",
(u32)qe_muram_addr(in_be32
(&ugeth->p_rx_bd_qs_tbl[i].
bdbaseptr)));
mem_disp((u8 *)
qe_muram_addr(in_be32
(&ugeth->p_rx_bd_qs_tbl[i].
bdbaseptr)),
sizeof(struct ucc_geth_rx_prefetched_bds));
}
}
if (ugeth->p_init_enet_param_shadow) {
int size;
pr_info("Init enet param shadow:\n");
pr_info("Base address: 0x%08x\n",
(u32) ugeth->p_init_enet_param_shadow);
mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
sizeof(*ugeth->p_init_enet_param_shadow));
size = sizeof(struct ucc_geth_thread_rx_pram);
if (ugeth->ug_info->rxExtendedFiltering) {
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ugeth->ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ugeth->ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
}
dump_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
txthread[0]),
ENET_INIT_PARAM_MAX_ENTRIES_TX,
sizeof(struct ucc_geth_thread_tx_pram),
ugeth->ug_info->riscTx, 0);
dump_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
rxthread[0]),
ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
ugeth->ug_info->riscRx, 1);
}
}
#endif /* DEBUG */
static void init_default_reg_vals(u32 __iomem *upsmr_register,
u32 __iomem *maccfg1_register,
u32 __iomem *maccfg2_register)
{
out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}
static int init_half_duplex_params(int alt_beb,
int back_pressure_no_backoff,
int no_backoff,
int excess_defer,
u8 alt_beb_truncation,
u8 max_retransmissions,
u8 collision_window,
u32 __iomem *hafdup_register)
{
u32 value = 0;
if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
(max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
(collision_window > HALFDUP_COLLISION_WINDOW_MAX))
return -EINVAL;
value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
if (alt_beb)
value |= HALFDUP_ALT_BEB;
if (back_pressure_no_backoff)
value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
if (no_backoff)
value |= HALFDUP_NO_BACKOFF;
if (excess_defer)
value |= HALFDUP_EXCESSIVE_DEFER;
value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
value |= collision_window;
out_be32(hafdup_register, value);
return 0;
}
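/*
 * Usage sketch -- the argument values below are illustrative only, not
 * defaults mandated anywhere in this file:
 *
 *     ret = init_half_duplex_params(1, 0, 0, 0,
 *                                   10,   // alt_beb_truncation
 *                                   15,   // max_retransmissions
 *                                   55,   // collision_window, byte times
 *                                   &ug_regs->hafdup);
 *     if (ret)
 *         return ret;  // a parameter exceeded its *_MAX bound
 */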
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
u8 non_btb_ipg,
u8 min_ifg,
u8 btb_ipg,
u32 __iomem *ipgifg_register)
{
u32 value = 0;
/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
IPG part 2 */
if (non_btb_cs_ipg > non_btb_ipg)
return -EINVAL;
if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
(non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
/*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
(btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
return -EINVAL;
value |=
((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
IPGIFG_NBTB_CS_IPG_MASK);
value |=
((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
IPGIFG_NBTB_IPG_MASK);
value |=
((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
IPGIFG_MIN_IFG_MASK);
value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
out_be32(ipgifg_register, value);
return 0;
}
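/*
 * Usage sketch (illustrative values): the four gaps are packed into one
 * register, each masked into its own field, e.g.
 *
 *     ret = init_inter_frame_gap_params(0x40, 0x60, 0x50, 0x60,
 *                                       &ug_regs->ipgifg);
 *
 * The call fails with -EINVAL if part 1 of the non-back-to-back gap
 * exceeds part 2, since the carrier-sense window must fit inside the
 * full gap.
 */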
int init_flow_control_params(u32 automatic_flow_control_mode,
int rx_flow_control_enable,
int tx_flow_control_enable,
u16 pause_period,
u16 extension_field,
u32 __iomem *upsmr_register,
u32 __iomem *uempr_register,
u32 __iomem *maccfg1_register)
{
u32 value = 0;
/* Set UEMPR register */
value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
out_be32(uempr_register, value);
/* Set UPSMR register */
setbits32(upsmr_register, automatic_flow_control_mode);
value = in_be32(maccfg1_register);
if (rx_flow_control_enable)
value |= MACCFG1_FLOW_RX;
if (tx_flow_control_enable)
value |= MACCFG1_FLOW_TX;
out_be32(maccfg1_register, value);
return 0;
}
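/*
 * Usage sketch (illustrative): enable symmetric flow control with a pause
 * period of 0xf000 slot times and no extended pause field; the exact aufc
 * mode constant comes from the board configuration, not from this file:
 *
 *     init_flow_control_params(aufc, 1, 1, 0xf000, 0,
 *                              &uf_regs->upsmr, &ug_regs->uempr,
 *                              &ug_regs->maccfg1);
 */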
static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
int auto_zero_hardware_statistics,
u32 __iomem *upsmr_register,
u16 __iomem *uescr_register)
{
u16 uescr_value = 0;
/* Enable hardware statistics gathering if requested */
if (enable_hardware_statistics)
setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
/* Clear hardware statistics counters */
uescr_value = in_be16(uescr_register);
uescr_value |= UESCR_CLRCNT;
/* Automatically zero hardware statistics counters on read,
if requested */
if (auto_zero_hardware_statistics)
uescr_value |= UESCR_AUTOZ;
out_be16(uescr_register, uescr_value);
return 0;
}
static int init_firmware_statistics_gathering_mode(int
enable_tx_firmware_statistics,
int enable_rx_firmware_statistics,
u32 __iomem *tx_rmon_base_ptr,
u32 tx_firmware_statistics_structure_address,
u32 __iomem *rx_rmon_base_ptr,
u32 rx_firmware_statistics_structure_address,
u16 __iomem *temoder_register,
u32 __iomem *remoder_register)
{
	/* Note: this function does not check if the parameters
	   it receives are NULL */
if (enable_tx_firmware_statistics) {
out_be32(tx_rmon_base_ptr,
tx_firmware_statistics_structure_address);
setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
}
if (enable_rx_firmware_statistics) {
out_be32(rx_rmon_base_ptr,
rx_firmware_statistics_structure_address);
setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
}
return 0;
}
static int init_mac_station_addr_regs(u8 address_byte_0,
u8 address_byte_1,
u8 address_byte_2,
u8 address_byte_3,
u8 address_byte_4,
u8 address_byte_5,
u32 __iomem *macstnaddr1_register,
u32 __iomem *macstnaddr2_register)
{
u32 value = 0;
/* Example: for a station address of 0x12345678ABCD, */
/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
/* MACSTNADDR1 Register: */
/* 0 7 8 15 */
/* station address byte 5 station address byte 4 */
/* 16 23 24 31 */
/* station address byte 3 station address byte 2 */
value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
out_be32(macstnaddr1_register, value);
/* MACSTNADDR2 Register: */
/* 0 7 8 15 */
/* station address byte 1 station address byte 0 */
/* 16 23 24 31 */
/* reserved reserved */
value = 0;
value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
out_be32(macstnaddr2_register, value);
return 0;
}
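/*
 * Continuing the 0x12345678ABCD example from the comments above: bytes
 * 0..5 are 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD, so the code ends up writing
 *
 *     MACSTNADDR1 = 0xCDAB7856   (bytes 5,4,3,2, MSB first)
 *     MACSTNADDR2 = 0x34120000   (bytes 1,0 in the upper half)
 */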
static int init_check_frame_length_mode(int length_check,
u32 __iomem *maccfg2_register)
{
u32 value = 0;
value = in_be32(maccfg2_register);
if (length_check)
value |= MACCFG2_LC;
else
value &= ~MACCFG2_LC;
out_be32(maccfg2_register, value);
return 0;
}
static int init_preamble_length(u8 preamble_length,
u32 __iomem *maccfg2_register)
{
if ((preamble_length < 3) || (preamble_length > 7))
return -EINVAL;
clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
preamble_length << MACCFG2_PREL_SHIFT);
return 0;
}
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
{
u32 value = 0;
value = in_be32(upsmr_register);
if (reject_broadcast)
value |= UCC_GETH_UPSMR_BRO;
else
value &= ~UCC_GETH_UPSMR_BRO;
if (receive_short_frames)
value |= UCC_GETH_UPSMR_RSH;
else
value &= ~UCC_GETH_UPSMR_RSH;
if (promiscuous)
value |= UCC_GETH_UPSMR_PRO;
else
value &= ~UCC_GETH_UPSMR_PRO;
out_be32(upsmr_register, value);
return 0;
}
static int init_max_rx_buff_len(u16 max_rx_buf_len,
u16 __iomem *mrblr_register)
{
	/* max_rx_buf_len must be a non-zero multiple of 128 */
if ((max_rx_buf_len == 0) ||
(max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
return -EINVAL;
out_be16(mrblr_register, max_rx_buf_len);
return 0;
}
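/*
 * Example: a buffer length of 1536 (12 * 128) is accepted, while the
 * classic 1500-byte MTU is rejected because 1500 is not a multiple
 * of 128.
 */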
static int init_min_frame_len(u16 min_frame_length,
u16 __iomem *minflr_register,
u16 __iomem *mrblr_register)
{
u16 mrblr_value = 0;
mrblr_value = in_be16(mrblr_register);
if (min_frame_length >= (mrblr_value - 4))
return -EINVAL;
out_be16(minflr_register, min_frame_length);
return 0;
}
static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
int ret_val;
u32 upsmr, maccfg2;
u16 value;
ugeth_vdbg("%s: IN", __func__);
ug_info = ugeth->ug_info;
ug_regs = ugeth->ug_regs;
uf_regs = ugeth->uccf->uf_regs;
/* Set MACCFG2 */
maccfg2 = in_be32(&ug_regs->maccfg2);
maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
if ((ugeth->max_speed == SPEED_10) ||
(ugeth->max_speed == SPEED_100))
maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
else if (ugeth->max_speed == SPEED_1000)
maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
maccfg2 |= ug_info->padAndCrc;
out_be32(&ug_regs->maccfg2, maccfg2);
/* Set UPSMR */
upsmr = in_be32(&uf_regs->upsmr);
upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;
/* FALLTHROUGH */
case SPEED_100:
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
upsmr |= UCC_GETH_UPSMR_RMM;
}
}
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
upsmr |= UCC_GETH_UPSMR_SGMM;
out_be32(&uf_regs->upsmr, upsmr);
	/* Disable autonegotiation in TBI mode, because by default it comes
	 * up in autonegotiation mode.  Note that this depends on a proper
	 * setting of the UTBIPAR register. */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
struct ucc_geth_info *ug_info = ugeth->ug_info;
struct phy_device *tbiphy;
		if (!ug_info->tbi_node) {
			pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
			return -EINVAL;
		}
		tbiphy = of_phy_find_device(ug_info->tbi_node);
		if (!tbiphy) {
			pr_warn("Could not get TBI device\n");
			return -EINVAL;
		}
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
phy_write(tbiphy, ENET_TBI_MII_CR, value);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
if (ret_val != 0) {
if (netif_msg_probe(ugeth))
pr_err("Preamble length must be between 3 and 7 inclusive\n");
return ret_val;
}
return 0;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
u32 cecr_subblock;
u32 temp;
int i = 10;
uccf = ugeth->uccf;
/* Mask GRACEFUL STOP TX interrupt bit and clear it */
clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
/* Issue host command */
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
QE_CR_PROTOCOL_ETHERNET, 0);
/* Wait for command to complete */
do {
msleep(10);
temp = in_be32(uccf->p_ucce);
} while (!(temp & UCC_GETH_UCCE_GRA) && --i);
uccf->stopped_tx = 1;
return 0;
}
static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
u32 cecr_subblock;
u8 temp;
int i = 10;
uccf = ugeth->uccf;
/* Clear acknowledge bit */
temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
/* Keep issuing command and checking acknowledge bit until
it is asserted, according to spec */
do {
/* Issue host command */
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
QE_CR_PROTOCOL_ETHERNET, 0);
msleep(10);
temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
uccf->stopped_rx = 1;
return 0;
}
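/*
 * Both graceful-stop helpers above poll at 10 ms intervals for at most
 * 10 iterations (roughly 100 ms total) and then give up silently; the
 * stopped_tx/stopped_rx flags are set either way.
 */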
static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
u32 cecr_subblock;
uccf = ugeth->uccf;
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
uccf->stopped_tx = 0;
return 0;
}
static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
u32 cecr_subblock;
uccf = ugeth->uccf;
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
0);
uccf->stopped_rx = 0;
return 0;
}
static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
struct ucc_fast_private *uccf;
int enabled_tx, enabled_rx;
uccf = ugeth->uccf;
/* check if the UCC number is in range. */
if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
if (netif_msg_probe(ugeth))
pr_err("ucc_num out of range\n");
return -EINVAL;
}
enabled_tx = uccf->enabled_tx;
enabled_rx = uccf->enabled_rx;
/* Get Tx and Rx going again, in case this channel was actively
disabled. */
if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
ugeth_restart_tx(ugeth);
if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
ugeth_restart_rx(ugeth);
ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
return 0;
}
static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
struct ucc_fast_private *uccf;
uccf = ugeth->uccf;
/* check if the UCC number is in range. */
if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
if (netif_msg_probe(ugeth))
pr_err("ucc_num out of range\n");
return -EINVAL;
}
/* Stop any transmissions */
if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
ugeth_graceful_stop_tx(ugeth);
/* Stop any receptions */
if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
ugeth_graceful_stop_rx(ugeth);
ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
return 0;
}
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
/* Prevent any further xmits, plus detach the device. */
netif_device_detach(ugeth->ndev);
/* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
disable_irq(ugeth->ug_info->uf_info.irq);
/* Stop NAPI, and possibly wait for its completion. */
napi_disable(&ugeth->napi);
}
static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the ugeth structure, and this
* function converts those variables into the appropriate
* register values, and can bring down the device if needed.
*/
static void adjust_link(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
struct phy_device *phydev = ugeth->phydev;
int new_state = 0;
ug_regs = ugeth->ug_regs;
uf_regs = ugeth->uccf->uf_regs;
if (phydev->link) {
u32 tempval = in_be32(&ug_regs->maccfg2);
u32 upsmr = in_be32(&uf_regs->upsmr);
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
if (phydev->duplex != ugeth->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
tempval &= ~(MACCFG2_FDX);
else
tempval |= MACCFG2_FDX;
ugeth->oldduplex = phydev->duplex;
}
if (phydev->speed != ugeth->oldspeed) {
new_state = 1;
switch (phydev->speed) {
case SPEED_1000:
tempval = ((tempval &
~(MACCFG2_INTERFACE_MODE_MASK)) |
MACCFG2_INTERFACE_MODE_BYTE);
break;
case SPEED_100:
case SPEED_10:
tempval = ((tempval &
~(MACCFG2_INTERFACE_MODE_MASK)) |
MACCFG2_INTERFACE_MODE_NIBBLE);
/* if reduced mode, re-set UPSMR.R10M */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
if (phydev->speed == SPEED_10)
upsmr |= UCC_GETH_UPSMR_R10M;
else
upsmr &= ~UCC_GETH_UPSMR_R10M;
}
break;
default:
			if (netif_msg_link(ugeth))
				pr_warn("%s: Ack! Speed (%d) is not 10/100/1000!\n",
					dev->name, phydev->speed);
break;
}
ugeth->oldspeed = phydev->speed;
}
if (!ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 1;
}
if (new_state) {
/*
* To change the MAC configuration we need to disable
* the controller. To do so, we have to either grab
* ugeth->lock, which is a bad idea since 'graceful
* stop' commands might take quite a while, or we can
* quiesce driver's activity.
*/
ugeth_quiesce(ugeth);
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
out_be32(&ug_regs->maccfg2, tempval);
out_be32(&uf_regs->upsmr, upsmr);
ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
ugeth_activate(ugeth);
}
} else if (ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 0;
ugeth->oldspeed = 0;
ugeth->oldduplex = -1;
}
if (new_state && netif_msg_link(ugeth))
phy_print_status(phydev);
}
/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the UTBIPA register. We assume
* that the UTBIPA register is valid. Either the MDIO bus code will set
* it to a value that doesn't conflict with other PHYs on the bus, or the
* value doesn't matter, as there are no other PHYs on the bus.
*/
static void uec_configure_serdes(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
dev_warn(&dev->dev, "SGMII mode requires that the device "
"tree specify a tbi-handle\n");
return;
}
tbiphy = of_phy_find_device(ug_info->tbi_node);
if (!tbiphy) {
dev_err(&dev->dev, "error: Could not get TBI device\n");
return;
}
/*
* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
return;
	/* Single clk mode, mii mode off (for serdes communication) */
phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
}
/* Configure the PHY for dev.
 * Returns 0 on success, or a negative errno on failure.
 */
static int init_phy(struct net_device *dev)
{
struct ucc_geth_private *priv = netdev_priv(dev);
struct ucc_geth_info *ug_info = priv->ug_info;
struct phy_device *phydev;
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
priv->phy_interface);
if (!phydev)
phydev = of_phy_connect_fixed_link(dev, &adjust_link,
priv->phy_interface);
if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
}
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
	phydev->supported &= (SUPPORTED_MII |
			      SUPPORTED_Autoneg |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full);
	if (priv->max_speed == SPEED_1000)
		phydev->supported |= SUPPORTED_1000baseT_Full;
phydev->advertising = phydev->supported;
priv->phydev = phydev;
return 0;
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
{
#ifdef DEBUG
ucc_fast_dump_regs(ugeth->uccf);
dump_regs(ugeth);
dump_bds(ugeth);
#endif
}
static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
ugeth,
enum enet_addr_type
enet_addr_type)
{
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
struct ucc_fast_private *uccf;
enum comm_dir comm_dir;
struct list_head *p_lh;
u16 i, num;
u32 __iomem *addr_h;
u32 __iomem *addr_l;
u8 *p_counter;
uccf = ugeth->uccf;
p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *)
ugeth->p_rx_glbl_pram->addressfiltering;
if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
addr_h = &(p_82xx_addr_filt->gaddr_h);
addr_l = &(p_82xx_addr_filt->gaddr_l);
p_lh = &ugeth->group_hash_q;
p_counter = &(ugeth->numGroupAddrInHash);
} else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
addr_h = &(p_82xx_addr_filt->iaddr_h);
addr_l = &(p_82xx_addr_filt->iaddr_l);
p_lh = &ugeth->ind_hash_q;
p_counter = &(ugeth->numIndAddrInHash);
} else
return -EINVAL;
comm_dir = 0;
if (uccf->enabled_tx)
comm_dir |= COMM_DIR_TX;
if (uccf->enabled_rx)
comm_dir |= COMM_DIR_RX;
if (comm_dir)
ugeth_disable(ugeth, comm_dir);
/* Clear the hash table. */
out_be32(addr_h, 0x00000000);
out_be32(addr_l, 0x00000000);
if (!p_lh)
return 0;
num = *p_counter;
/* Delete all remaining CQ elements */
for (i = 0; i < num; i++)
put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
*p_counter = 0;
if (comm_dir)
ugeth_enable(ugeth, comm_dir);
return 0;
}
static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
u8 paddr_num)
{
ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
}
static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
u16 i, j;
u8 __iomem *bd;
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
if (ugeth->p_rx_bd_ring[i]) {
/* Return existing data buffers in ring */
bd = ugeth->p_rx_bd_ring[i];
for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
if (ugeth->rx_skbuff[i][j]) {
dma_unmap_single(ugeth->dev,
in_be32(&((struct qe_bd __iomem *)bd)->buf),
ugeth->ug_info->
uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT,
DMA_FROM_DEVICE);
dev_kfree_skb_any(
ugeth->rx_skbuff[i][j]);
ugeth->rx_skbuff[i][j] = NULL;
}
bd += sizeof(struct qe_bd);
}
kfree(ugeth->rx_skbuff[i]);
if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_SYSTEM)
kfree((void *)ugeth->rx_bd_ring_offset[i]);
else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM)
qe_muram_free(ugeth->rx_bd_ring_offset[i]);
ugeth->p_rx_bd_ring[i] = NULL;
}
}
}
static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
u16 i, j;
u8 __iomem *bd;
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
bd = ugeth->p_tx_bd_ring[i];
if (!bd)
continue;
for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
if (ugeth->tx_skbuff[i][j]) {
dma_unmap_single(ugeth->dev,
in_be32(&((struct qe_bd __iomem *)bd)->buf),
(in_be32((u32 __iomem *)bd) &
BD_LENGTH_MASK),
DMA_TO_DEVICE);
dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
ugeth->tx_skbuff[i][j] = NULL;
}
}
kfree(ugeth->tx_skbuff[i]);
if (ugeth->p_tx_bd_ring[i]) {
if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_SYSTEM)
kfree((void *)ugeth->tx_bd_ring_offset[i]);
else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM)
qe_muram_free(ugeth->tx_bd_ring_offset[i]);
ugeth->p_tx_bd_ring[i] = NULL;
}
}
}
static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
{
if (!ugeth)
return;
if (ugeth->uccf) {
ucc_fast_free(ugeth->uccf);
ugeth->uccf = NULL;
}
if (ugeth->p_thread_data_tx) {
qe_muram_free(ugeth->thread_dat_tx_offset);
ugeth->p_thread_data_tx = NULL;
}
if (ugeth->p_thread_data_rx) {
qe_muram_free(ugeth->thread_dat_rx_offset);
ugeth->p_thread_data_rx = NULL;
}
if (ugeth->p_exf_glbl_param) {
qe_muram_free(ugeth->exf_glbl_param_offset);
ugeth->p_exf_glbl_param = NULL;
}
if (ugeth->p_rx_glbl_pram) {
qe_muram_free(ugeth->rx_glbl_pram_offset);
ugeth->p_rx_glbl_pram = NULL;
}
if (ugeth->p_tx_glbl_pram) {
qe_muram_free(ugeth->tx_glbl_pram_offset);
ugeth->p_tx_glbl_pram = NULL;
}
if (ugeth->p_send_q_mem_reg) {
qe_muram_free(ugeth->send_q_mem_reg_offset);
ugeth->p_send_q_mem_reg = NULL;
}
if (ugeth->p_scheduler) {
qe_muram_free(ugeth->scheduler_offset);
ugeth->p_scheduler = NULL;
}
if (ugeth->p_tx_fw_statistics_pram) {
qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
ugeth->p_tx_fw_statistics_pram = NULL;
}
if (ugeth->p_rx_fw_statistics_pram) {
qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
ugeth->p_rx_fw_statistics_pram = NULL;
}
if (ugeth->p_rx_irq_coalescing_tbl) {
qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
ugeth->p_rx_irq_coalescing_tbl = NULL;
}
if (ugeth->p_rx_bd_qs_tbl) {
qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
ugeth->p_rx_bd_qs_tbl = NULL;
}
if (ugeth->p_init_enet_param_shadow) {
return_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
rxthread[0]),
ENET_INIT_PARAM_MAX_ENTRIES_RX,
ugeth->ug_info->riscRx, 1);
return_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
txthread[0]),
ENET_INIT_PARAM_MAX_ENTRIES_TX,
ugeth->ug_info->riscTx, 0);
kfree(ugeth->p_init_enet_param_shadow);
ugeth->p_init_enet_param_shadow = NULL;
}
ucc_geth_free_tx(ugeth);
ucc_geth_free_rx(ugeth);
while (!list_empty(&ugeth->group_hash_q))
put_enet_addr_container(ENET_ADDR_CONT_ENTRY
(dequeue(&ugeth->group_hash_q)));
while (!list_empty(&ugeth->ind_hash_q))
put_enet_addr_container(ENET_ADDR_CONT_ENTRY
(dequeue(&ugeth->ind_hash_q)));
if (ugeth->ug_regs) {
iounmap(ugeth->ug_regs);
ugeth->ug_regs = NULL;
}
}
static void ucc_geth_set_multi(struct net_device *dev)
{
struct ucc_geth_private *ugeth;
struct netdev_hw_addr *ha;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
ugeth = netdev_priv(dev);
uf_regs = ugeth->uccf->uf_regs;
if (dev->flags & IFF_PROMISC) {
setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
} else {
clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
p_rx_glbl_pram->addressfiltering;
if (dev->flags & IFF_ALLMULTI) {
/* Catch all multicast addresses, so set the
* filter to all 1's.
*/
out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
} else {
/* Clear filter and add the addresses in the list.
*/
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
netdev_for_each_mc_addr(ha, dev) {
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
hw_add_addr_in_hash(ugeth, ha->addr);
}
}
}
}
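/*
 * Filtering precedence implemented above: IFF_PROMISC wins outright via
 * UPSMR.PRO; otherwise IFF_ALLMULTI opens the group hash completely
 * (all ones); otherwise the hash is rebuilt from the current multicast
 * list, one CPM command per address.
 */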
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
/*
* Tell the kernel the link is down.
* Must be done before disabling the controller
* or deadlock may happen.
*/
phy_stop(phydev);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
/* Mask all interrupts */
out_be32(ugeth->uccf->p_uccm, 0x00000000);
/* Clear all interrupts */
out_be32(ugeth->uccf->p_ucce, 0xffffffff);
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
ucc_geth_memclean(ugeth);
}
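/*
 * Validate the static configuration (BD ring lengths, queue counts,
 * priority tables, filtering options), derive the interrupt mask from the
 * queue layout, bring up the generic fast-UCC layer and map the Geth
 * register block.
 */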
static int ucc_struct_init(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
int i;
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth))
pr_err("Bad memory partition value\n");
return -EINVAL;
}
/* Rx BD lengths */
for (i = 0; i < ug_info->numQueuesRx; i++) {
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
if (netif_msg_probe(ugeth))
pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n");
return -EINVAL;
}
}
/* Tx BD lengths */
for (i = 0; i < ug_info->numQueuesTx; i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
if (netif_msg_probe(ugeth))
pr_err("Tx BD ring length must be no smaller than 2\n");
return -EINVAL;
}
}
/* mrblr */
if ((uf_info->max_rx_buf_length == 0) ||
(uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
if (netif_msg_probe(ugeth))
pr_err("max_rx_buf_length must be non-zero multiple of 128\n");
return -EINVAL;
}
/* num Tx queues */
if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of tx queues too large\n");
return -EINVAL;
}
/* num Rx queues */
if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of rx queues too large\n");
return -EINVAL;
}
/* l2qt */
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
if (netif_msg_probe(ugeth))
pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
}
}
/* l3qt */
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
if (netif_msg_probe(ugeth))
pr_err("IP priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
}
}
if (ug_info->cam && !ug_info->ecamptr) {
if (netif_msg_probe(ugeth))
pr_err("If cam mode is chosen, must supply cam ptr\n");
return -EINVAL;
}
if ((ug_info->numStationAddresses !=
UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
ug_info->rxExtendedFiltering) {
if (netif_msg_probe(ugeth))
pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n");
return -EINVAL;
}
	/* Generate uccm_mask: error events plus one Rx-frame and one
	   Tx-buffer event bit per queue */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
for (i = 0; i < ug_info->numQueuesRx; i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
for (i = 0; i < ug_info->numQueuesTx; i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
if (netif_msg_probe(ugeth))
pr_err("Failed to init uccf\n");
return -ENOMEM;
}
/* read the number of risc engines, update the riscTx and riscRx
* if there are 4 riscs in QE
*/
if (qe_get_num_of_risc() == 4) {
ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
}
ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
if (!ugeth->ug_regs) {
if (netif_msg_probe(ugeth))
pr_err("Failed to ioremap regs\n");
return -ENOMEM;
}
return 0;
}
static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
int length;
u16 i, j;
u8 __iomem *bd;
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
/* Allocate Tx bds */
for (j = 0; j < ug_info->numQueuesTx; j++) {
/* Allocate in multiple of
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
according to spec */
length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
/ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
* UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
u32 align = 4;
if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
align = UCC_GETH_TX_BD_RING_ALIGNMENT;
ugeth->tx_bd_ring_offset[j] =
(u32) kmalloc((u32) (length + align), GFP_KERNEL);
if (ugeth->tx_bd_ring_offset[j] != 0)
ugeth->p_tx_bd_ring[j] =
(u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
align) & ~(align - 1));
} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
ugeth->tx_bd_ring_offset[j] =
qe_muram_alloc(length,
UCC_GETH_TX_BD_RING_ALIGNMENT);
if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
ugeth->p_tx_bd_ring[j] =
(u8 __iomem *) qe_muram_addr(ugeth->
tx_bd_ring_offset[j]);
}
if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
}
/* Init Tx bds */
for (j = 0; j < ug_info->numQueuesTx; j++) {
/* Setup the skbuff rings */
ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
ugeth->ug_info->bdRingLenTx[j],
GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
pr_err("Could not allocate tx_skbuff\n");
return -ENOMEM;
}
for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
ugeth->tx_skbuff[j][i] = NULL;
ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
/* clear bd buffer */
out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
/* set bd status and length */
out_be32((u32 __iomem *)bd, 0);
bd += sizeof(struct qe_bd);
}
bd -= sizeof(struct qe_bd);
/* set bd status and length */
out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
}
return 0;
}
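/*
 * Ring invariant established above: every descriptor (struct qe_bd) starts
 * zeroed, and only the final descriptor carries the Wrap bit (T_W), so the
 * controller cycles back to the ring base after the last entry.
 */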
static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
int length;
u16 i, j;
u8 __iomem *bd;
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
/* Allocate Rx bds */
for (j = 0; j < ug_info->numQueuesRx; j++) {
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
u32 align = 4;
if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
align = UCC_GETH_RX_BD_RING_ALIGNMENT;
ugeth->rx_bd_ring_offset[j] =
(u32) kmalloc((u32) (length + align), GFP_KERNEL);
if (ugeth->rx_bd_ring_offset[j] != 0)
ugeth->p_rx_bd_ring[j] =
(u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
align) & ~(align - 1));
} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
ugeth->rx_bd_ring_offset[j] =
qe_muram_alloc(length,
UCC_GETH_RX_BD_RING_ALIGNMENT);
if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
ugeth->p_rx_bd_ring[j] =
(u8 __iomem *) qe_muram_addr(ugeth->
rx_bd_ring_offset[j]);
}
if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n");
return -ENOMEM;
}
}
/* Init Rx bds */
for (j = 0; j < ug_info->numQueuesRx; j++) {
/* Setup the skbuff rings */
ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
ugeth->ug_info->bdRingLenRx[j],
GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
pr_err("Could not allocate rx_skbuff\n");
return -ENOMEM;
}
for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
ugeth->rx_skbuff[j][i] = NULL;
ugeth->skb_currx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
/* set bd status and length */
out_be32((u32 __iomem *)bd, R_I);
/* clear bd buffer */
out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
bd += sizeof(struct qe_bd);
}
bd -= sizeof(struct qe_bd);
/* set bd status and length */
out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
}
return 0;
}
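/*
 * One-time hardware bring-up: program the MAC and UCC registers from
 * ug_info, allocate and initialize the Tx and Rx BD rings, then build the
 * global parameter RAM structures in MURAM.
 */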
static int ucc_geth_startup(struct ucc_geth_private *ugeth)
{
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
struct ucc_geth_init_pram __iomem *p_init_enet_pram;
struct ucc_fast_private *uccf;
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth __iomem *ug_regs;
int ret_val = -EINVAL;
u32 remoder = UCC_GETH_REMODER_INIT;
u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt;
u16 temoder = UCC_GETH_TEMODER_INIT;
u16 test;
u8 function_code = 0;
u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
uf_regs = uccf->uf_regs;
ug_regs = ugeth->ug_regs;
switch (ug_info->numThreadsRx) {
case UCC_GETH_NUM_OF_THREADS_1:
numThreadsRxNumerical = 1;
break;
case UCC_GETH_NUM_OF_THREADS_2:
numThreadsRxNumerical = 2;
break;
case UCC_GETH_NUM_OF_THREADS_4:
numThreadsRxNumerical = 4;
break;
case UCC_GETH_NUM_OF_THREADS_6:
numThreadsRxNumerical = 6;
break;
case UCC_GETH_NUM_OF_THREADS_8:
numThreadsRxNumerical = 8;
break;
default:
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Rx threads value\n");
return -EINVAL;
}
switch (ug_info->numThreadsTx) {
case UCC_GETH_NUM_OF_THREADS_1:
numThreadsTxNumerical = 1;
break;
case UCC_GETH_NUM_OF_THREADS_2:
numThreadsTxNumerical = 2;
break;
case UCC_GETH_NUM_OF_THREADS_4:
numThreadsTxNumerical = 4;
break;
case UCC_GETH_NUM_OF_THREADS_6:
numThreadsTxNumerical = 6;
break;
case UCC_GETH_NUM_OF_THREADS_8:
numThreadsTxNumerical = 8;
break;
default:
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Tx threads value\n");
return -EINVAL;
}
/* Calculate rx_extended_features */
ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
ug_info->ipAddressAlignment ||
(ug_info->numStationAddresses !=
UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
(ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
(ug_info->vlanOperationNonTagged !=
UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
init_default_reg_vals(&uf_regs->upsmr,
&ug_regs->maccfg1, &ug_regs->maccfg2);
/* Set UPSMR */
/* For more details see the hardware spec. */
init_rx_parameters(ug_info->bro,
ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
/* We're going to ignore other registers for now, */
/* except as needed to get up and running */
/* Set MACCFG1 */
/* For more details see the hardware spec. */
init_flow_control_params(ug_info->aufc,
ug_info->receiveFlowControl,
ug_info->transmitFlowControl,
ug_info->pausePeriod,
ug_info->extensionField,
&uf_regs->upsmr,
&ug_regs->uempr, &ug_regs->maccfg1);
setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
/* Set IPGIFG */
/* For more details see the hardware spec. */
ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
ug_info->nonBackToBackIfgPart2,
ug_info->
miminumInterFrameGapEnforcement,
ug_info->backToBackInterFrameGap,
&ug_regs->ipgifg);
if (ret_val != 0) {
if (netif_msg_ifup(ugeth))
pr_err("IPGIFG initialization parameter too large\n");
return ret_val;
}
/* Set HAFDUP */
/* For more details see the hardware spec. */
ret_val = init_half_duplex_params(ug_info->altBeb,
ug_info->backPressureNoBackoff,
ug_info->noBackoff,
ug_info->excessDefer,
ug_info->altBebTruncation,
ug_info->maxRetransmission,
ug_info->collisionWindow,
&ug_regs->hafdup);
if (ret_val != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Half Duplex initialization parameter too large\n");
return ret_val;
}
/* Set IFSTAT */
/* For more details see the hardware spec. */
/* Read only - resets upon read */
ifstat = in_be32(&ug_regs->ifstat);
/* Clear UEMPR */
/* For more details see the hardware spec. */
out_be32(&ug_regs->uempr, 0);
/* Set UESCR */
/* For more details see the hardware spec. */
init_hw_statistics_gathering_mode((ug_info->statisticsMode &
UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
0, &uf_regs->upsmr, &ug_regs->uescr);
ret_val = ucc_geth_alloc_tx(ugeth);
if (ret_val != 0)
return ret_val;
ret_val = ucc_geth_alloc_rx(ugeth);
if (ret_val != 0)
return ret_val;
/*
* Global PRAM
*/
/* Tx global PRAM */
/* Allocate global tx parameter RAM page */
ugeth->tx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
return -ENOMEM;
}
ugeth->p_tx_glbl_pram =
(struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
tx_glbl_pram_offset);
/* Zero out p_tx_glbl_pram */
memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
/* Fill global PRAM */
/* TQPTR */
/* Size varies with number of Tx threads */
ugeth->thread_dat_tx_offset =
qe_muram_alloc(numThreadsTxNumerical *
sizeof(struct ucc_geth_thread_data_tx) +
32 * (numThreadsTxNumerical == 1),
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n");
return -ENOMEM;
}
ugeth->p_thread_data_tx =
(struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
thread_dat_tx_offset);
out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
/* vtagtable */
for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
ug_info->vtagtable[i]);
/* iphoffset */
for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
ug_info->iphoffset[i]);
/* SQPTR */
/* Size varies with number of Tx queues */
ugeth->send_q_mem_reg_offset =
qe_muram_alloc(ug_info->numQueuesTx *
sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n");
return -ENOMEM;
}
ugeth->p_send_q_mem_reg =
(struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
send_q_mem_reg_offset);
out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
/* Setup the table */
/* Assume BD rings are already established */
for (i = 0; i < ug_info->numQueuesTx; i++) {
endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
1) * sizeof(struct qe_bd);
if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
(u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
last_bd_completed_address,
(u32) virt_to_phys(endOfRing));
} else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM) {
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
(u32) immrbar_virt_to_phys(ugeth->
p_tx_bd_ring[i]));
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
last_bd_completed_address,
(u32) immrbar_virt_to_phys(endOfRing));
}
}
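/* Each send queue QD now carries the physical address of the first
and last BD of its Tx ring, translated with virt_to_phys() for
system memory or immrbar_virt_to_phys() for MURAM. */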
/* schedulerbasepointer */
if (ug_info->numQueuesTx > 1) {
/* scheduler exists only if more than 1 tx queue */
ugeth->scheduler_offset =
qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
UCC_GETH_SCHEDULER_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_scheduler\n");
return -ENOMEM;
}
ugeth->p_scheduler =
(struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
/* Zero out p_scheduler */
memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
ug_info->mblinterval);
out_be16(&ugeth->p_scheduler->nortsrbytetime,
ug_info->nortsrbytetime);
out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
out_8(&ugeth->p_scheduler->strictpriorityq,
ug_info->strictpriorityq);
out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
for (i = 0; i < NUM_TX_QUEUES; i++)
out_8(&ugeth->p_scheduler->weightfactor[i],
ug_info->weightfactor[i]);
/* Set pointers to cpucount registers in scheduler */
ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
}
/* schedulerbasepointer */
/* TxRMON_PTR (statistics) */
if (ug_info->
statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
ugeth->tx_fw_statistics_pram_offset =
qe_muram_alloc(sizeof
(struct ucc_geth_tx_firmware_statistics_pram),
UCC_GETH_TX_STATISTICS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n");
return -ENOMEM;
}
ugeth->p_tx_fw_statistics_pram =
(struct ucc_geth_tx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
/* Zero out p_tx_fw_statistics_pram */
memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
/* temoder */
/* Already has speed set */
if (ug_info->numQueuesTx > 1)
temoder |= TEMODER_SCHEDULER_ENABLE;
if (ug_info->ipCheckSumGenerate)
temoder |= TEMODER_IP_CHECKSUM_GENERATE;
temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
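/* Read temoder back; presumably this flushes the posted write to
DPRAM before the queues are started. */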
/* Function code register value to be used later */
function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
/* Required for QE */
/* function code register */
out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
/* Rx global PRAM */
/* Allocate global rx parameter RAM page */
ugeth->rx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
return -ENOMEM;
}
ugeth->p_rx_glbl_pram =
(struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
rx_glbl_pram_offset);
/* Zero out p_rx_glbl_pram */
memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
/* Fill global PRAM */
/* RQPTR */
/* Size varies with number of Rx threads */
ugeth->thread_dat_rx_offset =
qe_muram_alloc(numThreadsRxNumerical *
sizeof(struct ucc_geth_thread_data_rx),
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n");
return -ENOMEM;
}
ugeth->p_thread_data_rx =
(struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
thread_dat_rx_offset);
out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
/* typeorlen */
out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
/* rxrmonbaseptr (statistics) */
if (ug_info->
statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
ugeth->rx_fw_statistics_pram_offset =
qe_muram_alloc(sizeof
(struct ucc_geth_rx_firmware_statistics_pram),
UCC_GETH_RX_STATISTICS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n");
return -ENOMEM;
}
ugeth->p_rx_fw_statistics_pram =
(struct ucc_geth_rx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
/* Zero out p_rx_fw_statistics_pram */
memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
/* intCoalescingPtr */
/* Size varies with number of Rx queues */
ugeth->rx_irq_coalescing_tbl_offset =
qe_muram_alloc(ug_info->numQueuesRx *
sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n");
return -ENOMEM;
}
ugeth->p_rx_irq_coalescing_tbl =
(struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
ugeth->rx_irq_coalescing_tbl_offset);
/* Fill interrupt coalescing table */
for (i = 0; i < ug_info->numQueuesRx; i++) {
out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
interruptcoalescingmaxvalue,
ug_info->interruptcoalescingmaxvalue[i]);
out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
interruptcoalescingcounter,
ug_info->interruptcoalescingmaxvalue[i]);
}
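/* The coalescing counter is seeded with the max value; presumably
the hardware counts it down, so the first Rx interrupt for each
queue fires only after that many frames. */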
/* MRBLR */
init_max_rx_buff_len(uf_info->max_rx_buf_length,
&ugeth->p_rx_glbl_pram->mrblr);
/* MFLR */
out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
/* MINFLR */
init_min_frame_len(ug_info->minFrameLength,
&ugeth->p_rx_glbl_pram->minflr,
&ugeth->p_rx_glbl_pram->mrblr);
/* MAXD1 */
out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
/* MAXD2 */
out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
/* l2qt */
l2qt = 0;
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
/* l3qt */
for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
l3qt = 0;
for (i = 0; i < 8; i++)
l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
}
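/* l2qt packs eight 4-bit VLAN-priority-to-queue mappings into one
32-bit word; l3qt does the same for each group of eight IP
precedence values. */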
/* vlantype */
out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
/* vlantci */
out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
/* ecamptr */
out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
/* RBDQPTR */
/* Size varies with number of Rx queues */
ugeth->rx_bd_qs_tbl_offset =
qe_muram_alloc(ug_info->numQueuesRx *
(sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n");
return -ENOMEM;
}
ugeth->p_rx_bd_qs_tbl =
(struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
/* Zero out p_rx_bd_qs_tbl */
memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
0,
ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)));
/* Setup the table */
/* Assume BD rings are already established */
for (i = 0; i < ug_info->numQueuesRx; i++) {
if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
(u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
} else if (ugeth->ug_info->uf_info.bd_mem_part ==
MEM_PART_MURAM) {
out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
(u32) immrbar_virt_to_phys(ugeth->
p_rx_bd_ring[i]));
}
/* rest of fields handled by QE */
}
/* remoder */
/* Already has speed set */
if (ugeth->rx_extended_features)
remoder |= REMODER_RX_EXTENDED_FEATURES;
if (ug_info->rxExtendedFiltering)
remoder |= REMODER_RX_EXTENDED_FILTERING;
if (ug_info->dynamicMaxFrameLength)
remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
if (ug_info->dynamicMinFrameLength)
remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
remoder |=
ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
remoder |=
ug_info->
vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
if (ug_info->ipCheckSumCheck)
remoder |= REMODER_IP_CHECKSUM_CHECK;
if (ug_info->ipAddressAlignment)
remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
/* Note that this function must be called */
/* ONLY AFTER p_tx_fw_statistics_pram */
/* and p_rx_fw_statistics_pram are allocated ! */
init_firmware_statistics_gathering_mode((ug_info->
statisticsMode &
UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
(ug_info->statisticsMode &
UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
&ugeth->p_tx_glbl_pram->txrmonbaseptr,
ugeth->tx_fw_statistics_pram_offset,
&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
ugeth->rx_fw_statistics_pram_offset,
&ugeth->p_tx_glbl_pram->temoder,
&ugeth->p_rx_glbl_pram->remoder);
/* function code register */
out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
/* initialize extended filtering */
if (ug_info->rxExtendedFiltering) {
if (!ug_info->extendedFilteringChainPointer) {
if (netif_msg_ifup(ugeth))
pr_err("Null Extended Filtering Chain Pointer\n");
return -EINVAL;
}
/* Allocate memory for extended filtering Mode Global
Parameters */
ugeth->exf_glbl_param_offset =
qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n");
return -ENOMEM;
}
ugeth->p_exf_glbl_param =
(struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
exf_glbl_param_offset);
out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
ugeth->exf_glbl_param_offset);
out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
(u32) ug_info->extendedFilteringChainPointer);
} else { /* initialize 82xx style address filtering */
/* Init individual address recognition registers to disabled */
for (j = 0; j < NUM_OF_PADDRS; j++)
ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
p_82xx_addr_filt =
(struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
p_rx_glbl_pram->addressfiltering;
ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
ENET_ADDR_TYPE_GROUP);
ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
ENET_ADDR_TYPE_INDIVIDUAL);
}
/*
* Initialize UCC at QE level
*/
command = QE_INIT_TX_RX;
/* Allocate shadow InitEnet command parameter structure.
* This is needed because after the InitEnet command is executed,
* the structure in DPRAM is released, because DPRAM is a premium
* resource.
* This shadow structure keeps a copy of what was done so that the
* allocated resources can be released when the channel is freed.
*/
if (!(ugeth->p_init_enet_param_shadow =
kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
return -ENOMEM;
}
/* Zero out *p_init_enet_param_shadow */
memset((char *)ugeth->p_init_enet_param_shadow,
0, sizeof(struct ucc_geth_init_pram));
/* Fill shadow InitEnet command parameter structure */
ugeth->p_init_enet_param_shadow->resinit1 =
ENET_INIT_PARAM_MAGIC_RES_INIT1;
ugeth->p_init_enet_param_shadow->resinit2 =
ENET_INIT_PARAM_MAGIC_RES_INIT2;
ugeth->p_init_enet_param_shadow->resinit3 =
ENET_INIT_PARAM_MAGIC_RES_INIT3;
ugeth->p_init_enet_param_shadow->resinit4 =
ENET_INIT_PARAM_MAGIC_RES_INIT4;
ugeth->p_init_enet_param_shadow->resinit5 =
ENET_INIT_PARAM_MAGIC_RES_INIT5;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
ugeth->rx_glbl_pram_offset | ug_info->riscRx;
if ((ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
(ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
(ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
if (netif_msg_ifup(ugeth))
pr_err("Invalid largest External Lookup Key Size\n");
return -EINVAL;
}
ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
ug_info->largestexternallookupkeysize;
size = sizeof(struct ucc_geth_thread_rx_pram);
if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
}
if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
p_init_enet_param_shadow->rxthread[0]),
(u8) (numThreadsRxNumerical + 1)
/* Rx needs one extra for terminator */
, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
ug_info->riscRx, 1)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill p_init_enet_param_shadow\n");
return ret_val;
}
ugeth->p_init_enet_param_shadow->txglobal =
ugeth->tx_glbl_pram_offset | ug_info->riscTx;
if ((ret_val =
fill_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
txthread[0]), numThreadsTxNumerical,
sizeof(struct ucc_geth_thread_tx_pram),
UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
ug_info->riscTx, 0)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill p_init_enet_param_shadow\n");
return ret_val;
}
/* Load Rx bds with buffers */
for (i = 0; i < ug_info->numQueuesRx; i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill Rx bds with buffers\n");
return ret_val;
}
}
/* Allocate InitEnet command parameter structure */
init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
if (IS_ERR_VALUE(init_enet_pram_offset)) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n");
return -ENOMEM;
}
p_init_enet_pram =
(struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
/* Copy shadow InitEnet command parameter structure into PRAM */
out_8(&p_init_enet_pram->resinit1,
ugeth->p_init_enet_param_shadow->resinit1);
out_8(&p_init_enet_pram->resinit2,
ugeth->p_init_enet_param_shadow->resinit2);
out_8(&p_init_enet_pram->resinit3,
ugeth->p_init_enet_param_shadow->resinit3);
out_8(&p_init_enet_pram->resinit4,
ugeth->p_init_enet_param_shadow->resinit4);
out_be16(&p_init_enet_pram->resinit5,
ugeth->p_init_enet_param_shadow->resinit5);
out_8(&p_init_enet_pram->largestexternallookupkeysize,
ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
out_be32(&p_init_enet_pram->rgftgfrxglobal,
ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
out_be32(&p_init_enet_pram->rxthread[i],
ugeth->p_init_enet_param_shadow->rxthread[i]);
out_be32(&p_init_enet_pram->txglobal,
ugeth->p_init_enet_param_shadow->txglobal);
for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
out_be32(&p_init_enet_pram->txthread[i],
ugeth->p_init_enet_param_shadow->txthread[i]);
/* Issue QE command */
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
init_enet_pram_offset);
/* Free InitEnet command parameter */
qe_muram_free(init_enet_pram_offset);
return 0;
}
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
#ifdef CONFIG_UGETH_TX_ON_DEMAND
struct ucc_fast_private *uccf;
#endif
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
u8 txQ = 0;
unsigned long flags;
ugeth_vdbg("%s: IN", __func__);
spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
/* Start from the next BD that should be filled */
bd = ugeth->txBd[txQ];
bd_status = in_be32((u32 __iomem *)bd);
/* Save the skb pointer so we can free it later */
ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
/* Update the current skb pointer (wrapping if this was the last) */
ugeth->skb_curtx[txQ] =
(ugeth->skb_curtx[txQ] +
1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
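/* Note: the mod-mask wrap above assumes the Tx ring length is a
power of two. */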
/* set up the buffer descriptor */
out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(ugeth->dev, skb->data,
skb->len, DMA_TO_DEVICE));
/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
/* set bd status and length */
out_be32((u32 __iomem *)bd, bd_status);
/* Move to next BD in the ring */
if (!(bd_status & T_W))
bd += sizeof(struct qe_bd);
else
bd = ugeth->p_tx_bd_ring[txQ];
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
if (bd == ugeth->confBd[txQ]) {
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
}
ugeth->txBd[txQ] = bd;
skb_tx_timestamp(skb);
if (ugeth->p_scheduler) {
ugeth->cpucount[txQ]++;
/* Indicate to QE that there are more Tx bds ready for
transmission */
/* This is done by writing a running counter of the bd
count to the scheduler PRAM. */
out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
}
#ifdef CONFIG_UGETH_TX_ON_DEMAND
uccf = ugeth->uccf;
out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
spin_unlock_irqrestore(&ugeth->lock, flags);
return NETDEV_TX_OK;
}
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
{
struct sk_buff *skb;
u8 __iomem *bd;
u16 length, howmany = 0;
u32 bd_status;
u8 *bdBuffer;
struct net_device *dev;
ugeth_vdbg("%s: IN", __func__);
dev = ugeth->ndev;
/* collect received buffers */
bd = ugeth->rxBd[rxQ];
bd_status = in_be32((u32 __iomem *)bd);
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
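/* The BD length field includes the 4-byte Ethernet FCS; subtract
it to get the payload length handed up the stack. */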
length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
/* determine whether buffer is first, last, first and last
(single buffer frame) or middle (not first and not last) */
if (!skb ||
(!(bd_status & (R_F | R_L))) ||
(bd_status & R_ERRORS_FATAL)) {
if (netif_msg_rx_err(ugeth))
pr_err("%d: ERROR!!! skb - 0x%08x\n",
__LINE__, (u32)skb);
dev_kfree_skb(skb);
ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
dev->stats.rx_dropped++;
} else {
dev->stats.rx_packets++;
howmany++;
/* Prep the skb for the packet */
skb_put(skb, length);
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, ugeth->ndev);
dev->stats.rx_bytes += length;
/* Send the packet up the stack */
netif_receive_skb(skb);
}
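/* Attach a fresh buffer to this BD so the controller can reuse it */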
skb = get_new_skb(ugeth, bd);
if (!skb) {
if (netif_msg_rx_err(ugeth))
pr_warn("No Rx Data Buffer\n");
dev->stats.rx_dropped++;
break;
}
ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
/* update to point at the next skb */
ugeth->skb_currx[rxQ] =
(ugeth->skb_currx[rxQ] +
1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
if (bd_status & R_W)
bd = ugeth->p_rx_bd_ring[rxQ];
else
bd += sizeof(struct qe_bd);
bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->rxBd[rxQ] = bd;
return howmany;
}
static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
struct ucc_geth_private *ugeth = netdev_priv(dev);
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
bd = ugeth->confBd[txQ];
bd_status = in_be32((u32 __iomem *)bd);
/* Normal processing. */
while ((bd_status & T_R) == 0) {
struct sk_buff *skb;
/* BD contains already transmitted buffer. */
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (!skb)
break;
dev->stats.tx_packets++;
dev_kfree_skb(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
(ugeth->skb_dirtytx[txQ] +
1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
/* We freed a buffer, so now we can restart transmission */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
/* Advance the confirmation BD pointer */
if (!(bd_status & T_W))
bd += sizeof(struct qe_bd);
else
bd = ugeth->p_tx_bd_ring[txQ];
bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->confBd[txQ] = bd;
return 0;
}
static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
struct ucc_geth_info *ug_info;
int howmany, i;
ug_info = ugeth->ug_info;
/* Tx event processing */
spin_lock(&ugeth->lock);
for (i = 0; i < ug_info->numQueuesTx; i++)
ucc_geth_tx(ugeth->ndev, i);
spin_unlock(&ugeth->lock);
howmany = 0;
for (i = 0; i < ug_info->numQueuesRx; i++)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
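/* All pending work fit within the budget: leave polling mode and
re-enable Rx/Tx event interrupts, per the NAPI contract. */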
napi_complete(napi);
setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
}
return howmany;
}
static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
struct net_device *dev = info;
struct ucc_geth_private *ugeth = netdev_priv(dev);
struct ucc_fast_private *uccf;
struct ucc_geth_info *ug_info;
register u32 ucce;
register u32 uccm;
ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
ug_info = ugeth->ug_info;
/* read and clear events */
ucce = (u32) in_be32(uccf->p_ucce);
uccm = (u32) in_be32(uccf->p_uccm);
ucce &= uccm;
out_be32(uccf->p_ucce, ucce);
/* check for receive events that require processing */
if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
if (napi_schedule_prep(&ugeth->napi)) {
uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
out_be32(uccf->p_uccm, uccm);
__napi_schedule(&ugeth->napi);
}
}
/* Errors and other events */
if (ucce & UCCE_OTHER) {
if (ucce & UCC_GETH_UCCE_BSY)
dev->stats.rx_errors++;
if (ucce & UCC_GETH_UCCE_TXE)
dev->stats.tx_errors++;
}
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void ucc_netpoll(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
int irq = ugeth->ug_info->uf_info.irq;
disable_irq(irq);
ucc_geth_irq_handler(irq, dev);
enable_irq(irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/*
* If device is not running, we will set mac addr register
* when opening the device.
*/
if (!netif_running(dev))
return 0;
spin_lock_irq(&ugeth->lock);
init_mac_station_addr_regs(dev->dev_addr[0],
dev->dev_addr[1],
dev->dev_addr[2],
dev->dev_addr[3],
dev->dev_addr[4],
dev->dev_addr[5],
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
spin_unlock_irq(&ugeth->lock);
return 0;
}
static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
{
struct net_device *dev = ugeth->ndev;
int err;
err = ucc_struct_init(ugeth);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
goto err;
}
err = ucc_geth_startup(ugeth);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
goto err;
}
err = adjust_enet_interface(ugeth);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
goto err;
}
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
dev->dev_addr[1],
dev->dev_addr[2],
dev->dev_addr[3],
dev->dev_addr[4],
dev->dev_addr[5],
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
goto err;
}
return 0;
err:
ucc_geth_stop(ugeth);
return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
int err;
ugeth_vdbg("%s: IN", __func__);
/* Test station address */
if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
netif_err(ugeth, ifup, dev,
"Multicast address used for station address - is this what you wanted?\n");
return -EINVAL;
}
err = init_phy(dev);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
return err;
}
err = ucc_geth_init_mac(ugeth);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
goto err;
}
err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
0, "UCC Geth", dev);
if (err) {
netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
goto err;
}
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
qe_alive_during_sleep() || ugeth->phydev->irq);
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
err:
ucc_geth_stop(ugeth);
return err;
}
/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
ugeth_vdbg("%s: IN", __func__);
napi_disable(&ugeth->napi);
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
phy_disconnect(ugeth->phydev);
ugeth->phydev = NULL;
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
netif_stop_queue(dev);
return 0;
}
/* Reopen device. This will reset the MAC and PHY. */
static void ucc_geth_timeout_work(struct work_struct *work)
{
struct ucc_geth_private *ugeth;
struct net_device *dev;
ugeth = container_of(work, struct ucc_geth_private, timeout_work);
dev = ugeth->ndev;
ugeth_vdbg("%s: IN", __func__);
dev->stats.tx_errors++;
ugeth_dump_regs(ugeth);
if (dev->flags & IFF_UP) {
/*
* Must reset MAC *and* PHY. This is done by reopening
* the device.
*/
netif_tx_stop_all_queues(dev);
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
phy_start(ugeth->phydev);
netif_tx_start_all_queues(dev);
}
netif_tx_schedule_all(dev);
}
/*
* ucc_geth_timeout gets called when a packet has not been
* transmitted after a set amount of time.
*/
static void ucc_geth_timeout(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
schedule_work(&ugeth->timeout_work);
}
#ifdef CONFIG_PM
static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
if (!netif_running(ndev))
return 0;
netif_device_detach(ndev);
napi_disable(&ugeth->napi);
/*
* Disable the controller, otherwise we'll wakeup on any network
* activity.
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
if (ugeth->wol_en & WAKE_MAGIC) {
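/* Arm magic-packet wakeup: unmask the MPD event and set
MACCFG2[MPE] so the MAC keeps matching frames while asleep. */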
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
} else if (!(ugeth->wol_en & WAKE_PHY)) {
phy_stop(ugeth->phydev);
}
return 0;
}
static int ucc_geth_resume(struct platform_device *ofdev)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
int err;
if (!netif_running(ndev))
return 0;
if (qe_alive_during_sleep()) {
if (ugeth->wol_en & WAKE_MAGIC) {
ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
}
ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
} else {
/*
* Full reinitialization is required if QE shuts down
* during sleep.
*/
ucc_geth_memclean(ugeth);
err = ucc_geth_init_mac(ugeth);
if (err) {
netdev_err(ndev, "Cannot initialize MAC, aborting\n");
return err;
}
}
ugeth->oldlink = 0;
ugeth->oldspeed = 0;
ugeth->oldduplex = -1;
phy_stop(ugeth->phydev);
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
return 0;
}
#else
#define ucc_geth_suspend NULL
#define ucc_geth_resume NULL
#endif
static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
if (strcasecmp(phy_connection_type, "mii") == 0)
return PHY_INTERFACE_MODE_MII;
if (strcasecmp(phy_connection_type, "gmii") == 0)
return PHY_INTERFACE_MODE_GMII;
if (strcasecmp(phy_connection_type, "tbi") == 0)
return PHY_INTERFACE_MODE_TBI;
if (strcasecmp(phy_connection_type, "rmii") == 0)
return PHY_INTERFACE_MODE_RMII;
if (strcasecmp(phy_connection_type, "rgmii") == 0)
return PHY_INTERFACE_MODE_RGMII;
if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
return PHY_INTERFACE_MODE_RGMII_ID;
if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
return PHY_INTERFACE_MODE_RGMII_TXID;
if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
return PHY_INTERFACE_MODE_RGMII_RXID;
if (strcasecmp(phy_connection_type, "rtbi") == 0)
return PHY_INTERFACE_MODE_RTBI;
if (strcasecmp(phy_connection_type, "sgmii") == 0)
return PHY_INTERFACE_MODE_SGMII;
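/* Fall back to MII for missing or unrecognized strings */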
return PHY_INTERFACE_MODE_MII;
}
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
if (!ugeth->phydev)
return -ENODEV;
return phy_mii_ioctl(ugeth->phydev, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_open = ucc_geth_open,
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
.ndo_do_ioctl = ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ucc_netpoll,
#endif
};
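/*
For reference, a device-tree node of the shape this probe routine
expects might look like the sketch below. The node name, register
range, interrupt number, clock names and phy handle are illustrative
assumptions, not values taken from any particular board file:

ucc@2000 {
device_type = "network";
compatible = "ucc_geth";
cell-index = <1>;
reg = <0x2000 0x200>;
interrupts = <32>;
rx-clock-name = "none";
tx-clock-name = "clk9";
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
};
*/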
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
const char *sprop;
const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
SPEED_10, SPEED_10, SPEED_10,
SPEED_100, SPEED_100, SPEED_100,
SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
};
static const phy_interface_t enet_to_phy_interface[] = {
PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
PHY_INTERFACE_MODE_SGMII,
};
ugeth_vdbg("%s: IN", __func__);
prop = of_get_property(np, "cell-index", NULL);
if (!prop) {
prop = of_get_property(np, "device-id", NULL);
if (!prop)
return -ENODEV;
}
ucc_num = *prop - 1;
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
ug_info = &ugeth_info[ucc_num];
if (ug_info == NULL) {
if (netif_msg_probe(&debug))
pr_err("[%d] Missing additional data!\n", ucc_num);
return -ENODEV;
}
ug_info->uf_info.ucc_num = ucc_num;
sprop = of_get_property(np, "rx-clock-name", NULL);
if (sprop) {
ug_info->uf_info.rx_clock = qe_clock_source(sprop);
if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
(ug_info->uf_info.rx_clock > QE_CLK24)) {
pr_err("invalid rx-clock-name property\n");
return -EINVAL;
}
} else {
prop = of_get_property(np, "rx-clock", NULL);
if (!prop) {
/* If both rx-clock-name and rx-clock are missing,
we want to tell people to use rx-clock-name. */
pr_err("missing rx-clock-name property\n");
return -EINVAL;
}
if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
pr_err("invalid rx-clock propperty\n");
return -EINVAL;
}
ug_info->uf_info.rx_clock = *prop;
}
sprop = of_get_property(np, "tx-clock-name", NULL);
if (sprop) {
ug_info->uf_info.tx_clock = qe_clock_source(sprop);
if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
(ug_info->uf_info.tx_clock > QE_CLK24)) {
pr_err("invalid tx-clock-name property\n");
return -EINVAL;
}
} else {
prop = of_get_property(np, "tx-clock", NULL);
if (!prop) {
pr_err("missing tx-clock-name property\n");
return -EINVAL;
}
if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
pr_err("invalid tx-clock property\n");
return -EINVAL;
}
ug_info->uf_info.tx_clock = *prop;
}
err = of_address_to_resource(np, 0, &res);
if (err)
return -EINVAL;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
/* get the phy interface type, or default to MII */
prop = of_get_property(np, "phy-connection-type", NULL);
if (!prop) {
/* handle interface property present in old trees */
prop = of_get_property(ug_info->phy_node, "interface", NULL);
if (prop != NULL) {
phy_interface = enet_to_phy_interface[*prop];
max_speed = enet_to_speed[*prop];
} else
phy_interface = PHY_INTERFACE_MODE_MII;
} else {
phy_interface = to_phy_interface((const char *)prop);
}
/* get speed, or derive from PHY interface */
if (max_speed == 0)
switch (phy_interface) {
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_TBI:
case PHY_INTERFACE_MODE_RTBI:
case PHY_INTERFACE_MODE_SGMII:
max_speed = SPEED_1000;
break;
default:
max_speed = SPEED_100;
break;
}
if (max_speed == SPEED_1000) {
unsigned int snums = qe_get_num_of_snums();
/* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
/* If QE's snum number is 46/76 which means we need to support
* 4 UECs at 1000Base-T simultaneously, we need to allocate
* more Threads to Rx.
*/
if ((snums == 76) || (snums == 46))
ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
else
ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
}
if (netif_msg_probe(&debug))
pr_info("UCC%1d at 0x%8x (irq = %d)\n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
ug_info->uf_info.irq);
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof(*ugeth));
if (dev == NULL)
return -ENOMEM;
ugeth = netdev_priv(dev);
spin_lock_init(&ugeth->lock);
/* Create CQs for hash tables */
INIT_LIST_HEAD(&ugeth->group_hash_q);
INIT_LIST_HEAD(&ugeth->ind_hash_q);
dev_set_drvdata(device, dev);
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
SET_NETDEV_DEV(dev, device);
/* Fill in the dev structure */
uec_set_ethtool_ops(dev);
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
ugeth->max_speed = max_speed;
err = register_netdev(dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
free_netdev(dev);
return err;
}
mac_addr = of_get_mac_address(np);
if (mac_addr)
memcpy(dev->dev_addr, mac_addr, 6);
ugeth->ug_info = ug_info;
ugeth->dev = device;
ugeth->ndev = dev;
ugeth->node = np;
return 0;
}
static int ucc_geth_remove(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
struct net_device *dev = dev_get_drvdata(device);
struct ucc_geth_private *ugeth = netdev_priv(dev);
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
dev_set_drvdata(device, NULL);
return 0;
}
static struct of_device_id ucc_geth_match[] = {
{
.type = "network",
.compatible = "ucc_geth",
},
{},
};
MODULE_DEVICE_TABLE(of, ucc_geth_match);
static struct platform_driver ucc_geth_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = ucc_geth_match,
},
.probe = ucc_geth_probe,
.remove = ucc_geth_remove,
.suspend = ucc_geth_suspend,
.resume = ucc_geth_resume,
};
static int __init ucc_geth_init(void)
{
int i, ret;
if (netif_msg_drv(&debug))
pr_info(DRV_DESC "\n");
for (i = 0; i < 8; i++)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
ret = platform_driver_register(&ucc_geth_driver);
return ret;
}
static void __exit ucc_geth_exit(void)
{
platform_driver_unregister(&ucc_geth_driver);
}
module_init(ucc_geth_init);
module_exit(ucc_geth_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |