repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
embecosm/epiphany-gcc | gcc/testsuite/gcc.dg/autopar/reduc-1short.c | 114 | 1416 | /* { dg-do compile } */
/* { dg-options "-O2 -ftree-parallelize-loops=4 -fdump-tree-parloops-details -fdump-tree-optimized" } */
#include <stdarg.h>
#include <stdlib.h>
#define N 1600
#define DIFF 242
unsigned short ub[N] = {1,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45};
unsigned short uc[N] = {1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
/* Run three reduction loops (unsigned-short sum of differences, max, min)
   over the global arrays and abort if any result differs from the value
   the caller expects.  noinline keeps the loops in a separate function so
   the parloops dump scans below see exactly three reductions.  */
__attribute__ ((noinline)) void
main1 (unsigned short x, unsigned short max_result, unsigned short min_result)
{
  int i;
  unsigned short udiff = 2;
  unsigned short umax = x;
  unsigned short umin = x;

  /* Sum reduction; arithmetic wraps modulo 2^16 in unsigned short.  */
  for (i = 0; i < N; i++) {
    udiff += (unsigned short)(ub[i] - uc[i]);
  }

  /* Max reduction.  */
  for (i = 0; i < N; i++) {
    umax = umax < uc[i] ? uc[i] : umax;
  }

  /* Min reduction.  */
  for (i = 0; i < N; i++) {
    umin = umin > uc[i] ? uc[i] : umin;
  }

  /* check results: */
  if (udiff != DIFF)
    abort ();
  if (umax != max_result)
    abort ();
  if (umin != min_result)
    abort ();
}
/* Fill the tail of both arrays (indices 16..N-1) with 1 so the reduction
   results checked in main1 are known constants; the first 16 elements keep
   their static initializers.  noinline keeps this loop out of the
   functions analyzed by the dump scans.  */
__attribute__((noinline))
void init_arrays ()
{
  int i;

  for (i=16; i<N; i++)
    {
      ub[i] = 1;
      uc[i] = 1;
    }
}
/* Driver: initialize the arrays, then exercise main1 with two (x, expected
   max, expected min) combinations.  */
int main (void)
{
  init_arrays();
  main1 (100, 100, 1);
  main1 (0, 15, 0);
  return 0;
}
/* { dg-final { scan-tree-dump-times "Detected reduction" 3 "parloops" } } */
/* { dg-final { scan-tree-dump-times "SUCCESS: may be parallelized" 4 "parloops" } } */
/* { dg-final { cleanup-tree-dump "parloops" } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */
| gpl-2.0 |
vSlipenchuk/linux-aufs | drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c | 626 | 6172 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
******************************************************************************/
#include "odm_precomp.h"
/*
 * Decide whether a conditional block in the RF init table applies to
 * this device.  'Hex' packs the device id as board | interface << 8 |
 * platform << 16; 'Condition' is the marker word from the table.
 * 0xCDCDCDCD is the unconditional "else" marker and always matches.
 */
static bool CheckCondition(const u32 Condition, const u32 Hex)
{
	u32 board = Hex & 0xFF;
	u32 intf = (Hex >> 8) & 0xFF;
	u32 platform = (Hex >> 16) & 0xFF;
	u32 field;

	if (Condition == 0xCDCDCDCD)
		return true;

	/* Board byte: a non-zero value that equals the board id rejects. */
	field = Condition & 0xFF;
	if (field != 0x00 && board == field)
		return false;

	/* Interface mask: must share a bit unless the mask is "any" (0x07). */
	field = (Condition >> 8) & 0xFF;
	if (field != 0x07 && (intf & field) == 0)
		return false;

	/* Platform mask: must share a bit unless the mask is "any" (0x0F). */
	field = (Condition >> 16) & 0xFF;
	if (field != 0x0F && (platform & field) == 0)
		return false;

	return true;
}
/******************************************************************************
* RadioA_1T.TXT
******************************************************************************/
/*
 * RF path A initialization table: a flat list of (register offset, value)
 * pairs.  Offsets >= 0xCDCDCDCD are condition markers interpreted by
 * ODM_ReadAndConfig_RadioA_1T_8723A(), not real register writes.
 */
static u32 Array_RadioA_1T_8723A[] = {
	0x000, 0x00030159,
	0x001, 0x00031284,
	0x002, 0x00098000,
	0xFF0F011F, 0xABCD,	/* "if" marker: pairs below apply when the condition matches */
	0x003, 0x00018C63,
	0xCDCDCDCD, 0xCDCD,	/* "else" marker */
	0x003, 0x00039C63,
	0xFF0F011F, 0xDEAD,	/* "endif" marker */
	0x004, 0x000210E7,
	0x009, 0x0002044F,
	0x00A, 0x0001A3F1,
	0x00B, 0x00014787,
	0x00C, 0x000896FE,
	0x00D, 0x0000E02C,
	0x00E, 0x00039CE7,
	0x00F, 0x00000451,
	0x019, 0x00000000,
	0x01A, 0x00030355,
	0x01B, 0x00060A00,
	0x01C, 0x000FC378,
	0x01D, 0x000A1250,
	0x01E, 0x0000024F,
	0x01F, 0x00000000,
	0x020, 0x0000B614,
	0x021, 0x0006C000,
	0x022, 0x00000000,
	0x023, 0x00001558,
	0x024, 0x00000060,
	0x025, 0x00000483,
	0x026, 0x0004F000,
	0x027, 0x000EC7D9,
	0x028, 0x00057730,
	0x029, 0x00004783,
	0x02A, 0x00000001,
	0x02B, 0x00021334,
	0x02A, 0x00000000,
	0x02B, 0x00000054,
	0x02A, 0x00000001,
	0x02B, 0x00000808,
	0x02B, 0x00053333,
	0x02C, 0x0000000C,
	0x02A, 0x00000002,
	0x02B, 0x00000808,
	0x02B, 0x0005B333,
	0x02C, 0x0000000D,
	0x02A, 0x00000003,
	0x02B, 0x00000808,
	0x02B, 0x00063333,
	0x02C, 0x0000000D,
	0x02A, 0x00000004,
	0x02B, 0x00000808,
	0x02B, 0x0006B333,
	0x02C, 0x0000000D,
	0x02A, 0x00000005,
	0x02B, 0x00000808,
	0x02B, 0x00073333,
	0x02C, 0x0000000D,
	0x02A, 0x00000006,
	0x02B, 0x00000709,
	0x02B, 0x0005B333,
	0x02C, 0x0000000D,
	0x02A, 0x00000007,
	0x02B, 0x00000709,
	0x02B, 0x00063333,
	0x02C, 0x0000000D,
	0x02A, 0x00000008,
	0x02B, 0x0000060A,
	0x02B, 0x0004B333,
	0x02C, 0x0000000D,
	0x02A, 0x00000009,
	0x02B, 0x0000060A,
	0x02B, 0x00053333,
	0x02C, 0x0000000D,
	0x02A, 0x0000000A,
	0x02B, 0x0000060A,
	0x02B, 0x0005B333,
	0x02C, 0x0000000D,
	0x02A, 0x0000000B,
	0x02B, 0x0000060A,
	0x02B, 0x00063333,
	0x02C, 0x0000000D,
	0x02A, 0x0000000C,
	0x02B, 0x0000060A,
	0x02B, 0x0006B333,
	0x02C, 0x0000000D,
	0x02A, 0x0000000D,
	0x02B, 0x0000060A,
	0x02B, 0x00073333,
	0x02C, 0x0000000D,
	0x02A, 0x0000000E,
	0x02B, 0x0000050B,
	0x02B, 0x00066666,
	0x02C, 0x0000001A,
	0x02A, 0x000E0000,
	0x010, 0x0004000F,
	0x011, 0x000E31FC,
	0x010, 0x0006000F,
	0x011, 0x000FF9F8,
	0x010, 0x0002000F,
	0x011, 0x000203F9,
	0x010, 0x0003000F,
	0x011, 0x000FF500,
	0x010, 0x00000000,
	0x011, 0x00000000,
	0x010, 0x0008000F,
	0x011, 0x0003F100,
	0x010, 0x0009000F,
	0x011, 0x00023100,
	0x012, 0x00032000,
	0x012, 0x00071000,
	0x012, 0x000B0000,
	0x012, 0x000FC000,
	0x013, 0x000287B3,
	0x013, 0x000244B7,
	0x013, 0x000204AB,
	0x013, 0x0001C49F,
	0x013, 0x00018493,
	0x013, 0x0001429B,
	0x013, 0x00010299,
	0x013, 0x0000C29C,
	0x013, 0x000081A0,
	0x013, 0x000040AC,
	0x013, 0x00000020,
	0x014, 0x0001944C,
	0x014, 0x00059444,
	0x014, 0x0009944C,
	0x014, 0x000D9444,
	0xFF0F011F, 0xABCD,	/* "if" marker */
	0x015, 0x0000F424,
	0x015, 0x0004F424,
	0x015, 0x0008F424,
	0x015, 0x000CF424,
	0xCDCDCDCD, 0xCDCD,	/* "else" marker */
	0x015, 0x0000F474,
	0x015, 0x0004F477,
	0x015, 0x0008F455,
	0x015, 0x000CF455,
	0xFF0F011F, 0xDEAD,	/* "endif" marker */
	0x016, 0x00000339,
	0x016, 0x00040339,
	0x016, 0x00080339,
	0xFF0F011F, 0xABCD,	/* "if" marker */
	0x016, 0x000C0356,
	0xCDCDCDCD, 0xCDCD,	/* "else" marker */
	0x016, 0x000C0366,
	0xFF0F011F, 0xDEAD,	/* "endif" marker */
	0x000, 0x00010159,
	0x018, 0x0000F401,
	0x0FE, 0x00000000,
	0x0FE, 0x00000000,
	0x01F, 0x00000003,
	0x0FE, 0x00000000,
	0x0FE, 0x00000000,
	0x01E, 0x00000247,
	0x01F, 0x00000000,
	0x000, 0x00030159,
};
/*
 * Walk the (offset, value) pairs in Array_RadioA_1T_8723A and program them
 * into RF path A.  Pair offsets >= 0xCDCDCDCD are condition markers: the
 * condition word is matched against the board/interface/platform id packed
 * into 'hex', and the following pairs are applied or skipped until the
 * 0xDEAD terminator for that conditional block.
 */
void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm)
{
#define READ_NEXT_PAIR(v1, v2, i)			\
	do {						\
		i += 2; v1 = Array[i]; v2 = Array[i+1];\
	} while (0)

	u32 hex = 0;
	u32 i = 0;
	u8 platform = 0x04;
	u8 interfaceValue = pDM_Odm->SupportInterface;
	u8 board = pDM_Odm->BoardType;
	u32 ArrayLen = sizeof(Array_RadioA_1T_8723A)/sizeof(u32);
	u32 *Array = Array_RadioA_1T_8723A;

	/* Pack the device id word checked by CheckCondition(); the top
	 * byte is always 0xFF. */
	hex += board;
	hex += interfaceValue << 8;
	hex += platform << 16;
	hex += 0xFF000000;
	for (i = 0; i < ArrayLen; i += 2) {
		u32 v1 = Array[i];
		u32 v2 = Array[i+1];

		/* This (offset, data) pair meets the condition. */
		if (v1 < 0xCDCDCDCD) {
			odm_ConfigRFReg_8723A(pDM_Odm, v1, v2, RF_PATH_A, v1);
			continue;
		} else {
			if (!CheckCondition(Array[i], hex)) {
				/* Discard the following (offset, data) pairs. */
				READ_NEXT_PAIR(v1, v2, i);
				while (v2 != 0xDEAD &&
				       v2 != 0xCDEF &&
				       v2 != 0xCDCD && i < ArrayLen - 2)
					READ_NEXT_PAIR(v1, v2, i);
				i -= 2; /* prevent from for-loop += 2 */
			} else {
				/* Configure matched pairs and skip to end of if-else. */
				READ_NEXT_PAIR(v1, v2, i);
				while (v2 != 0xDEAD &&
				       v2 != 0xCDEF &&
				       v2 != 0xCDCD && i < ArrayLen - 2) {
					odm_ConfigRFReg_8723A(pDM_Odm, v1, v2,
							      RF_PATH_A, v1);
					READ_NEXT_PAIR(v1, v2, i);
				}

				/* Skip the not-taken branch up to its 0xDEAD marker. */
				while (v2 != 0xDEAD && i < ArrayLen - 2)
					READ_NEXT_PAIR(v1, v2, i);
			}
		}
	}
}
| gpl-2.0 |
LinTeX9527/linux | drivers/net/wireless/prism54/oid_mgt.c | 1394 | 25229 | /*
* Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "prismcompat.h"
#include "islpci_dev.h"
#include "islpci_mgt.h"
#include "isl_oid.h"
#include "oid_mgt.h"
#include "isl_ioctl.h"
/* to convert between channel and freq */
/* 2.4 GHz b/g frequencies in MHz; index i corresponds to channel i + 1. */
static const int frequency_list_bg[] = { 2412, 2417, 2422, 2427, 2432,
	2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484
};
/*
 * Map a frequency in MHz to its channel number.  Returns a 1-based b/g
 * channel for the 2.4 GHz band, the arithmetically derived channel for
 * 5000..6000 MHz, and 0 for anything unrecognized.
 */
int
channel_of_freq(int f)
{
	int idx;

	/* 2.4 GHz band: look the frequency up in the b/g table. */
	if (f >= 2412 && f <= 2484) {
		for (idx = 0; idx < 14; idx++) {
			if (frequency_list_bg[idx] == f)
				return idx + 1;	/* channels are 1-based */
		}
		return 0;	/* in range but not a listed frequency */
	}

	/* 5 GHz band: channel follows directly from the frequency. */
	if (f >= 5000 && f <= 6000)
		return (f - 5000) / 5;

	return 0;
}
#define OID_STRUCT(name,oid,s,t) [name] = {oid, 0, sizeof(s), t}
#define OID_STRUCT_C(name,oid,s,t) OID_STRUCT(name,oid,s,t | OID_FLAG_CACHED)
#define OID_U32(name,oid) OID_STRUCT(name,oid,u32,OID_TYPE_U32)
#define OID_U32_C(name,oid) OID_STRUCT_C(name,oid,u32,OID_TYPE_U32)
#define OID_STRUCT_MLME(name,oid) OID_STRUCT(name,oid,struct obj_mlme,OID_TYPE_MLME)
#define OID_STRUCT_MLMEEX(name,oid) OID_STRUCT(name,oid,struct obj_mlmeex,OID_TYPE_MLMEEX)
#define OID_UNKNOWN(name,oid) OID_STRUCT(name,oid,0,0)
/*
 * Master OID table, indexed by enum oid_num_t.  Each entry carries the
 * firmware OID, the instance range for multi-instance OIDs, the object
 * size, and type/caching flags consumed by the mgt_* helpers below.
 */
struct oid_t isl_oid[] = {
	OID_STRUCT(GEN_OID_MACADDRESS, 0x00000000, u8[6], OID_TYPE_ADDR),
	OID_U32(GEN_OID_LINKSTATE, 0x00000001),
	OID_UNKNOWN(GEN_OID_WATCHDOG, 0x00000002),
	OID_UNKNOWN(GEN_OID_MIBOP, 0x00000003),
	OID_UNKNOWN(GEN_OID_OPTIONS, 0x00000004),
	OID_UNKNOWN(GEN_OID_LEDCONFIG, 0x00000005),

	/* 802.11 */
	OID_U32_C(DOT11_OID_BSSTYPE, 0x10000000),
	OID_STRUCT_C(DOT11_OID_BSSID, 0x10000001, u8[6], OID_TYPE_RAW),
	OID_STRUCT_C(DOT11_OID_SSID, 0x10000002, struct obj_ssid,
		     OID_TYPE_SSID),
	OID_U32(DOT11_OID_STATE, 0x10000003),
	OID_U32(DOT11_OID_AID, 0x10000004),
	OID_STRUCT(DOT11_OID_COUNTRYSTRING, 0x10000005, u8[4], OID_TYPE_RAW),
	OID_STRUCT_C(DOT11_OID_SSIDOVERRIDE, 0x10000006, struct obj_ssid,
		     OID_TYPE_SSID),
	OID_U32(DOT11_OID_MEDIUMLIMIT, 0x11000000),
	OID_U32_C(DOT11_OID_BEACONPERIOD, 0x11000001),
	OID_U32(DOT11_OID_DTIMPERIOD, 0x11000002),
	OID_U32(DOT11_OID_ATIMWINDOW, 0x11000003),
	OID_U32(DOT11_OID_LISTENINTERVAL, 0x11000004),
	OID_U32(DOT11_OID_CFPPERIOD, 0x11000005),
	OID_U32(DOT11_OID_CFPDURATION, 0x11000006),
	OID_U32_C(DOT11_OID_AUTHENABLE, 0x12000000),
	OID_U32_C(DOT11_OID_PRIVACYINVOKED, 0x12000001),
	OID_U32_C(DOT11_OID_EXUNENCRYPTED, 0x12000002),
	OID_U32_C(DOT11_OID_DEFKEYID, 0x12000003),
	[DOT11_OID_DEFKEYX] = {0x12000004, 3, sizeof (struct obj_key),
			       OID_FLAG_CACHED | OID_TYPE_KEY},	/* DOT11_OID_DEFKEY1,...DOT11_OID_DEFKEY4 */
	OID_UNKNOWN(DOT11_OID_STAKEY, 0x12000008),
	OID_U32(DOT11_OID_REKEYTHRESHOLD, 0x12000009),
	OID_UNKNOWN(DOT11_OID_STASC, 0x1200000a),
	OID_U32(DOT11_OID_PRIVTXREJECTED, 0x1a000000),
	OID_U32(DOT11_OID_PRIVRXPLAIN, 0x1a000001),
	OID_U32(DOT11_OID_PRIVRXFAILED, 0x1a000002),
	OID_U32(DOT11_OID_PRIVRXNOKEY, 0x1a000003),
	OID_U32_C(DOT11_OID_RTSTHRESH, 0x13000000),
	OID_U32_C(DOT11_OID_FRAGTHRESH, 0x13000001),
	OID_U32_C(DOT11_OID_SHORTRETRIES, 0x13000002),
	OID_U32_C(DOT11_OID_LONGRETRIES, 0x13000003),
	OID_U32_C(DOT11_OID_MAXTXLIFETIME, 0x13000004),
	OID_U32(DOT11_OID_MAXRXLIFETIME, 0x13000005),
	OID_U32(DOT11_OID_AUTHRESPTIMEOUT, 0x13000006),
	OID_U32(DOT11_OID_ASSOCRESPTIMEOUT, 0x13000007),
	OID_UNKNOWN(DOT11_OID_ALOFT_TABLE, 0x1d000000),
	OID_UNKNOWN(DOT11_OID_ALOFT_CTRL_TABLE, 0x1d000001),
	OID_UNKNOWN(DOT11_OID_ALOFT_RETREAT, 0x1d000002),
	OID_UNKNOWN(DOT11_OID_ALOFT_PROGRESS, 0x1d000003),
	OID_U32(DOT11_OID_ALOFT_FIXEDRATE, 0x1d000004),
	OID_UNKNOWN(DOT11_OID_ALOFT_RSSIGRAPH, 0x1d000005),
	OID_UNKNOWN(DOT11_OID_ALOFT_CONFIG, 0x1d000006),
	[DOT11_OID_VDCFX] = {0x1b000000, 7, 0, 0},
	OID_U32(DOT11_OID_MAXFRAMEBURST, 0x1b000008),
	OID_U32(DOT11_OID_PSM, 0x14000000),
	OID_U32(DOT11_OID_CAMTIMEOUT, 0x14000001),
	OID_U32(DOT11_OID_RECEIVEDTIMS, 0x14000002),
	OID_U32(DOT11_OID_ROAMPREFERENCE, 0x14000003),
	OID_U32(DOT11_OID_BRIDGELOCAL, 0x15000000),
	OID_U32(DOT11_OID_CLIENTS, 0x15000001),
	OID_U32(DOT11_OID_CLIENTSASSOCIATED, 0x15000002),
	[DOT11_OID_CLIENTX] = {0x15000003, 2006, 0, 0},	/* DOT11_OID_CLIENTX,...DOT11_OID_CLIENT2007 */
	OID_STRUCT(DOT11_OID_CLIENTFIND, 0x150007DB, u8[6], OID_TYPE_ADDR),
	OID_STRUCT(DOT11_OID_WDSLINKADD, 0x150007DC, u8[6], OID_TYPE_ADDR),
	OID_STRUCT(DOT11_OID_WDSLINKREMOVE, 0x150007DD, u8[6], OID_TYPE_ADDR),
	OID_STRUCT(DOT11_OID_EAPAUTHSTA, 0x150007DE, u8[6], OID_TYPE_ADDR),
	OID_STRUCT(DOT11_OID_EAPUNAUTHSTA, 0x150007DF, u8[6], OID_TYPE_ADDR),
	OID_U32_C(DOT11_OID_DOT1XENABLE, 0x150007E0),
	OID_UNKNOWN(DOT11_OID_MICFAILURE, 0x150007E1),
	OID_UNKNOWN(DOT11_OID_REKEYINDICATE, 0x150007E2),
	OID_U32(DOT11_OID_MPDUTXSUCCESSFUL, 0x16000000),
	OID_U32(DOT11_OID_MPDUTXONERETRY, 0x16000001),
	OID_U32(DOT11_OID_MPDUTXMULTIPLERETRIES, 0x16000002),
	OID_U32(DOT11_OID_MPDUTXFAILED, 0x16000003),
	OID_U32(DOT11_OID_MPDURXSUCCESSFUL, 0x16000004),
	OID_U32(DOT11_OID_MPDURXDUPS, 0x16000005),
	OID_U32(DOT11_OID_RTSSUCCESSFUL, 0x16000006),
	OID_U32(DOT11_OID_RTSFAILED, 0x16000007),
	OID_U32(DOT11_OID_ACKFAILED, 0x16000008),
	OID_U32(DOT11_OID_FRAMERECEIVES, 0x16000009),
	OID_U32(DOT11_OID_FRAMEERRORS, 0x1600000A),
	OID_U32(DOT11_OID_FRAMEABORTS, 0x1600000B),
	OID_U32(DOT11_OID_FRAMEABORTSPHY, 0x1600000C),
	OID_U32(DOT11_OID_SLOTTIME, 0x17000000),
	OID_U32(DOT11_OID_CWMIN, 0x17000001),
	OID_U32(DOT11_OID_CWMAX, 0x17000002),
	OID_U32(DOT11_OID_ACKWINDOW, 0x17000003),
	OID_U32(DOT11_OID_ANTENNARX, 0x17000004),
	OID_U32(DOT11_OID_ANTENNATX, 0x17000005),
	OID_U32(DOT11_OID_ANTENNADIVERSITY, 0x17000006),
	OID_U32_C(DOT11_OID_CHANNEL, 0x17000007),
	OID_U32_C(DOT11_OID_EDTHRESHOLD, 0x17000008),
	OID_U32(DOT11_OID_PREAMBLESETTINGS, 0x17000009),
	OID_STRUCT(DOT11_OID_RATES, 0x1700000A, u8[IWMAX_BITRATES + 1],
		   OID_TYPE_RAW),
	OID_U32(DOT11_OID_CCAMODESUPPORTED, 0x1700000B),
	OID_U32(DOT11_OID_CCAMODE, 0x1700000C),
	OID_UNKNOWN(DOT11_OID_RSSIVECTOR, 0x1700000D),
	OID_UNKNOWN(DOT11_OID_OUTPUTPOWERTABLE, 0x1700000E),
	OID_U32(DOT11_OID_OUTPUTPOWER, 0x1700000F),
	OID_STRUCT(DOT11_OID_SUPPORTEDRATES, 0x17000010,
		   u8[IWMAX_BITRATES + 1], OID_TYPE_RAW),
	OID_U32_C(DOT11_OID_FREQUENCY, 0x17000011),
	[DOT11_OID_SUPPORTEDFREQUENCIES] =
	    {0x17000012, 0, sizeof (struct obj_frequencies)
	     + sizeof (u16) * IWMAX_FREQ, OID_TYPE_FREQUENCIES},
	OID_U32(DOT11_OID_NOISEFLOOR, 0x17000013),
	OID_STRUCT(DOT11_OID_FREQUENCYACTIVITY, 0x17000014, u8[IWMAX_FREQ + 1],
		   OID_TYPE_RAW),
	OID_UNKNOWN(DOT11_OID_IQCALIBRATIONTABLE, 0x17000015),
	OID_U32(DOT11_OID_NONERPPROTECTION, 0x17000016),
	OID_U32(DOT11_OID_SLOTSETTINGS, 0x17000017),
	OID_U32(DOT11_OID_NONERPTIMEOUT, 0x17000018),
	OID_U32(DOT11_OID_PROFILES, 0x17000019),
	OID_STRUCT(DOT11_OID_EXTENDEDRATES, 0x17000020,
		   u8[IWMAX_BITRATES + 1], OID_TYPE_RAW),
	OID_STRUCT_MLME(DOT11_OID_DEAUTHENTICATE, 0x18000000),
	OID_STRUCT_MLME(DOT11_OID_AUTHENTICATE, 0x18000001),
	OID_STRUCT_MLME(DOT11_OID_DISASSOCIATE, 0x18000002),
	OID_STRUCT_MLME(DOT11_OID_ASSOCIATE, 0x18000003),
	OID_UNKNOWN(DOT11_OID_SCAN, 0x18000004),
	OID_STRUCT_MLMEEX(DOT11_OID_BEACON, 0x18000005),
	OID_STRUCT_MLMEEX(DOT11_OID_PROBE, 0x18000006),
	OID_STRUCT_MLMEEX(DOT11_OID_DEAUTHENTICATEEX, 0x18000007),
	OID_STRUCT_MLMEEX(DOT11_OID_AUTHENTICATEEX, 0x18000008),
	OID_STRUCT_MLMEEX(DOT11_OID_DISASSOCIATEEX, 0x18000009),
	OID_STRUCT_MLMEEX(DOT11_OID_ASSOCIATEEX, 0x1800000A),
	OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATE, 0x1800000B),
	OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATEEX, 0x1800000C),
	OID_U32(DOT11_OID_NONERPSTATUS, 0x1E000000),
	OID_U32(DOT11_OID_STATIMEOUT, 0x19000000),
	OID_U32_C(DOT11_OID_MLMEAUTOLEVEL, 0x19000001),
	OID_U32(DOT11_OID_BSSTIMEOUT, 0x19000002),
	[DOT11_OID_ATTACHMENT] = {0x19000003, 0,
				  sizeof(struct obj_attachment), OID_TYPE_ATTACH},
	OID_STRUCT_C(DOT11_OID_PSMBUFFER, 0x19000004, struct obj_buffer,
		     OID_TYPE_BUFFER),
	OID_U32(DOT11_OID_BSSS, 0x1C000000),
	[DOT11_OID_BSSX] = {0x1C000001, 63, sizeof (struct obj_bss),
			    OID_TYPE_BSS},	/*DOT11_OID_BSS1,...,DOT11_OID_BSS64 */
	OID_STRUCT(DOT11_OID_BSSFIND, 0x1C000042, struct obj_bss, OID_TYPE_BSS),
	[DOT11_OID_BSSLIST] = {0x1C000043, 0, sizeof (struct
						      obj_bsslist) +
			       sizeof (struct obj_bss[IWMAX_BSS]),
			       OID_TYPE_BSSLIST},
	OID_UNKNOWN(OID_INL_TUNNEL, 0xFF020000),
	OID_UNKNOWN(OID_INL_MEMADDR, 0xFF020001),
	OID_UNKNOWN(OID_INL_MEMORY, 0xFF020002),
	OID_U32_C(OID_INL_MODE, 0xFF020003),
	OID_UNKNOWN(OID_INL_COMPONENT_NR, 0xFF020004),
	OID_STRUCT(OID_INL_VERSION, 0xFF020005, u8[8], OID_TYPE_RAW),
	OID_UNKNOWN(OID_INL_INTERFACE_ID, 0xFF020006),
	OID_UNKNOWN(OID_INL_COMPONENT_ID, 0xFF020007),
	OID_U32_C(OID_INL_CONFIG, 0xFF020008),
	OID_U32_C(OID_INL_DOT11D_CONFORMANCE, 0xFF02000C),
	OID_U32(OID_INL_PHYCAPABILITIES, 0xFF02000D),
	OID_U32_C(OID_INL_OUTPUTPOWER, 0xFF02000F),
};
/*
 * Allocate the per-device MIB cache: a pointer table indexed by
 * oid_num_t plus one zeroed buffer per cached OID (range + 1 instances
 * of the object size).  Returns 0 on success or -ENOMEM on allocation
 * failure; on failure the caller is expected to tear down with
 * mgt_clean(), which frees any partially allocated entries.
 */
int
mgt_init(islpci_private *priv)
{
	int i;

	priv->mib = kcalloc(OID_NUM_LAST, sizeof (void *), GFP_KERNEL);
	if (!priv->mib)
		return -ENOMEM;

	/* Alloc the cache */
	for (i = 0; i < OID_NUM_LAST; i++) {
		if (isl_oid[i].flags & OID_FLAG_CACHED) {
			/* kcalloc rather than kzalloc(size * n): the
			 * multiplication is overflow-checked. */
			priv->mib[i] = kcalloc(isl_oid[i].range + 1,
					       isl_oid[i].size,
					       GFP_KERNEL);
			if (!priv->mib[i])
				return -ENOMEM;
		} else
			priv->mib[i] = NULL;
	}

	init_rwsem(&priv->mib_sem);
	prism54_mib_init(priv);

	return 0;
}
/*
 * Free the MIB cache allocated by mgt_init().  Safe to call when the
 * cache was never allocated (or was already cleaned): every freed
 * pointer is nulled so a repeated call is a no-op.
 */
void
mgt_clean(islpci_private *priv)
{
	int idx;

	if (!priv->mib)
		return;

	/* Release each cached OID buffer, then the pointer table itself. */
	for (idx = 0; idx < OID_NUM_LAST; idx++) {
		kfree(priv->mib[idx]);
		priv->mib[idx] = NULL;
	}

	kfree(priv->mib);
	priv->mib = NULL;
}
/*
 * Convert an object of the given OID type from firmware (little-endian)
 * byte order to CPU byte order, in place.  Unknown types are a bug.
 */
void
mgt_le_to_cpu(int type, void *data)
{
	switch (type) {
	case OID_TYPE_U32:
		*(u32 *) data = le32_to_cpu(*(u32 *) data);
		break;
	case OID_TYPE_BUFFER:{
			struct obj_buffer *buff = data;
			buff->size = le32_to_cpu(buff->size);
			buff->addr = le32_to_cpu(buff->addr);
			break;
		}
	case OID_TYPE_BSS:{
			struct obj_bss *bss = data;
			bss->age = le16_to_cpu(bss->age);
			bss->channel = le16_to_cpu(bss->channel);
			bss->capinfo = le16_to_cpu(bss->capinfo);
			bss->rates = le16_to_cpu(bss->rates);
			bss->basic_rates = le16_to_cpu(bss->basic_rates);
			break;
		}
	case OID_TYPE_BSSLIST:{
			struct obj_bsslist *list = data;
			int i;
			/* Convert the count first, then each embedded BSS. */
			list->nr = le32_to_cpu(list->nr);
			for (i = 0; i < list->nr; i++)
				mgt_le_to_cpu(OID_TYPE_BSS, &list->bsslist[i]);
			break;
		}
	case OID_TYPE_FREQUENCIES:{
			struct obj_frequencies *freq = data;
			int i;
			freq->nr = le16_to_cpu(freq->nr);
			for (i = 0; i < freq->nr; i++)
				freq->mhz[i] = le16_to_cpu(freq->mhz[i]);
			break;
		}
	case OID_TYPE_MLME:{
			struct obj_mlme *mlme = data;
			mlme->id = le16_to_cpu(mlme->id);
			mlme->state = le16_to_cpu(mlme->state);
			mlme->code = le16_to_cpu(mlme->code);
			break;
		}
	case OID_TYPE_MLMEEX:{
			struct obj_mlmeex *mlme = data;
			mlme->id = le16_to_cpu(mlme->id);
			mlme->state = le16_to_cpu(mlme->state);
			mlme->code = le16_to_cpu(mlme->code);
			mlme->size = le16_to_cpu(mlme->size);
			break;
		}
	case OID_TYPE_ATTACH:{
			struct obj_attachment *attach = data;
			attach->id = le16_to_cpu(attach->id);
			attach->size = le16_to_cpu(attach->size);
			break;
		}
	case OID_TYPE_SSID:
	case OID_TYPE_KEY:
	case OID_TYPE_ADDR:
	case OID_TYPE_RAW:
		/* Plain byte data: no swapping needed. */
		break;
	default:
		BUG();
	}
}
/*
 * Mirror of mgt_le_to_cpu(): convert an object of the given OID type
 * from CPU byte order to firmware (little-endian) byte order, in place.
 */
static void
mgt_cpu_to_le(int type, void *data)
{
	switch (type) {
	case OID_TYPE_U32:
		*(u32 *) data = cpu_to_le32(*(u32 *) data);
		break;
	case OID_TYPE_BUFFER:{
			struct obj_buffer *buff = data;
			buff->size = cpu_to_le32(buff->size);
			buff->addr = cpu_to_le32(buff->addr);
			break;
		}
	case OID_TYPE_BSS:{
			struct obj_bss *bss = data;
			bss->age = cpu_to_le16(bss->age);
			bss->channel = cpu_to_le16(bss->channel);
			bss->capinfo = cpu_to_le16(bss->capinfo);
			bss->rates = cpu_to_le16(bss->rates);
			bss->basic_rates = cpu_to_le16(bss->basic_rates);
			break;
		}
	case OID_TYPE_BSSLIST:{
			struct obj_bsslist *list = data;
			int i;
			/* NOTE: list->nr is swapped before the loop reads it,
			 * so the iteration count is the LE representation on
			 * big-endian CPUs — same long-standing behavior as
			 * the original code. */
			list->nr = cpu_to_le32(list->nr);
			for (i = 0; i < list->nr; i++)
				mgt_cpu_to_le(OID_TYPE_BSS, &list->bsslist[i]);
			break;
		}
	case OID_TYPE_FREQUENCIES:{
			struct obj_frequencies *freq = data;
			int i;
			freq->nr = cpu_to_le16(freq->nr);
			for (i = 0; i < freq->nr; i++)
				freq->mhz[i] = cpu_to_le16(freq->mhz[i]);
			break;
		}
	case OID_TYPE_MLME:{
			struct obj_mlme *mlme = data;
			mlme->id = cpu_to_le16(mlme->id);
			mlme->state = cpu_to_le16(mlme->state);
			mlme->code = cpu_to_le16(mlme->code);
			break;
		}
	case OID_TYPE_MLMEEX:{
			struct obj_mlmeex *mlme = data;
			mlme->id = cpu_to_le16(mlme->id);
			mlme->state = cpu_to_le16(mlme->state);
			mlme->code = cpu_to_le16(mlme->code);
			mlme->size = cpu_to_le16(mlme->size);
			break;
		}
	case OID_TYPE_ATTACH:{
			struct obj_attachment *attach = data;
			attach->id = cpu_to_le16(attach->id);
			attach->size = cpu_to_le16(attach->size);
			break;
		}
	case OID_TYPE_SSID:
	case OID_TYPE_KEY:
	case OID_TYPE_ADDR:
	case OID_TYPE_RAW:
		/* Plain byte data: no swapping needed. */
		break;
	default:
		BUG();
	}
}
/* Note : data is modified during this function */
/*
 * Issue a PIMFOR SET for OID n (plus 'extra' for multi-instance OIDs).
 * 'data' is converted to little-endian in place for the transaction and
 * converted back before returning.  When data is NULL, the cached value
 * is re-sent to the device instead.  Returns 0 on success, -EIO on
 * device failure, -1 if the MIB cache is gone.
 */
int
mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
{
	int ret = 0;
	struct islpci_mgmtframe *response = NULL;
	int response_op = PIMFOR_OP_ERROR;
	int dlen;
	void *cache, *_data = data;
	u32 oid;

	BUG_ON(OID_NUM_LAST <= n);
	BUG_ON(extra > isl_oid[n].range);

	if (!priv->mib)
		/* memory has been freed */
		return -1;

	dlen = isl_oid[n].size;
	cache = priv->mib[n];
	/* Multi-instance OIDs are cached as consecutive dlen-byte slots. */
	cache += (cache ? extra * dlen : 0);
	oid = isl_oid[n].oid + extra;

	if (_data == NULL)
		/* we are requested to re-set a cached value */
		_data = cache;
	else
		mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, _data);
	/* If we are going to write to the cache, we don't want anyone to read
	 * it -> acquire write lock.
	 * Else we could acquire a read lock to be sure we don't bother the
	 * commit process (which takes a write lock). But I'm not sure if it's
	 * needed.
	 */
	if (cache)
		down_write(&priv->mib_sem);

	if (islpci_get_state(priv) >= PRV_STATE_READY) {
		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
					     _data, dlen, &response);
		if (!ret) {
			response_op = response->header->operation;
			islpci_mgt_release(response);
		}
		if (ret || response_op == PIMFOR_OP_ERROR)
			ret = -EIO;
	} else if (!cache)
		/* Device not ready and nothing cached: nothing we can do. */
		ret = -EIO;

	if (cache) {
		/* Only record in the cache what the device accepted. */
		if (!ret && data)
			memcpy(cache, _data, dlen);
		up_write(&priv->mib_sem);
	}

	/* re-set given data to what it was */
	if (data)
		mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);

	return ret;
}
/* None of these are cached */
/*
 * Issue a PIMFOR SET for a variable-length OID: the payload is the
 * fixed object size plus 'extra_len' trailing bytes.  'data' is
 * endian-converted in place for the transfer and restored on return.
 * Returns 0 on success, -EIO otherwise.
 */
int
mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len)
{
	int ret = 0;
	struct islpci_mgmtframe *response;
	int response_op = PIMFOR_OP_ERROR;
	int dlen;
	u32 oid;

	BUG_ON(OID_NUM_LAST <= n);

	dlen = isl_oid[n].size;
	oid = isl_oid[n].oid;

	mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, data);

	if (islpci_get_state(priv) >= PRV_STATE_READY) {
		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid,
					     data, dlen + extra_len, &response);
		if (!ret) {
			response_op = response->header->operation;
			islpci_mgt_release(response);
		}
		if (ret || response_op == PIMFOR_OP_ERROR)
			ret = -EIO;
	} else
		ret = -EIO;

	/* re-set given data to what it was */
	if (data)
		mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data);

	return ret;
}
/*
 * Issue a PIMFOR GET for OID n (+ extra), falling back to the cached
 * value when the device is not ready.  The result is returned in 'res':
 * as res->u for U32-typed OIDs, otherwise in a freshly kmalloc'ed
 * res->ptr (caller frees), converted to CPU byte order.  Returns 0 on
 * success, -EIO on device failure, -1 if the MIB cache is gone.
 */
int
mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data,
		union oid_res_t *res)
{

	int ret = -EIO;
	int reslen = 0;
	struct islpci_mgmtframe *response = NULL;

	int dlen;
	void *cache, *_res = NULL;
	u32 oid;

	BUG_ON(OID_NUM_LAST <= n);
	BUG_ON(extra > isl_oid[n].range);

	res->ptr = NULL;

	if (!priv->mib)
		/* memory has been freed */
		return -1;

	dlen = isl_oid[n].size;
	cache = priv->mib[n];
	/* Multi-instance OIDs are cached as consecutive dlen-byte slots. */
	cache += cache ? extra * dlen : 0;
	oid = isl_oid[n].oid + extra;
	reslen = dlen;

	if (cache)
		down_read(&priv->mib_sem);
	if (islpci_get_state(priv) >= PRV_STATE_READY) {
		ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
					     oid, data, dlen, &response);
		if (ret || !response ||
		    response->header->operation == PIMFOR_OP_ERROR) {
			if (response)
				islpci_mgt_release(response);
			ret = -EIO;
		}
		if (!ret) {
			/* _res points into the response frame, which is
			 * released only after the copy below. */
			_res = response->data;
			reslen = response->header->length;
		}
	} else if (cache) {
		/* Device not ready: serve the request from the cache. */
		_res = cache;
		ret = 0;
	}
	if ((isl_oid[n].flags & OID_FLAG_TYPE) == OID_TYPE_U32)
		res->u = ret ? 0 : le32_to_cpu(*(u32 *) _res);
	else {
		res->ptr = kmalloc(reslen, GFP_KERNEL);
		BUG_ON(res->ptr == NULL);
		if (ret)
			memset(res->ptr, 0, reslen);
		else {
			memcpy(res->ptr, _res, reslen);
			mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE,
				      res->ptr);
		}
	}
	if (cache)
		up_read(&priv->mib_sem);

	if (response && !ret)
		islpci_mgt_release(response);

	if (reslen > isl_oid[n].size)
		/* NOTE(review): this printk has no trailing '\n' — the next
		 * kernel log line will be appended to it. */
		printk(KERN_DEBUG
		       "mgt_get_request(0x%x): received data length was bigger "
		       "than expected (%d > %d). Memory is probably corrupted...",
		       oid, reslen, isl_oid[n].size);

	return ret;
}
/* lock outside */
/*
 * Push the cached values of the n OIDs listed in 'l' to the device,
 * covering every instance of multi-instance OIDs.  Caller must hold
 * mib_sem.  Returns 0 if every transaction succeeded, non-zero if any
 * failed (failures are logged but do not stop the walk).
 */
int
mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n)
{
	int i, ret = 0;
	struct islpci_mgmtframe *response;

	for (i = 0; i < n; i++) {
		struct oid_t *t = &(isl_oid[l[i]]);
		void *data = priv->mib[l[i]];
		int j = 0;
		u32 oid = t->oid;
		BUG_ON(data == NULL);
		/* One SET per instance: oid and data advance in lock step. */
		while (j <= t->range) {
			int r = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET,
						       oid, data, t->size,
						       &response);
			if (response) {
				r |= (response->header->operation == PIMFOR_OP_ERROR);
				islpci_mgt_release(response);
			}
			if (r)
				printk(KERN_ERR "%s: mgt_commit_list: failure. "
				       "oid=%08x err=%d\n",
				       priv->ndev->name, oid, r);
			ret |= r;
			j++;
			oid++;
			data += t->size;
		}
	}
	return ret;
}
/* Lock outside */
/*
 * Store 'data' into the cache slot for OID n, converting it to the
 * firmware's little-endian layout.  Caller must hold mib_sem.
 */
void
mgt_set(islpci_private *priv, enum oid_num_t n, void *data)
{
	void *slot;

	BUG_ON(OID_NUM_LAST <= n);
	slot = priv->mib[n];
	BUG_ON(slot == NULL);

	memcpy(slot, data, isl_oid[n].size);
	mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, slot);
}
/*
 * Copy the cached value of OID n into 'res', converted from the
 * firmware's little-endian layout to CPU byte order.  Caller must hold
 * mib_sem.
 */
void
mgt_get(islpci_private *priv, enum oid_num_t n, void *res)
{
	void *slot;

	BUG_ON(OID_NUM_LAST <= n);
	slot = priv->mib[n];
	BUG_ON(slot == NULL);
	BUG_ON(res == NULL);

	memcpy(res, slot, isl_oid[n].size);
	mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, res);
}
/* Commits the cache. Lock outside. */
/* OIDs committed in every mode, including monitor. */
static enum oid_num_t commit_part1[] = {
	OID_INL_CONFIG,
	OID_INL_MODE,
	DOT11_OID_BSSTYPE,
	DOT11_OID_CHANNEL,
	DOT11_OID_MLMEAUTOLEVEL
};
/* OIDs committed only outside monitor mode (see mgt_commit). */
static enum oid_num_t commit_part2[] = {
	DOT11_OID_SSID,
	DOT11_OID_PSMBUFFER,
	DOT11_OID_AUTHENABLE,
	DOT11_OID_PRIVACYINVOKED,
	DOT11_OID_EXUNENCRYPTED,
	DOT11_OID_DEFKEYX,	/* MULTIPLE */
	DOT11_OID_DEFKEYID,
	DOT11_OID_DOT1XENABLE,
	OID_INL_DOT11D_CONFORMANCE,
	/* Do not initialize this - fw < 1.0.4.3 rejects it
	OID_INL_OUTPUTPOWER,
	*/
};
/* update the MAC addr. */
/*
 * Fetch the MAC address from the firmware (GEN_OID_MACADDRESS) and copy
 * it into the net_device.  Returns 0 on success, -EIO on any failure.
 */
static int
mgt_update_addr(islpci_private *priv)
{
	struct islpci_mgmtframe *res;
	int ret;

	ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
				     isl_oid[GEN_OID_MACADDRESS].oid, NULL,
				     isl_oid[GEN_OID_MACADDRESS].size, &res);

	if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
		memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
	else
		ret = -EIO;
	if (res)
		islpci_mgt_release(res);

	if (ret)
		printk(KERN_ERR "%s: mgt_update_addr: failure\n", priv->ndev->name);
	return ret;
}
/*
 * Commit the whole cached configuration to the device: the always-on
 * OIDs, the non-monitor OIDs when applicable, a final OID_INL_MODE
 * re-commit, and a MAC address refresh.  Returns 0 if everything
 * succeeded, non-zero otherwise.  Lock outside.
 */
int
mgt_commit(islpci_private *priv)
{
	int rvalue;
	enum oid_num_t u;

	if (islpci_get_state(priv) < PRV_STATE_INIT)
		return 0;

	rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1));

	if (priv->iw_mode != IW_MODE_MONITOR)
		rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2));

	/* Re-commit the mode last so it takes effect after the rest. */
	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);
	rvalue |= mgt_update_addr(priv);

	if (rvalue) {
		/* some request have failed. The device might be in an
		   incoherent state. We should reset it ! */
		printk(KERN_DEBUG "%s: mgt_commit: failure\n", priv->ndev->name);
	}
	return rvalue;
}
/* The following OIDs need to be "unlatched":
*
* MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL
* FREQUENCY,EXTENDEDRATES.
*
* The way to do this is to set ESSID. Note though that they may get
* unlatch before though by setting another OID. */
#if 0
/* Dead code (compiled out): unlatch the OIDs listed above by
 * re-committing the SSID, and optionally the mode / MLME level. */
void
mgt_unlatch_all(islpci_private *priv)
{
	u32 u;
	int rvalue = 0;

	if (islpci_get_state(priv) < PRV_STATE_INIT)
		return;

	u = DOT11_OID_SSID;
	rvalue = mgt_commit_list(priv, &u, 1);
	/* Necessary if in MANUAL RUN mode? */
#if 0
	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);

	u = DOT11_OID_MLMEAUTOLEVEL;
	rvalue |= mgt_commit_list(priv, &u, 1);

	u = OID_INL_MODE;
	rvalue |= mgt_commit_list(priv, &u, 1);
#endif

	if (rvalue)
		printk(KERN_DEBUG "%s: Unlatching OIDs failed\n", priv->ndev->name);
}
#endif
/* This will tell you if you are allowed to answer a mlme(ex) request .*/
/*
 * Returns non-zero when the interface is in master mode AND the cached
 * MLME autolevel is at least DOT11_MLME_INTERMEDIATE.
 */
int
mgt_mlme_answer(islpci_private *priv)
{
	u32 mlmeautolevel;
	/* Acquire a read lock because if we are in a mode change, it's
	 * possible to answer true, while the card is leaving master to managed
	 * mode. Answering to a mlme in this situation could hang the card.
	 */
	down_read(&priv->mib_sem);
	/* The cache stores firmware byte order, hence the le32_to_cpu. */
	mlmeautolevel =
	    le32_to_cpu(*(u32 *) priv->mib[DOT11_OID_MLMEAUTOLEVEL]);
	up_read(&priv->mib_sem);

	return ((priv->iw_mode == IW_MODE_MASTER) &&
		(mlmeautolevel >= DOT11_MLME_INTERMEDIATE));
}
/*
 * Reverse-map a raw firmware OID to its enum oid_num_t index by linear
 * search of isl_oid[].  Returns OID_NUM_LAST (and logs) when the OID is
 * unknown.
 */
enum oid_num_t
mgt_oidtonum(u32 oid)
{
	int i;

	for (i = 0; i < OID_NUM_LAST; i++)
		if (isl_oid[i].oid == oid)
			return i;

	/* Terminate the message with '\n' so the next log line is not
	 * concatenated onto it. */
	printk(KERN_DEBUG "looking for an unknown oid 0x%x\n", oid);

	return OID_NUM_LAST;
}
/*
 * Format the result of a GET (union oid_res_t) as human-readable text
 * into 'str' (a buffer of at least PRIV_STR_SIZE bytes), according to
 * the OID's type.  Returns the number of characters written, snprintf
 * style.
 */
int
mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
{
	switch (isl_oid[n].flags & OID_FLAG_TYPE) {
	case OID_TYPE_U32:
		return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
	case OID_TYPE_BUFFER:{
			struct obj_buffer *buff = r->ptr;
			return snprintf(str, PRIV_STR_SIZE,
					"size=%u\naddr=0x%X\n", buff->size,
					buff->addr);
		}
		break;
	case OID_TYPE_BSS:{
			struct obj_bss *bss = r->ptr;
			return snprintf(str, PRIV_STR_SIZE,
					"age=%u\nchannel=%u\n"
					"capinfo=0x%X\nrates=0x%X\n"
					"basic_rates=0x%X\n", bss->age,
					bss->channel, bss->capinfo,
					bss->rates, bss->basic_rates);
		}
		break;
	case OID_TYPE_BSSLIST:{
			struct obj_bsslist *list = r->ptr;
			int i, k;
			k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
			for (i = 0; i < list->nr; i++)
				k += snprintf(str + k, PRIV_STR_SIZE - k,
					      "bss[%u] :\nage=%u\nchannel=%u\n"
					      "capinfo=0x%X\nrates=0x%X\n"
					      "basic_rates=0x%X\n",
					      i, list->bsslist[i].age,
					      list->bsslist[i].channel,
					      list->bsslist[i].capinfo,
					      list->bsslist[i].rates,
					      list->bsslist[i].basic_rates);
			return k;
		}
		break;
	case OID_TYPE_FREQUENCIES:{
			struct obj_frequencies *freq = r->ptr;
			int i, t;
			/* NOTE(review): unconditional printk without a KERN_
			 * level — looks like leftover debug output. */
			printk("nr : %u\n", freq->nr);
			t = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
			for (i = 0; i < freq->nr; i++)
				t += snprintf(str + t, PRIV_STR_SIZE - t,
					      "mhz[%u]=%u\n", i, freq->mhz[i]);
			return t;
		}
		break;
	case OID_TYPE_MLME:{
			struct obj_mlme *mlme = r->ptr;
			return snprintf(str, PRIV_STR_SIZE,
					"id=0x%X\nstate=0x%X\ncode=0x%X\n",
					mlme->id, mlme->state, mlme->code);
		}
		break;
	case OID_TYPE_MLMEEX:{
			struct obj_mlmeex *mlme = r->ptr;
			return snprintf(str, PRIV_STR_SIZE,
					"id=0x%X\nstate=0x%X\n"
					"code=0x%X\nsize=0x%X\n", mlme->id,
					mlme->state, mlme->code, mlme->size);
		}
		break;
	case OID_TYPE_ATTACH:{
			struct obj_attachment *attach = r->ptr;
			return snprintf(str, PRIV_STR_SIZE,
					"id=%d\nsize=%d\n",
					attach->id,
					attach->size);
		}
		break;
	case OID_TYPE_SSID:{
			struct obj_ssid *ssid = r->ptr;
			return snprintf(str, PRIV_STR_SIZE,
					"length=%u\noctets=%.*s\n",
					ssid->length, ssid->length,
					ssid->octets);
		}
		break;
	case OID_TYPE_KEY:{
			struct obj_key *key = r->ptr;
			int t, i;
			t = snprintf(str, PRIV_STR_SIZE,
				     "type=0x%X\nlength=0x%X\nkey=0x",
				     key->type, key->length);
			for (i = 0; i < key->length; i++)
				t += snprintf(str + t, PRIV_STR_SIZE - t,
					      "%02X:", key->key[i]);
			t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
			return t;
		}
		break;
	case OID_TYPE_RAW:
	case OID_TYPE_ADDR:{
			unsigned char *buff = r->ptr;
			int t, i;
			t = snprintf(str, PRIV_STR_SIZE, "hex data=");
			for (i = 0; i < isl_oid[n].size; i++)
				t += snprintf(str + t, PRIV_STR_SIZE - t,
					      "%02X:", buff[i]);
			t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
			return t;
		}
		break;
	default:
		BUG();
	}
	return 0;
}
| gpl-2.0 |
mythos234/SimplKernel-LL-BOFJ | drivers/gpu/drm/radeon/radeon_device.c | 1650 | 40574 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
/*
 * Human-readable chip family names.  Indexed by the RADEON_FAMILY_MASK
 * portion of the driver flags (rdev->family); order must match the
 * CHIP_* enum in radeon_family.h.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"LAST",
};
/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with and/or masks.  Each entry in the
 * array is a (reg, and_mask, or_mask) triple; an and_mask of all ones
 * means "write or_mask verbatim, skip the read-modify-write".
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 i;

	/* array must be a whole number of triples */
	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		u32 reg = registers[i];
		u32 and_mask = registers[i + 1];
		u32 or_mask = registers[i + 2];
		u32 val;

		if (and_mask == 0xffffffff)
			val = or_mask;
		else
			val = (RREG32(reg) & ~and_mask) | or_mask;
		WREG32(reg, val);
	}
}
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Reset the surface registers on pre-r600 parts (r1xx-r5xx).  Slots
 * that still have a buffer object attached are re-programmed rather
 * than cleared.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family >= CHIP_R600)
		return;

	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		if (rdev->surface_regs[i].bo)
			radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
		else
			radeon_clear_surface_reg(rdev, i);
	}
	/* enable surfaces */
	WREG32(RADEON_SURFACE_CNTL, 0);
}
/*
 * GPU scratch register helper functions.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx): five
 * registers on pre-r300 parts, seven afterwards, all initially free.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int idx;

	/* FIXME: check this out */
	rdev->scratch.num_reg = (rdev->family < CHIP_R300) ? 5 : 7;
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (idx = 0; idx < rdev->scratch.num_reg; idx++) {
		rdev->scratch.free[idx] = true;
		rdev->scratch.reg[idx] = rdev->scratch.reg_base + 4 * idx;
	}
}
/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: filled with the mmio offset of the allocated scratch register
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL when no register is free.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int idx;

	for (idx = 0; idx < rdev->scratch.num_reg; idx++) {
		if (!rdev->scratch.free[idx])
			continue;
		rdev->scratch.free[idx] = false;
		*reg = rdev->scratch.reg[idx];
		return 0;
	}
	return -EINVAL;
}
/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics).
 * Unknown offsets are silently ignored.
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int idx;

	for (idx = 0; idx < rdev->scratch.num_reg; idx++) {
		if (rdev->scratch.reg[idx] != reg)
			continue;
		rdev->scratch.free[idx] = true;
		return;
	}
}
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 * Only clears the driver-side flag; the writeback BO itself is kept
 * (radeon_wb_fini() frees it).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (!rdev->wb.wb_obj)
		return;

	/* unmap and unpin before dropping the last reference */
	if (radeon_bo_reserve(rdev->wb.wb_obj, false) == 0) {
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	radeon_bo_unref(&rdev->wb.wb_obj);
	rdev->wb.wb = NULL;
	rdev->wb.wb_obj = NULL;
}
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate, pin and map one GTT page for writeback (kept across
	 * suspend/resume; only allocated on the first call) */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Try to place VRAM at the base address provided as parameter (which is
 * so far either the PCI aperture address or, for IGP, the TOM base
 * address).
 *
 * If there is not enough space to fit the unvisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; the worst case is that VRAM gets limited.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 + along with lots
 * of ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	/* VRAM must fit inside the GPU's internal address space (mc_mask) */
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, the VRAM range must not overlap the GTT window */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* honor the vramlimit module parameter (given in MB) */
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Try to place GTT before or after VRAM, whichever gap in the MC
 * address space is larger.
 *
 * If GTT size is bigger than the space left then we adjust GTT size;
 * thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space after VRAM (aligned) and space before VRAM */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
/*
 * GPU helper functions.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display controllers on this asic - skip the CRTC checks */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs: any enabled CRTC means the card was posted */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz: divide the raw clock values by 100
	 * (presumably current_sclk/mclk are in 10 kHz units - TODO confirm) */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to post it by
 * running the BIOS init tables (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (!rdev->bios) {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}

	DRM_INFO("GPU not posted. posting now...\n");
	if (rdev->is_atom_bios)
		atom_asic_init(rdev->mode_info.atom_context);
	else
		radeon_combios_asic_init(rdev->ddev);
	return true;
}
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated - nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Unmaps and frees the dummy page used by the driver (all asics).
 * No-op when the page was never allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	struct page *page = rdev->dummy_page.page;

	if (page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(page);
	rdev->dummy_page.page = NULL;
}
/* ATOM accessor methods */
/*
* ATOM is an interpreted byte code stored in tables in the vbios. The
* driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
* actions (set display modes, asic init, etc.). See radeon_atombios.c,
* atombios.h, and atom.c
*/
/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * PLL register read callback for the ATOM interpreter (r4xx+).
 * Returns the current value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return rdev->pll_rreg(rdev, reg);
}
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * PLL register write callback for the ATOM interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * MC register read callback for the ATOM interpreter (r4xx+).
 * Returns the current value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return rdev->mc_rreg(rdev, reg);
}
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * MC register write callback for the ATOM interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset, in dwords
 * @val: value to write to the MMIO register
 *
 * MMIO register write callback for the ATOM interpreter (r4xx+).
 * ATOM uses dword offsets, hence the * 4 to get a byte offset.
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg * 4, val);
}
/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset, in dwords
 *
 * MMIO register read callback for the ATOM interpreter (r4xx+).
 * ATOM uses dword offsets, hence the * 4 to get a byte offset.
 * Returns the current value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return RREG32(reg * 4);
}
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset, in dwords
 * @val: value to write to the IO register
 *
 * IO register write callback for the ATOM interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg * 4, val);
}
/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset, in dwords
 *
 * IO register read callback for the ATOM interpreter (r4xx+).
 * Returns the current value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return RREG32_IO(reg * 4);
}
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no IO BAR - fall back to MMIO accessors for IO ops */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
/**
* radeon_atombios_fini - free the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Frees the driver info and register access callbacks for the ATOM
* interpreter (r4xx+).
* Called at driver shutdown.
*/
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
}
kfree(rdev->mode_info.atom_context);
rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
/*
* COMBIOS is the bios format prior to ATOM. It provides
* command tables similar to ATOM, but doesn't have a unified
* parser. See radeon_combios.c
*/
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success (cannot fail).
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).  Currently nothing
 * to free; kept as a symmetric counterpart to radeon_combios_init().
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns the VGA resource flags: legacy resources are only claimed
 * while decode is enabled.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	unsigned int rsrc = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	radeon_vga_set_state(rdev, state);
	if (state)
		rsrc |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
	return rsrc;
}
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.  Note that 0 is accepted, which
 * the callers rely on (0 means "parameter disabled").
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are warned about and reset to safe defaults.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
	}
	/* gart size module param is in MB */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
/**
 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
 * needed for waking up.
 *
 * @pdev: pci dev pointer
 *
 * Matches on the subsystem IDs of the affected board.
 */
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{
	/* 6600m in a macbook pro */
	if (pdev->subsystem_vendor != PCI_VENDOR_ID_APPLE ||
	    pdev->subsystem_device != 0x00e2)
		return false;

	printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
	return true;
}
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* temporarily stretch the d3 delay for quirked boards,
		 * restored after resume completes */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool idle;

	/* switching is only safe while nobody holds the device open */
	spin_lock(&dev->count_lock);
	idle = dev->open_count == 0;
	spin_unlock(&dev->count_lock);
	return idle;
}
/* vga_switcheroo callbacks used to hand the GPU between GPUs/drivers */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* sanitize module params (may rewrite rdev->mc.gtt_size) */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32 bits on failure */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests/benchmarks, selected via module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics); reverses radeon_device_init().
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unhook from switcheroo and the vga arbiter */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}
/*
* Suspend & resume.
*/
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @state: suspend state
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* switcheroo already powered us down - nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
/**
* radeon_resume_kms - initiate device resume
*
* @pdev: drm dev pointer
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* With vga_switcheroo the GPU may be intentionally powered off. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* Held across the whole resume to keep fbcon away until we're done. */
	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	/* Smoke-test each ring with an indirect buffer; failure is non-fatal. */
	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	drm_kms_helper_poll_enable(dev);
	return 0;
}
/**
* radeon_gpu_reset - reset the asic
*
* @rdev: radeon device pointer
*
* Attempt the reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* Exclude all other GPU users (CS ioctl etc.) during the reset. */
	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	/* Snapshot un-executed commands from each ring so we can replay them. */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* Replay the saved commands; ownership of ring_data passes here. */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* Replayed data is gone; reset once more without it. */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* Reset failed: unblock fence waiters and drop the backups. */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
/*
* Debugfs
*/
/*
 * Register a component's debugfs file list with the device, creating the
 * files on both the control and primary DRM minors.  Registering the same
 * list twice is a harmless no-op.  Returns 0 or -EINVAL when the fixed
 * component table is full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	/* Component count after this registration; bail if it overflows. */
	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
/*
 * Remove every debugfs file list previously registered via
 * radeon_debugfs_add_files() from both DRM minors.  The bookkeeping
 * array itself is not cleared; this runs only at device teardown.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
#if defined(CONFIG_DEBUG_FS)
/*
 * Per-minor debugfs init hook required by the DRM core.  All real file
 * creation happens in radeon_debugfs_add_files(), so this is a no-op.
 */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Matching no-op hook; files are torn down in radeon_debugfs_remove_files(). */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
| gpl-2.0 |
miiicmueller/android_kernel_raspberryPi_rpiv2 | drivers/net/ethernet/qualcomm/qca_framing.c | 1650 | 3909 | /*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
*
* Permission to use, copy, modify, and/or distribute this software
* for any purpose with or without fee is hereby granted, provided
* that the above copyright notice and this permission notice appear
* in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros ethernet framing. Every Ethernet frame is surrounded
* by an atheros frame while transmitted over a serial channel;
*/
#include <linux/kernel.h>
#include "qca_framing.h"
/**
 * qcafrm_create_header - write the 8-byte QCA framing header into a buffer
 * @buf: destination, must have room for QCAFRM_HEADER_LEN bytes
 * @length: length of the Ethernet frame that follows the header
 *
 * Layout on the wire: four 0xAA sync bytes, the frame length as a
 * little-endian 16-bit value, then two reserved zero bytes.
 *
 * Return: QCAFRM_HEADER_LEN on success, 0 if @buf is NULL.
 */
u16
qcafrm_create_header(u8 *buf, u16 length)
{
	if (!buf)
		return 0;

	buf[0] = 0xAA;
	buf[1] = 0xAA;
	buf[2] = 0xAA;
	buf[3] = 0xAA;
	/*
	 * Serialize the length little-endian by hand from the native
	 * value.  The previous code first converted it with
	 * cpu_to_le16() and then shifted the result, which swaps the
	 * bytes twice on big-endian hosts and emits the two length
	 * bytes in the wrong wire order there (and mixes __le16 with
	 * ordinary integer arithmetic, which sparse flags).
	 */
	buf[4] = length & 0xff;
	buf[5] = (length >> 8) & 0xff;
	buf[6] = 0;
	buf[7] = 0;

	return QCAFRM_HEADER_LEN;
}
/**
 * qcafrm_create_footer - write the 2-byte QCA framing footer into a buffer
 * @buf: destination, must have room for QCAFRM_FOOTER_LEN bytes
 *
 * The footer is simply two 0x55 marker bytes.
 *
 * Return: QCAFRM_FOOTER_LEN on success, 0 if @buf is NULL.
 */
u16
qcafrm_create_footer(u8 *buf)
{
	u16 written = 0;

	if (buf) {
		buf[0] = 0x55;
		buf[1] = 0x55;
		written = QCAFRM_FOOTER_LEN;
	}

	return written;
}
/* Gather received bytes and try to extract a full ethernet frame by
* following a simple state machine.
*
* Return: QCAFRM_GATHER No ethernet frame fully received yet.
* QCAFRM_NOHEAD Header expected but not found.
* QCAFRM_INVLEN Atheros frame length is invalid
* QCAFRM_NOTAIL Footer expected but not found.
* > 0 Number of byte in the fully received
* Ethernet frame
*/
/*
 * Feed one received byte into the de-framing state machine.  The state
 * enumeration is arranged so that states simply count DOWN as bytes
 * arrive (handle->state--), and any state value larger than the named
 * footer states encodes "N payload bytes still expected" — that is why
 * the payload is handled by the default: label placed before the two
 * footer cases.  handle->offset is reused first as the decoded length,
 * then as the write index into @buf.
 */
s32
qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte)
{
	s32 ret = QCAFRM_GATHER;
	u16 len;

	switch (handle->state) {
	/* 4-byte hardware length prefix: two zero bytes, two don't-care. */
	case QCAFRM_HW_LEN0:
	case QCAFRM_HW_LEN1:
		/* by default, just go to next state */
		handle->state--;

		if (recv_byte != 0x00) {
			/* first two bytes of length must be 0 */
			handle->state = QCAFRM_HW_LEN0;
		}
		break;
	case QCAFRM_HW_LEN2:
	case QCAFRM_HW_LEN3:
		handle->state--;
		break;
	/* 4 bytes header pattern */
	case QCAFRM_WAIT_AA1:
	case QCAFRM_WAIT_AA2:
	case QCAFRM_WAIT_AA3:
	case QCAFRM_WAIT_AA4:
		if (recv_byte != 0xAA) {
			ret = QCAFRM_NOHEAD;
			handle->state = QCAFRM_HW_LEN0;
		} else {
			handle->state--;
		}
		break;
	/* 2 bytes length. */
	/* Borrow offset field to hold length for now. */
	case QCAFRM_WAIT_LEN_BYTE0:
		handle->offset = recv_byte;
		handle->state = QCAFRM_WAIT_LEN_BYTE1;
		break;
	case QCAFRM_WAIT_LEN_BYTE1:
		/* Length arrives little-endian; this is the high byte. */
		handle->offset = handle->offset | (recv_byte << 8);
		handle->state = QCAFRM_WAIT_RSVD_BYTE1;
		break;
	case QCAFRM_WAIT_RSVD_BYTE1:
		handle->state = QCAFRM_WAIT_RSVD_BYTE2;
		break;
	case QCAFRM_WAIT_RSVD_BYTE2:
		len = handle->offset;
		/* Reject lengths that overflow @buf or are sub-minimum. */
		if (len > buf_len || len < QCAFRM_ETHMINLEN) {
			ret = QCAFRM_INVLEN;
			handle->state = QCAFRM_HW_LEN0;
		} else {
			/* Encode "len payload bytes remain" directly in state. */
			handle->state = (enum qcafrm_state)(len + 1);
			/* Remaining number of bytes. */
			handle->offset = 0;
		}
		break;
	default:
		/* Receiving Ethernet frame itself. */
		buf[handle->offset] = recv_byte;
		handle->offset++;
		handle->state--;
		break;
	case QCAFRM_WAIT_551:
		if (recv_byte != 0x55) {
			ret = QCAFRM_NOTAIL;
			handle->state = QCAFRM_HW_LEN0;
		} else {
			handle->state = QCAFRM_WAIT_552;
		}
		break;
	case QCAFRM_WAIT_552:
		if (recv_byte != 0x55) {
			ret = QCAFRM_NOTAIL;
			handle->state = QCAFRM_HW_LEN0;
		} else {
			/* Frame complete: report its length, rearm the FSM. */
			ret = handle->offset;
			/* Frame is fully received. */
			handle->state = QCAFRM_HW_LEN0;
		}
		break;
	}

	return ret;
}
| gpl-2.0 |
Barracuda09/linux | drivers/staging/vt6655/rc4.c | 2674 | 2202 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: rc4.c
*
* Purpose:
*
* Functions:
*
* Revision History:
*
* Author: Kyle Hsu
*
* Date: Sep 4, 2002
*
*/
#include "rc4.h"
/*
 * RC4 key-scheduling (KSA): initialize the 256-byte state to the
 * identity permutation, then shuffle it with the key bytes, cycling
 * through the key as needed.  Also resets the x/y output indices.
 */
void rc4_init(PRC4Ext pRC4, unsigned char *pbyKey, unsigned int cbKey_len)
{
	unsigned char *state = pRC4->abystate;
	unsigned int i;
	unsigned int j = 0;
	unsigned int key_pos = 0;
	unsigned int a, b;

	pRC4->ux = 0;
	pRC4->uy = 0;

	/* Identity permutation: S[i] = i. */
	for (i = 0; i < 256; i++)
		state[i] = (unsigned char)i;

	/* Shuffle the state with the key (swap S[i] and S[j]). */
	for (i = 0; i < 256; i++) {
		a = state[i];
		j = (j + pbyKey[key_pos] + a) & 0xff;
		b = state[j];
		state[j] = (unsigned char)a;
		state[i] = (unsigned char)b;
		if (++key_pos >= cbKey_len)
			key_pos = 0;
	}
}
/*
 * RC4 pseudo-random generation (PRGA): advance the x/y indices,
 * swap S[x] and S[y], and return the next keystream byte
 * S[(S[x] + S[y]) & 0xff].
 */
unsigned int rc4_byte(PRC4Ext pRC4)
{
	unsigned char *state = pRC4->abystate;
	unsigned int x = (pRC4->ux + 1) & 0xff;
	unsigned int sx = state[x];
	unsigned int y = (sx + pRC4->uy) & 0xff;
	unsigned int sy = state[y];

	/* Persist the advanced indices for the next call. */
	pRC4->ux = x;
	pRC4->uy = y;

	/* Swap S[x] and S[y]. */
	state[y] = (unsigned char)sx;
	state[x] = (unsigned char)sy;

	return state[(sx + sy) & 0xff];
}
/*
 * XOR cbData_len bytes of pbySrc with the RC4 keystream into pbyDest.
 * RC4 is symmetric, so the same call performs decryption.  Source and
 * destination may be the same buffer.
 */
void rc4_encrypt(PRC4Ext pRC4, unsigned char *pbyDest,
		 unsigned char *pbySrc, unsigned int cbData_len)
{
	unsigned char *dst = pbyDest;
	unsigned char *src = pbySrc;
	unsigned int remaining = cbData_len;

	while (remaining--)
		*dst++ = (unsigned char)(*src++ ^ rc4_byte(pRC4));
}
| gpl-2.0 |
PaoloW8/android_kernel_ZTE_NX505J | arch/s390/kernel/cpcmd.c | 4466 | 3063 | /*
* arch/s390/kernel/cpcmd.c
*
* S390 version
* Copyright IBM Corp. 1999,2007
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Christian Borntraeger (cborntra@de.ibm.com),
*/
#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];
/*
 * Issue a z/VM CP command via DIAGNOSE 8 without collecting a response.
 * The command text must already be in cpcmd_buf (EBCDIC).  On 64-bit
 * kernels the diag must run in 31-bit addressing mode, hence sam31/sam64.
 * Returns the CP response code left in r3 by the diagnose.
 */
static int diag8_noresponse(int cmdlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = cmdlen;

	asm volatile(
#ifndef CONFIG_64BIT
		"	diag	%1,%0,0x8\n"
#else /* CONFIG_64BIT */
		"	sam31\n"
		"	diag	%1,%0,0x8\n"
		"	sam64\n"
#endif /* CONFIG_64BIT */
		: "+d" (reg3) : "d" (reg2) : "cc");
	return reg3;
}
/*
 * Issue a z/VM CP command via DIAGNOSE 8 and collect the response into
 * @response (capacity *rlen).  Bit 0x40000000 in the length register
 * tells CP that a response buffer is supplied.  On return *rlen holds
 * the length reported by CP; if the condition code signalled truncation
 * (brc 8 not taken) the residual count is added in.  Returns the value
 * CP left in r4.
 */
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = (addr_t) response;
	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
	register unsigned long reg5 asm ("5") = *rlen;

	asm volatile(
#ifndef CONFIG_64BIT
		"	diag	%2,%0,0x8\n"
		"	brc	8,1f\n"
		"	ar	%1,%4\n"
#else /* CONFIG_64BIT */
		"	sam31\n"
		"	diag	%2,%0,0x8\n"
		"	sam64\n"
		"	brc	8,1f\n"
		"	agr	%1,%4\n"
#endif /* CONFIG_64BIT */
		"1:\n"
		: "+d" (reg4), "+d" (reg5)
		: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
	*rlen = reg5;
	return reg4;
}
/*
* __cpcmd has some restrictions over cpcmd
* - the response buffer must reside below 2GB (if any)
* - __cpcmd is unlocked and therefore not SMP-safe
*/
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	int cmdlen;
	int rc;
	int response_len;

	cmdlen = strlen(cmd);
	/* CP commands are limited to 240 characters (cpcmd_buf is 241). */
	BUG_ON(cmdlen > 240);
	memcpy(cpcmd_buf, cmd, cmdlen);
	/* CP expects EBCDIC; convert the command in place. */
	ASCEBC(cpcmd_buf, cmdlen);

	if (response) {
		memset(response, 0, rlen);
		/* Keep the original capacity: diag8_response() updates rlen. */
		response_len = rlen;
		rc = diag8_response(cmdlen, response, &rlen);
		/* Convert the whole (zeroed) buffer back to ASCII. */
		EBCASC(response, response_len);
	} else {
		rc = diag8_noresponse(cmdlen);
	}
	if (response_code)
		*response_code = rc;
	/* Returns the response length reported by CP (0 without a buffer). */
	return rlen;
}
EXPORT_SYMBOL(__cpcmd);
/*
 * Locked, SMP-safe wrapper around __cpcmd().  DIAGNOSE 8 requires the
 * response buffer to be real-addressable below 2GB, so if the caller's
 * buffer is vmalloc'ed or above 2GB we bounce through a GFP_DMA
 * allocation and copy the response back.
 */
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	char *lowbuf;
	int len;
	unsigned long flags;

	/* Buffer not identity-mapped, or crossing/above the 2GB line? */
	if ((virt_to_phys(response) != (unsigned long) response) ||
	    (((unsigned long)response + rlen) >> 31)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
		if (!lowbuf) {
			pr_warning("The cpcmd kernel function failed to "
				   "allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
| gpl-2.0 |
aatjitra/silean | net/rds/ib_recv.c | 4978 | 30505 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "ib.h"
static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
/*
 * Initialize every work request in the connection's receive ring.
 * Each WR carries two SGEs: one for the preallocated rds_header slot
 * and one for a data fragment that is attached later at refill time.
 */
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	u32 idx;

	for (idx = 0; idx < ic->i_recv_ring.w_nr; idx++) {
		struct rds_ib_recv_work *recv = &ic->i_recvs[idx];
		struct ib_sge *hdr_sge = &recv->r_sge[0];
		struct ib_sge *data_sge = &recv->r_sge[1];

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = idx;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		/* First SGE points at this entry's header slot. */
		hdr_sge->addr = ic->i_recv_hdrs_dma + (idx * sizeof(struct rds_header));
		hdr_sge->length = sizeof(struct rds_header);
		hdr_sge->lkey = ic->i_mr->lkey;

		/* Second SGE is filled in when a fragment is attached. */
		data_sge->addr = 0;
		data_sge->length = RDS_FRAG_SIZE;
		data_sge->lkey = ic->i_mr->lkey;
	}
}
/*
* The entire 'from' list, including the from element itself, is put on
* to the tail of the 'to' list.
*/
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	/*
	 * Splicing at from_last excludes from_last itself (list_splice_tail
	 * treats its argument as the head), so add it back explicitly.
	 * Net effect: all of 'from', including the 'from' entry, ends up
	 * on the tail of 'to'.
	 */
	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}
/*
 * Atomically claim the cache's 'xfer' list (filled lock-free by
 * rds_ib_recv_cache_put) and append it to the consumer-owned 'ready'
 * list.  Only the refill path calls this, so 'ready' needs no locking.
 */
static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	/* xchg takes ownership of whatever producers have published. */
	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}
/*
 * Allocate the per-cpu heads of one refill cache and start every list
 * (per-cpu, xfer, ready) out empty.  Returns 0 or -ENOMEM.
 */
static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
	int cpu;

	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct rds_ib_cache_head *head =
			per_cpu_ptr(cache->percpu, cpu);

		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}
/*
 * Allocate both refill caches (incs and frags) for a connection.
 * On failure of the second, the first is unwound so the caller sees
 * all-or-nothing.  Returns 0 or -ENOMEM.
 */
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
	if (ret)
		return ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
	if (ret)
		free_percpu(ic->i_cache_incs.percpu);

	return ret;
}
/*
 * Gather every entry held anywhere in the cache — all per-cpu lists and
 * the ready list — onto caller_list.  Teardown-only helper: assumes no
 * concurrent producers or consumers remain.
 */
static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}
/*
 * Free everything cached for recycling on this connection: drain both
 * refill caches onto a local list, release the per-cpu heads, and
 * return every inc and frag to its slab.  Called at connection teardown.
 */
void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	/* Pull any producer-published incs into ready, then drain it all. */
	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		/* A cached inc must have had its frags detached already. */
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
	}

	/* Same drain for the fragment cache. */
	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}
/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
struct rds_page_frag *frag)
{
rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}
/* Recycle inc after freeing attached frags */
/*
 * Release an incoming message once its refcount drops: detach and
 * recycle every attached fragment, then recycle the inc itself into
 * the connection's refill cache.
 */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}
/*
 * Drop whatever a single ring entry still holds: put the inc reference
 * and unmap + recycle the fragment.  Used when tearing the ring down.
 */
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		/* Undo the DMA mapping made in rds_ib_recv_refill_one(). */
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}
/* Release the inc and fragment attached to every entry in the recv ring. */
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 idx;

	for (idx = 0; idx < ic->i_recv_ring.w_nr; idx++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[idx]);
}
/*
 * Get one rds_ib_incoming for the refill path: prefer a recycled one
 * from the cache, otherwise allocate from the slab while respecting the
 * global rds_ib_sysctl_max_recv_allocation budget.  Returns NULL on
 * budget exhaustion or allocation failure.
 */
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		/* Reserve a slot in the global budget before allocating. */
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			/* Give the budget slot back on failure. */
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

	return ibinc;
}
/*
 * Get one page fragment for the refill path: a recycled frag already
 * carries its page; a fresh one needs a RDS_FRAG_SIZE slice from the
 * page-remainder allocator.  Returns NULL on allocation failure.
 */
static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		sg_init_table(&frag->f_sg, 1);
		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}
/*
 * Attach an inc (if the entry doesn't already carry one) and a freshly
 * DMA-mapped fragment to one ring entry, and point its SGEs at the
 * header slot and the fragment.  'prefill' (connection setup) may sleep,
 * so it uses blocking GFP masks; the completion path must not.
 * Returns 0 or -ENOMEM.
 */
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (prefill) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	/* Header SGE: this entry's slot in the preallocated header array. */
	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	/* Data SGE: the just-mapped fragment. */
	sge = &recv->r_sge[1];
	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
	sge->length = sg_dma_len(&recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}
/*
* This tries to allocate and post unused work requests after making sure that
* they have all the allocations they need to queue received fragments into
* sockets.
*
* -1 is returned if posting fails due to temporary resource exhaustion.
*/
void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	/* Keep posting while ring slots are available; prefill runs even
	 * before the connection is formally up. */
	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, prefill);
		if (ret) {
			break;
		}

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	/* A failure above left one ring slot allocated but unposted. */
	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
}
/*
* We want to recycle several types of recv allocations, like incs and frags.
* To use this, the *_free() function passes in the ptr to a list_head within
* the recyclee, as well as the cache to put it on.
*
* First, we put the memory on a percpu list. When this reaches a certain size,
* We move it to an intermediate non-percpu list in a lockless manner, with some
* xchg/compxchg wizardry.
*
* N.B. Instead of a list_head as the anchor, we use a single pointer, which can
* be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
* list_empty() will return true with one element is actually present.
*/
static void rds_ib_recv_cache_put(struct list_head *new_item,
				 struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct rds_ib_cache_head *chp;
	struct list_head *old;

	/* Disabling irqs also keeps us on this CPU for the percpu access. */
	local_irq_save(flags);

	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
	if (!chp->first)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chp->first);
	chp->first = new_item;
	chp->count++;

	/* Batch per-cpu items before publishing to the shared xfer list. */
	if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chp->first);
		old = cmpxchg(&cache->xfer, NULL, chp->first);
	} while (old);	/* retry if another producer raced in between */

	chp->first = NULL;
	chp->count = 0;
end:
	local_irq_restore(flags);
}
/*
 * Pop one item off the consumer-owned ready list, or NULL if empty.
 * Note the single-pointer anchor convention: a one-element list is
 * "list_empty", so that case just clears the pointer.
 */
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}
/*
 * Copy up to @size bytes of a received message out of its fragment
 * chain into the caller's iovec array.  Walks frags and iovecs in
 * lockstep, copying the largest run that fits both.  Returns the
 * number of bytes copied, or a negative errno from the user copy.
 */
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;	/* position within the current frag */
	unsigned long iov_off = 0;	/* position within the current iovec */
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		/* Advance to the next fragment when this one is exhausted. */
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		/* Skip any fully-consumed (or zero-length) iovecs. */
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		/* Copy the largest span allowed by frag, iovec, size and len. */
		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
					    frag->f_sg.offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}
/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
struct ib_send_wr *wr = &ic->i_ack_wr;
struct ib_sge *sge = &ic->i_ack_sge;
sge->addr = ic->i_ack_dma;
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_mr->lkey;
wr->sg_list = sge;
wr->num_sge = 1;
wr->opcode = IB_WR_SEND;
wr->wr_id = RDS_IB_ACK_WR_ID;
wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}
/*
* You'd think that with reliable IB connections you wouldn't need to ack
* messages that have been received. The problem is that IB hardware generates
* an ack message before it has DMAed the message into memory. This creates a
* potential message loss if the HCA is disabled for any reason between when it
* sends the ack and before the message is DMAed and processed. This is only a
* potential issue if another HCA is available for fail-over.
*
* When the remote host receives our ack they'll free the sent message from
* their send queue. To decrease the latency of this we always send an ack
* immediately after we've received messages.
*
* For simplicity, we only have one ack in flight at a time. This puts
* pressure on senders to have deep enough send queues to absorb the latency of
* a single ack frame being in flight. This might not be good enough.
*
* This is implemented by have a long-lived send_wr and sge which point to a
* statically allocated ack frame. This ack wr does not fall under the ring
* accounting that the tx and rx wrs do. The QP attribute specifically makes
* room for it beyond the ring size. Send completion notices its special
* wr_id and avoids working with the ring in that case.
*/
#ifndef KERNEL_HAS_ATOMIC64
/*
 * Without atomic64, a 64-bit sequence number cannot be updated atomically
 * on 32-bit platforms, so a spinlock guards i_ack_next and the flag.
 */
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
				int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

/* Read-and-consume the pending ACK sequence number (locked variant). */
static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	/* Clearing REQUESTED before the read pairs with rds_ib_set_ack(). */
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
/* Lock-free variants: i_ack_next is an atomic64 here. */
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
				int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		/* Order the sequence store before the flag becomes visible. */
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	/* Order the flag clear before reading the sequence number. */
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif
/*
 * Build and post the connection's single ACK-only frame, carrying the
 * latest received sequence number and any credits we are advertising.
 * On a post failure the IN_FLIGHT/REQUESTED flags are rewound so a
 * later attempt retries, and the connection is errored out.
 */
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	/* This also clears IB_ACK_REQUESTED — we are consuming the request. */
	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}
/*
* There are 3 ways of getting acknowledgements to the peer:
* 1. We call rds_ib_attempt_ack from the recv completion handler
* to send an ACK-only frame.
* However, there can be only one such frame in the send queue
* at any time, so we may have to postpone it.
* 2. When another (data) packet is transmitted while there's
* an ACK in the queue, we piggyback the ACK sequence number
* on the data packet.
* 3. If the ACK WR is done sending, we get called from the
* send queue completion handler, and check whether there's
* another ACK pending (postponed because the WR was on the
* queue). If so, we transmit it.
*
* We maintain 2 variables:
* - i_ack_flags, which keeps track of whether the ACK WR
* is currently in the send queue or not (IB_ACK_IN_FLIGHT)
* - i_ack_next, which is the last sequence number we received
*
* Potentially, send queue and receive queue handlers can run concurrently.
* It would be nice to not have to use a spinlock to synchronize things,
* but the one problem that rules this out is that 64bit updates are
* not atomic on all platforms. Things would be a lot simpler if
* we had atomic64 or maybe cmpxchg64 everywhere.
*
* Reconnecting complicates this picture just slightly. When we
* reconnect, we may be seeing duplicate packets. The peer
* is retransmitting them, because it hasn't seen an ACK for
* them. It is important that we ACK these.
*
* ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
* this flag set *MUST* be acknowledged immediately.
*/
/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;
	/* Nothing to do unless the peer asked for an ACK. */
	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;
	/* Only one ACK WR may be in the send queue at a time.  If one is
	 * already in flight, the send-completion path will retry for us
	 * (see rds_ib_ack_send_complete()). */
	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}
	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		/* No credit: undo the in-flight claim and bail; the
		 * REQUESTED bit stays set so a later attempt retries. */
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}
/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	/* The ACK WR has left the send queue; drop the in-flight marker
	 * and immediately try to send any ACK that was postponed while
	 * the WR was outstanding. */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}
/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	/* The outgoing data frame will carry the ACK sequence number, so
	 * a standalone ACK frame is no longer needed: clear the request. */
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}
/*
* It's kind of lame that we're copying from the posted receive pages into
* long-lived bitmaps. We could have posted the bitmaps and rdma written into
* them. But receiving new congestion bitmaps should be a *rare* event, so
* hopefully we won't need to invest that complexity in making it more
* efficient. By copying we can share a simpler core with TCP which has to
* copy.
*/
static void rds_ib_cong_recv(struct rds_connection *conn,
			      struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;	/* byte offset into the current map page */
	unsigned int map_page;	/* index of the current map page */
	struct rds_page_frag *frag;
	unsigned long frag_off;	/* byte offset into the current fragment */
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;
	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;
	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;
	copied = 0;
	/* Copy the received fragments into the congestion map pages,
	 * 64 bits at a time, collecting the bits that flipped 0 -> 1. */
	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;
		/* Copy only as far as both the current fragment and the
		 * current map page extend. */
		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
		addr = kmap_atomic(sg_page(&frag->f_sg));
		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);
		copied += to_copy;
		/* Advance the destination, wrapping to the next map page
		 * when the current one is full. */
		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}
		/* Advance the source, moving to the next fragment when
		 * the current one is exhausted. */
		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}
	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);
	rds_cong_map_updated(map, uncongested);
}
/*
* Rings are posted with all the allocations they'll need to queue the
* incoming message to the receiving socket so this can't fail.
* All fragments start with a header, so we can make sure we're not receiving
* garbage, and we can tell a small 8 byte fragment from an ACK frame.
*/
/* Accumulator for ACK state gathered while draining the receive CQ;
 * consumed by rds_ib_recv_tasklet_fn() after polling. */
struct rds_ib_ack_state {
	u64 ack_next;	/* sequence number to advertise as received */
	u64 ack_recv;	/* latest ACK sequence received from the peer */
	unsigned int ack_required:1;	/* peer set RDS_FLAG_ACK_REQUIRED */
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};
/*
 * Process one received work request: validate the header, fold its
 * piggybacked ACK/credit information into *state, and either attach
 * the fragment to the connection's in-progress incoming message or,
 * when the message is complete, deliver it.
 */
static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;
	/* XXX shut down the connection if port 0,0 are seen? */
	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);
	/* Every frame must at least carry a full RDS header. */
	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);
	/* The header for this work request lives in the receive header
	 * array, indexed by the request's slot in i_recvs. */
	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}
	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;
	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);
	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);
		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed. We don't go that route, so we have to drop the
		 * page ref ourselves. We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}
	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;
		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}
	/* Hand ownership of the fragment to the incoming message. */
	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;
	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		/* Last fragment: the message is complete, deliver it. */
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;
		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}
		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}
		rds_inc_put(&ibinc->ii_inc);
	}
}
/*
* Plucking the oldest entry from the ring can be done concurrently with
* the thread refilling the ring. Each ring operation is protected by
* spinlocks and the transient state of refilling doesn't change the
* recording of which entry is oldest.
*
* This relies on IB only calling one cq comp_handler for each cq so that
* there will only be one caller of rds_recv_incoming() per RDS connection.
*/
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	rdsdebug("conn %p cq %p\n", conn, cq);
	rds_ib_stats_inc(s_ib_rx_cq_call);
	/* Defer the actual CQ drain to tasklet context
	 * (rds_ib_recv_tasklet_fn). */
	tasklet_schedule(&ic->i_recv_tasklet);
}
/*
 * Drain the receive CQ, processing each completion and releasing its
 * ring entry.  ACK bookkeeping is accumulated into *state for the
 * caller to act on once polling is done.
 */
static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;
	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 rds_ib_wc_status_str(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);
		/* Completions are consumed in ring order; the completed
		 * work request is the oldest ring entry. */
		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (wc.status == IB_WC_SUCCESS) {
			rds_ib_process_recv(conn, recv, wc.byte_len, state);
		} else {
			/* We expect errors as the qp is drained during shutdown */
			if (rds_conn_up(conn) || rds_conn_connecting(conn))
				rds_ib_conn_error(conn, "recv completion on %pI4 had "
						  "status %u (%s), disconnecting and "
						  "reconnecting\n", &conn->c_faddr,
						  wc.status,
						  rds_ib_wc_status_str(wc.status));
		}
		/*
		 * It's very important that we only free this ring entry if we've truly
		 * freed the resources allocated to the entry. The refilling path can
		 * leak if we don't.
		 */
		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}
/*
 * Tasklet body scheduled from the recv CQ completion handler: drain
 * the CQ, then act on the ACK state gathered while draining.
 */
void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };
	/* Poll, rearm the completion notification, then poll again to
	 * pick up completions that raced with the rearm. */
	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);
	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		/* The peer ACKed new data; retire it from the send queue. */
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);
	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0);
}
/*
 * Transport recv hook.  There is nothing to pull here — delivery is
 * driven by CQ completions — so just use the opportunity to push out
 * a pending ACK.  Always succeeds.
 */
int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p\n", conn);

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return 0;
}
/*
 * Create the slab caches used by the receive path and size the recv
 * allocation limit from available RAM.  Returns 0 on success or
 * -ENOMEM, in which case no cache is left allocated.
 */
int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab) {
		/* Don't leave a dangling global behind: a subsequent
		 * rds_ib_recv_exit() would otherwise destroy the
		 * already-freed cache. */
		kmem_cache_destroy(rds_ib_incoming_slab);
		rds_ib_incoming_slab = NULL;
	} else
		ret = 0;
out:
	return ret;
}
/* Tear down the slab caches created by rds_ib_recv_init(). */
void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}
| gpl-2.0 |
Motorhead1991/rugby_kernel_3.4.24 | arch/frv/mb93090-mb00/pci-dma-nommu.c | 8562 | 3486 | /* pci-dma-nommu.c: Dynamic DMA mapping support for the FRV
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Woodhouse (dwmw2@infradead.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <asm/io.h>
#if 1
#define DMA_SRAM_START dma_coherent_mem_start
#define DMA_SRAM_END dma_coherent_mem_end
#else // Use video RAM on Matrox
#define DMA_SRAM_START 0xe8900000
#define DMA_SRAM_END 0xe8a00000
#endif
/* One outstanding coherent allocation; records live on dma_alloc_list
 * in ascending address order, which the first-fit search relies on. */
struct dma_alloc_record {
	struct list_head list;
	unsigned long ofs;	/* start address of the allocation */
	unsigned long len;	/* length, rounded up to 32 bytes */
};
static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);
/*
 * First-fit allocator over the reserved DMA SRAM window.  Walks the
 * address-ordered record list looking for a gap large enough for the
 * (32-byte rounded) request and inserts a new record there.
 *
 * Returns the allocation's address (also stored in *dma_handle), or
 * NULL if no DMA area is reserved or no gap is large enough.
 */
void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_alloc_record *new;
	struct list_head *this = &dma_alloc_list;
	unsigned long flags;
	unsigned long start = DMA_SRAM_START;
	unsigned long end;

	if (!DMA_SRAM_START) {
		printk("%s called without any DMA area reserved!\n", __func__);
		return NULL;
	}

	new = kmalloc(sizeof (*new), GFP_ATOMIC);
	if (!new)
		return NULL;

	/* Round up to a reasonable alignment */
	new->len = (size + 31) & ~31;

	spin_lock_irqsave(&dma_alloc_lock, flags);

	list_for_each (this, &dma_alloc_list) {
		struct dma_alloc_record *this_r = list_entry(this, struct dma_alloc_record, list);
		end = this_r->ofs;
		/* Compare against the rounded length, not the raw size:
		 * the record reserves new->len bytes, and accepting a gap
		 * of only 'size' bytes could overlap the next record. */
		if (end - start >= new->len)
			goto gotone;
		start = this_r->ofs + this_r->len;
	}
	/* Reached end of list. */
	end = DMA_SRAM_END;
	this = &dma_alloc_list;

	if (end - start >= new->len) {
	gotone:
		new->ofs = start;
		list_add_tail(&new->list, this);
		spin_unlock_irqrestore(&dma_alloc_lock, flags);

		*dma_handle = start;
		return (void *)start;
	}

	kfree(new);
	spin_unlock_irqrestore(&dma_alloc_lock, flags);
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Release a coherent allocation by removing its record from the list.
 * The DMA handle doubles as the allocation's start offset, so it
 * uniquely identifies the record.  Freeing an address that was never
 * allocated is a fatal bug (BUG()).
 */
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	struct dma_alloc_record *rec;
	unsigned long flags;
	spin_lock_irqsave(&dma_alloc_lock, flags);
	list_for_each_entry(rec, &dma_alloc_list, list) {
		if (rec->ofs == dma_handle) {
			list_del(&rec->list);
			kfree(rec);
			spin_unlock_irqrestore(&dma_alloc_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&dma_alloc_lock, flags);
	BUG();
}
/*
 * Map a single buffer for streaming DMA.  With no IOMMU the mapping
 * is just the bus address; write back and invalidate the CPU cache so
 * the device sees current memory contents.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
	return virt_to_bus(ptr);
}
/*
 * Map a scatterlist for streaming DMA: write back and invalidate the
 * CPU cache over each segment.  Returns the number of entries mapped
 * (always nents on this no-IOMMU platform).
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	       enum dma_data_direction direction)
{
	int i;

	/* Validate the direction BEFORE touching any cache lines; the
	 * original code only checked it after all the cache operations
	 * had already been performed. */
	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++)
		frv_cache_wback_inv(sg_dma_address(&sg[i]),
				    sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]));

	return nents;
}
/*
 * Map one page (or part of it) for streaming DMA.  Flushes the page's
 * cached data and returns its physical address plus the offset.
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	flush_dcache_page(page);
	return (dma_addr_t) page_to_phys(page) + offset;
}
| gpl-2.0 |
rishi1998/android_kernel_samsung_i9082 | arch/mips/sibyte/sb1250/setup.c | 8818 | 6114 | /*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
unsigned int sb1_pass;		/* SB-1 core revision (low byte of PRId) */
unsigned int soc_pass;		/* SOC revision, from the system revision reg */
unsigned int soc_type;		/* SOC part type, from the system revision reg */
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;	/* peripheral revision, set by setup_bcm*() */
unsigned int zbbus_mhz;		/* ZBbus frequency in MHz */
EXPORT_SYMBOL(zbbus_mhz);
static char *soc_str;		/* printable SOC name */
static char *pass_str;		/* printable pass/stepping name */
static unsigned int war_pass;	/* XXXKW don't overload PASS defines? */
/*
 * Decode a BCM1250 soc_pass value into periph_rev, a printable pass
 * string and — where an earlier pass needs the same workarounds — an
 * effective war_pass revision.  Returns nonzero on an unknown rev.
 */
static int __init setup_bcm1250(void)
{
	int ret = 0;
	switch (soc_pass) {
	case K_SYS_REVISION_BCM1250_PASS1:
		periph_rev = 1;
		pass_str = "Pass 1";
		break;
	case K_SYS_REVISION_BCM1250_A10:
		periph_rev = 2;
		pass_str = "A8/A10";
		/* XXXKW different war_pass? */
		war_pass = K_SYS_REVISION_BCM1250_PASS2;
		break;
	case K_SYS_REVISION_BCM1250_PASS2_2:
		periph_rev = 2;
		pass_str = "B1";
		break;
	case K_SYS_REVISION_BCM1250_B2:
		periph_rev = 2;
		pass_str = "B2";
		war_pass = K_SYS_REVISION_BCM1250_PASS2_2;
		break;
	case K_SYS_REVISION_BCM1250_PASS3:
		periph_rev = 3;
		pass_str = "C0";
		break;
	case K_SYS_REVISION_BCM1250_C1:
		periph_rev = 3;
		pass_str = "C1";
		break;
	default:
		/* Revisions below PASS2_2 without their own case are the
		 * early A0-A6 steppings; anything else is unknown. */
		if (soc_pass < K_SYS_REVISION_BCM1250_PASS2_2) {
			periph_rev = 2;
			pass_str = "A0-A6";
			war_pass = K_SYS_REVISION_BCM1250_PASS2;
		} else {
			printk("Unknown BCM1250 rev %x\n", soc_pass);
			ret = 1;
		}
		break;
	}
	return ret;
}
/*
 * Report whether this chip needs the M3 workaround: true only for
 * BCM1250/BCM1125-family parts older than revision C0.
 */
int sb1250_m3_workaround_needed(void)
{
	if (soc_type != K_SYS_SOC_TYPE_BCM1250 &&
	    soc_type != K_SYS_SOC_TYPE_BCM1250_ALT &&
	    soc_type != K_SYS_SOC_TYPE_BCM1250_ALT2 &&
	    soc_type != K_SYS_SOC_TYPE_BCM1125 &&
	    soc_type != K_SYS_SOC_TYPE_BCM1125H)
		return 0;

	return soc_pass < K_SYS_REVISION_BCM1250_C0;
}
/*
 * Decode a BCM112x soc_pass value into periph_rev and a printable
 * pass string.  Returns nonzero on an unknown revision.
 */
static int __init setup_bcm112x(void)
{
	int ret = 0;
	switch (soc_pass) {
	case 0:
		/* Early build didn't have revid set */
		periph_rev = 3;
		pass_str = "A1";
		war_pass = K_SYS_REVISION_BCM112x_A1;
		break;
	case K_SYS_REVISION_BCM112x_A1:
		periph_rev = 3;
		pass_str = "A1";
		break;
	case K_SYS_REVISION_BCM112x_A2:
		periph_rev = 3;
		pass_str = "A2";
		break;
	case K_SYS_REVISION_BCM112x_A3:
		periph_rev = 3;
		pass_str = "A3";
		break;
	case K_SYS_REVISION_BCM112x_A4:
		periph_rev = 3;
		pass_str = "A4";
		break;
	case K_SYS_REVISION_BCM112x_B0:
		periph_rev = 3;
		pass_str = "B0";
		break;
	default:
		printk("Unknown %s rev %x\n", soc_str, soc_pass);
		ret = 1;
	}
	return ret;
}
/* Setup code likely to be common to all SiByte platforms */
/*
 * Dispatch on soc_type to the matching per-family decode routine,
 * defaulting war_pass to the real soc_pass first.  Returns nonzero
 * if the SOC type or its revision is unknown.
 */
static int __init sys_rev_decode(void)
{
	int ret = 0;
	war_pass = soc_pass;
	switch (soc_type) {
	case K_SYS_SOC_TYPE_BCM1250:
	case K_SYS_SOC_TYPE_BCM1250_ALT:
	case K_SYS_SOC_TYPE_BCM1250_ALT2:
		soc_str = "BCM1250";
		ret = setup_bcm1250();
		break;
	case K_SYS_SOC_TYPE_BCM1120:
		soc_str = "BCM1120";
		ret = setup_bcm112x();
		break;
	case K_SYS_SOC_TYPE_BCM1125:
		soc_str = "BCM1125";
		ret = setup_bcm112x();
		break;
	case K_SYS_SOC_TYPE_BCM1125H:
		soc_str = "BCM1125H";
		ret = setup_bcm112x();
		break;
	default:
		printk("Unknown SOC type %x\n", soc_type);
		ret = 1;
		break;
	}
	return ret;
}
/*
 * Identify the SiByte chip, compute the ZBbus frequency, and refuse
 * to run (machine_restart) if the kernel was built without the
 * workarounds this stepping requires.
 */
void __init sb1250_setup(void)
{
	uint64_t sys_rev;
	int plldiv;
	int bad_config = 0;
	sb1_pass = read_c0_prid() & 0xff;
	sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
	soc_type = SYS_SOC_TYPE(sys_rev);
	soc_pass = G_SYS_REVISION(sys_rev);
	if (sys_rev_decode()) {
		printk("Restart after failure to identify SiByte chip\n");
		machine_restart(NULL);
	}
	/* ZBbus runs at plldiv * 25 MHz; the expression splits that
	 * product into whole-50MHz and odd-25MHz parts. */
	plldiv = G_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
	zbbus_mhz = ((plldiv >> 1) * 50) + ((plldiv & 1) * 25);
	printk("Broadcom SiByte %s %s @ %d MHz (SB1 rev %d)\n",
		    soc_str, pass_str, zbbus_mhz * 2, sb1_pass);
	printk("Board type: %s\n", get_system_type());
	/* Check the build-time workaround configuration against the
	 * stepping actually found. */
	switch (war_pass) {
	case K_SYS_REVISION_BCM1250_PASS1:
#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
		printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
		       "and the kernel doesn't have the proper "
		       "workarounds compiled in. @@@@\n");
		bad_config = 1;
#endif
		break;
	case K_SYS_REVISION_BCM1250_PASS2:
		/* Pass 2 - easiest as default for now - so many numbers */
#if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
	!defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
		printk("@@@@ This is a BCM1250 A3-A10 board, and the "
		       "kernel doesn't have the proper workarounds "
		       "compiled in. @@@@\n");
		bad_config = 1;
#endif
#ifdef CONFIG_CPU_HAS_PREFETCH
		printk("@@@@ Prefetches may be enabled in this kernel, "
		       "but are buggy on this board. @@@@\n");
		bad_config = 1;
#endif
		break;
	case K_SYS_REVISION_BCM1250_PASS2_2:
#ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
		printk("@@@@ This is a BCM1250 B1/B2. board, and the "
		       "kernel doesn't have the proper workarounds "
		       "compiled in. @@@@\n");
		bad_config = 1;
#endif
#if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
	!defined(CONFIG_CPU_HAS_PREFETCH)
		printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
		       "conservatively configured for an 'A' stepping. "
		       "@@@@\n");
#endif
		break;
	default:
		break;
	}
	if (bad_config) {
		printk("Invalid configuration for this chip.\n");
		machine_restart(NULL);
	}
}
| gpl-2.0 |
gchild320/kernel_lge_g3 | arch/sh/kernel/module.c | 9842 | 3741 | /* Kernel module help for SH.
SHcompact version by Kaz Kojima and Paul Mundt.
SHmedia bits:
Copyright 2004 SuperH (UK) Ltd
Author: Richard Curnow
Based on the sh version, and on code from the sh64-specific parts of
modutils, originally written by Richard Curnow and Ben Gaster.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <asm/dwarf.h>
/*
 * Apply the RELA relocations in section 'relsec' to the section they
 * target.  Returns 0 on success, -ENOEXEC on an unknown relocation
 * type.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation;
	uint32_t *location;
	uint32_t value;
	pr_debug("Applying relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to. Note that all
		   undefined symbols have been resolved. */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);
		relocation = sym->st_value + rel[i].r_addend;
#ifdef CONFIG_SUPERH64
		/* For text addresses, bit2 of the st_other field indicates
		 * whether the symbol is SHmedia (1) or SHcompact (0). If
		 * SHmedia, the LSB of the symbol needs to be asserted
		 * for the CPU to be in SHmedia mode when it starts executing
		 * the branch target. */
		relocation |= !!(sym->st_other & 4);
#endif
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_SH_NONE:
			break;
		case R_SH_DIR32:
			/* 32-bit absolute; target may be unaligned. */
			value = get_unaligned(location);
			value += relocation;
			put_unaligned(value, location);
			break;
		case R_SH_REL32:
			/* 32-bit PC-relative. */
			relocation = (relocation - (Elf32_Addr) location);
			value = get_unaligned(location);
			value += relocation;
			put_unaligned(value, location);
			break;
		case R_SH_IMM_LOW16:
			/* Low 16 bits of the value into the instruction's
			 * immediate field (mask 0x3fffc00 = bits 10..25). */
			*location = (*location & ~0x3fffc00) |
				((relocation & 0xffff) << 10);
			break;
		case R_SH_IMM_MEDLOW16:
			/* Bits 16..31 of the value into the same field. */
			*location = (*location & ~0x3fffc00) |
				(((relocation >> 16) & 0xffff) << 10);
			break;
		case R_SH_IMM_LOW16_PCREL:
			relocation -= (Elf32_Addr) location;
			*location = (*location & ~0x3fffc00) |
				((relocation & 0xffff) << 10);
			break;
		case R_SH_IMM_MEDLOW16_PCREL:
			relocation -= (Elf32_Addr) location;
			*location = (*location & ~0x3fffc00) |
				(((relocation >> 16) & 0xffff) << 10);
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
/*
 * Arch-specific module finalization: on SH this is just DWARF unwind
 * table registration.  Returns its result directly.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	return module_dwarf_finalize(hdr, sechdrs, me);
}
/* Undo module_finalize(): unregister the module's DWARF tables. */
void module_arch_cleanup(struct module *mod)
{
	module_dwarf_cleanup(mod);
}
| gpl-2.0 |
FrostBite-Android/isotope-kernel | arch/avr32/boards/hammerhead/flash.c | 11890 | 8518 | /*
* Hammerhead board-specific flash initialization
*
* Copyright (C) 2008 Miromico AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/usb/isp116x.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <mach/portmux.h>
#include <mach/at32ap700x.h>
#include <mach/smc.h>
#include "../../mach-at32ap/clock.h"
#include "flash.h"
#define HAMMERHEAD_USB_PERIPH_GCLK0 0x40000000
#define HAMMERHEAD_USB_PERIPH_CS2 0x02000000
#define HAMMERHEAD_USB_PERIPH_EXTINT0 0x02000000
#define HAMMERHEAD_FPGA_PERIPH_MOSI 0x00000002
#define HAMMERHEAD_FPGA_PERIPH_SCK 0x00000020
#define HAMMERHEAD_FPGA_PERIPH_EXTINT3 0x10000000
static struct smc_timing flash_timing __initdata = {
.ncs_read_setup = 0,
.nrd_setup = 40,
.ncs_write_setup = 0,
.nwe_setup = 10,
.ncs_read_pulse = 80,
.nrd_pulse = 40,
.ncs_write_pulse = 65,
.nwe_pulse = 55,
.read_cycle = 120,
.write_cycle = 120,
};
static struct smc_config flash_config __initdata = {
.bus_width = 2,
.nrd_controlled = 1,
.nwe_controlled = 1,
.byte_write = 1,
};
static struct mtd_partition flash_parts[] = {
{
.name = "u-boot",
.offset = 0x00000000,
.size = 0x00020000, /* 128 KiB */
.mask_flags = MTD_WRITEABLE,
},
{
.name = "root",
.offset = 0x00020000,
.size = 0x007d0000,
},
{
.name = "env",
.offset = 0x007f0000,
.size = 0x00010000,
.mask_flags = MTD_WRITEABLE,
},
};
static struct physmap_flash_data flash_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(flash_parts),
.parts = flash_parts,
};
static struct resource flash_resource = {
.start = 0x00000000,
.end = 0x007fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device = {
.name = "physmap-flash",
.id = 0,
.resource = &flash_resource,
.num_resources = 1,
.dev = { .platform_data = &flash_data, },
};
#ifdef CONFIG_BOARD_HAMMERHEAD_USB
static struct smc_timing isp1160_timing __initdata = {
.ncs_read_setup = 75,
.nrd_setup = 75,
.ncs_write_setup = 75,
.nwe_setup = 75,
/* We use conservative timing settings, as the minimal settings aren't
stable. There may be room for tweaking. */
.ncs_read_pulse = 75, /* min. 33ns */
.nrd_pulse = 75, /* min. 33ns */
.ncs_write_pulse = 75, /* min. 26ns */
.nwe_pulse = 75, /* min. 26ns */
.read_cycle = 225, /* min. 143ns */
.write_cycle = 225, /* min. 136ns */
};
static struct smc_config isp1160_config __initdata = {
.bus_width = 2,
.nrd_controlled = 1,
.nwe_controlled = 1,
.byte_write = 0,
};
/*
 * The platform delay function is only used to enforce the strange
 * read to write delay. This can not be configured in the SMC. All other
 * timings are controlled by the SMC (see timings obove)
 * So in isp116x-hcd.c we should comment out USE_PLATFORM_DELAY
 */
void isp116x_delay(struct device *dev, int delay)
{
	/* Only busy-wait for the portion beyond 150 ns; shorter delays
	 * are presumably already covered by the SMC access timings
	 * above — TODO confirm against the SMC configuration. */
	if (delay > 150)
		ndelay(delay - 150);
}
static struct isp116x_platform_data isp1160_data = {
.sel15Kres = 1, /* use internal downstream resistors */
.oc_enable = 0, /* external overcurrent detection */
.int_edge_triggered = 0, /* interrupt is level triggered */
.int_act_high = 0, /* interrupt is active low */
.delay = isp116x_delay, /* platform delay function */
};
static struct resource isp1160_resource[] = {
{
.start = 0x08000000,
.end = 0x08000001,
.flags = IORESOURCE_MEM,
},
{
.start = 0x08000002,
.end = 0x08000003,
.flags = IORESOURCE_MEM,
},
{
.start = 64,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device isp1160_device = {
.name = "isp116x-hcd",
.id = 0,
.resource = isp1160_resource,
.num_resources = 3,
.dev = {
.platform_data = &isp1160_data,
},
};
#endif
#ifdef CONFIG_BOARD_HAMMERHEAD_USB
/*
 * Configure the SMC, clocks and pins for the ISP1160 USB host
 * controller and register its platform device.  Returns 0 (or the
 * non-negative smc_set_configuration() result) on success, or a
 * negative errno on failure.
 *
 * Note: the clock references are dropped on both the success and the
 * error paths; clk_enable() keeps gclk0 running after the puts.
 */
static int __init hammerhead_usbh_init(void)
{
	struct clk *gclk;
	struct clk *osc;
	int ret;

	/* setup smc for usbh */
	smc_set_timing(&isp1160_config, &isp1160_timing);
	ret = smc_set_configuration(2, &isp1160_config);
	if (ret < 0) {
		printk(KERN_ERR
		       "hammerhead: failed to set ISP1160 USBH timing\n");
		return ret;
	}

	/* setup gclk0 to run from osc1 */
	gclk = clk_get(NULL, "gclk0");
	if (IS_ERR(gclk)) {
		/* Propagate a real error code; the old code returned the
		 * stale (successful) SMC result here. */
		ret = PTR_ERR(gclk);
		goto err_gclk;
	}

	osc = clk_get(NULL, "osc1");
	if (IS_ERR(osc)) {
		ret = PTR_ERR(osc);
		goto err_osc;
	}

	if (clk_set_parent(gclk, osc)) {
		pr_debug("hammerhead: failed to set osc1 for USBH clock\n");
		ret = -EINVAL;
		goto err_set_clk;
	}

	/* set clock to 6MHz */
	clk_set_rate(gclk, 6000000);
	/* and enable */
	clk_enable(gclk);

	/* select GCLK0 peripheral function */
	at32_select_periph(GPIO_PIOA_BASE, HAMMERHEAD_USB_PERIPH_GCLK0,
			   GPIO_PERIPH_A, 0);
	/* enable CS2 peripheral function */
	at32_select_periph(GPIO_PIOE_BASE, HAMMERHEAD_USB_PERIPH_CS2,
			   GPIO_PERIPH_A, 0);
	/* H_WAKEUP must be driven low */
	at32_select_gpio(GPIO_PIN_PA(8), AT32_GPIOF_OUTPUT);
	/* Select EXTINT0 for PB25 */
	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_USB_PERIPH_EXTINT0,
			   GPIO_PERIPH_A, 0);

	/* register usbh device driver */
	platform_device_register(&isp1160_device);

err_set_clk:
	clk_put(osc);
err_osc:
	clk_put(gclk);
err_gclk:
	return ret;
}
#endif
#ifdef CONFIG_BOARD_HAMMERHEAD_FPGA
static struct smc_timing fpga_timing __initdata = {
.ncs_read_setup = 16,
.nrd_setup = 32,
.ncs_read_pulse = 48,
.nrd_pulse = 32,
.read_cycle = 64,
.ncs_write_setup = 16,
.nwe_setup = 16,
.ncs_write_pulse = 32,
.nwe_pulse = 32,
.write_cycle = 64,
};
static struct smc_config fpga_config __initdata = {
.bus_width = 4,
.nrd_controlled = 1,
.nwe_controlled = 1,
.byte_write = 0,
};
static struct resource hh_fpga0_resource[] = {
{
.start = 0xffe00400,
.end = 0xffe00400 + 0x3ff,
.flags = IORESOURCE_MEM,
},
{
.start = 4,
.end = 4,
.flags = IORESOURCE_IRQ,
},
{
.start = 0x0c000000,
.end = 0x0c000100,
.flags = IORESOURCE_MEM,
},
{
.start = 67,
.end = 67,
.flags = IORESOURCE_IRQ,
},
};
static u64 hh_fpga0_dma_mask = DMA_BIT_MASK(32);
static struct platform_device hh_fpga0_device = {
.name = "hh_fpga",
.id = 0,
.dev = {
.dma_mask = &hh_fpga0_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.resource = hh_fpga0_resource,
.num_resources = ARRAY_SIZE(hh_fpga0_resource),
};
static struct clk hh_fpga0_spi_clk = {
.name = "spi_clk",
.dev = &hh_fpga0_device.dev,
.mode = pba_clk_mode,
.get_rate = pba_clk_get_rate,
.index = 1,
};
/*
 * Configure the pins and SPI clock for the on-board FPGA and register
 * its platform device.  Returns the registered device.
 */
struct platform_device *__init at32_add_device_hh_fpga(void)
{
	/* Select peripheral functionallity for SPI SCK and MOSI */
	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_FPGA_PERIPH_SCK,
			   GPIO_PERIPH_B, 0);
	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_FPGA_PERIPH_MOSI,
			   GPIO_PERIPH_B, 0);
	/* reserve all other needed gpio
	 * We have on board pull ups, so there is no need
	 * to enable gpio pull ups */
	/* INIT_DONE (input) */
	at32_select_gpio(GPIO_PIN_PB(0), 0);
	/* nSTATUS (input) */
	at32_select_gpio(GPIO_PIN_PB(2), 0);
	/* nCONFIG (output, low) */
	at32_select_gpio(GPIO_PIN_PB(3), AT32_GPIOF_OUTPUT);
	/* CONF_DONE (input) */
	at32_select_gpio(GPIO_PIN_PB(4), 0);
	/* Select EXTINT3 for PB28 (Interrupt from FPGA) */
	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_FPGA_PERIPH_EXTINT3,
			   GPIO_PERIPH_A, 0);
	/* Get our parent clock */
	hh_fpga0_spi_clk.parent = clk_get(NULL, "pba");
	clk_put(hh_fpga0_spi_clk.parent);
	/* Register clock in at32 clock tree */
	at32_clk_register(&hh_fpga0_spi_clk);
	platform_device_register(&hh_fpga0_device);
	return &hh_fpga0_device;
}
#endif
/* This needs to be called after the SMC has been initialized */
/*
 * Board flash (and optional USB host / FPGA) bring-up.  Configures
 * the SMC for the NOR flash and registers the flash device; then,
 * depending on the board config, initializes the USB host and the
 * FPGA interface.  Returns 0 on success or a negative errno.
 */
static int __init hammerhead_flash_init(void)
{
	int ret;

	smc_set_timing(&flash_config, &flash_timing);
	ret = smc_set_configuration(0, &flash_config);
	if (ret < 0) {
		printk(KERN_ERR "hammerhead: failed to set NOR flash timing\n");
		return ret;
	}

	platform_device_register(&flash_device);

#ifdef CONFIG_BOARD_HAMMERHEAD_USB
	hammerhead_usbh_init();
#endif

#ifdef CONFIG_BOARD_HAMMERHEAD_FPGA
	/* Setup SMC for FPGA interface */
	smc_set_timing(&fpga_config, &fpga_timing);
	ret = smc_set_configuration(3, &fpga_config);
	/* Check inside the #ifdef: the old code tested ret after the
	 * #endif, re-testing the already-validated NOR result whenever
	 * FPGA support was compiled out. */
	if (ret < 0) {
		printk(KERN_ERR "hammerhead: failed to set FPGA timing\n");
		return ret;
	}
#endif

	return 0;
}
| gpl-2.0 |
franciscofranco/mako | arch/arm/mach-msm/dma.c | 627 | 21838 | /* linux/arch/arm/mach-msm/dma.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2008-2010, 2012 The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <mach/dma.h>
#define MODULE_NAME "msm_dmov"
#define MSM_DMOV_CHANNEL_COUNT 16
#define MSM_DMOV_CRCI_COUNT 16
/* Clock state machine for one ADM instance (stored in clk_ctl). */
enum {
	CLK_DIS,	/* clocks off */
	CLK_TO_BE_DIS,	/* idle; delayed work pending to switch clocks off */
	CLK_EN		/* clocks on */
};

/* NOTE(review): no user of this struct is visible in this file. */
struct msm_dmov_ci_conf {
	int start;
	int end;
	int burst;
};

/* Static per-CRCI configuration (security domain + block size). */
struct msm_dmov_crci_conf {
	int sd;
	int blk_size;
};

/* Static per-channel configuration. */
struct msm_dmov_chan_conf {
	int sd;
	int block;
	int priority;
};

/* Run-time state of one ADM (application data mover) instance. */
struct msm_dmov_conf {
	void *base;		/* ioremapped register base */
	struct msm_dmov_crci_conf *crci_conf;
	struct msm_dmov_chan_conf *chan_conf;
	int channel_active;	/* bitmask of channels with active commands */
	int sd;			/* security domain; scales register offsets, see DMOV_REG() */
	size_t sd_size;
	struct list_head staged_commands[MSM_DMOV_CHANNEL_COUNT];	/* queued, not yet seen by worker */
	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];	/* waiting for the hardware FIFO */
	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];	/* issued to the hardware */
	struct mutex lock;	/* serialises clock state transitions */
	spinlock_t list_lock;	/* protects the three command lists */
	unsigned int irq;
	struct clk *clk;	/* mandatory core clock */
	struct clk *pclk;	/* optional interface clock */
	struct clk *ebiclk;	/* optional EBI clock */
	unsigned int clk_ctl;	/* CLK_DIS / CLK_TO_BE_DIS / CLK_EN */
	struct delayed_work work;	/* deferred clock shutdown */
	struct workqueue_struct *cmd_wq;	/* ordered queue feeding the hardware */
};
static void msm_dmov_clock_work(struct work_struct *);
#ifdef CONFIG_ARCH_MSM8X60
#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 1, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_MODEM_CONF { .sd = 3, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_CONF(secd, blk, pri) \
{ .sd = secd, .block = blk, .priority = pri }
static struct msm_dmov_chan_conf adm0_chan_conf[] = {
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
};
static struct msm_dmov_chan_conf adm1_chan_conf[] = {
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_DEFAULT_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
DMOV_CHANNEL_MODEM_CONF,
};
#define DMOV_CRCI_DEFAULT_CONF { .sd = 1, .blk_size = 0 }
#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }
static struct msm_dmov_crci_conf adm0_crci_conf[] = {
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_CONF(1, 4),
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
};
static struct msm_dmov_crci_conf adm1_crci_conf[] = {
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_CONF(1, 1),
DMOV_CRCI_CONF(1, 1),
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_CONF(1, 1),
DMOV_CRCI_CONF(1, 1),
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_DEFAULT_CONF,
DMOV_CRCI_CONF(1, 1),
DMOV_CRCI_DEFAULT_CONF,
};
static struct msm_dmov_conf dmov_conf[] = {
{
.crci_conf = adm0_crci_conf,
.chan_conf = adm0_chan_conf,
.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
.clk_ctl = CLK_DIS,
.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
msm_dmov_clock_work),
}, {
.crci_conf = adm1_crci_conf,
.chan_conf = adm1_chan_conf,
.lock = __MUTEX_INITIALIZER(dmov_conf[1].lock),
.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
.clk_ctl = CLK_DIS,
.work = __DELAYED_WORK_INITIALIZER(dmov_conf[1].work,
msm_dmov_clock_work),
}
};
#else
static struct msm_dmov_conf dmov_conf[] = {
{
.crci_conf = NULL,
.chan_conf = NULL,
.lock = __MUTEX_INITIALIZER(dmov_conf[0].lock),
.list_lock = __SPIN_LOCK_UNLOCKED(dmov_list_lock),
.clk_ctl = CLK_DIS,
.work = __DELAYED_WORK_INITIALIZER(dmov_conf[0].work,
msm_dmov_clock_work),
}
};
#endif
#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
#define DMOV_REG(name, adm) ((name) + (dmov_conf[adm].base) +\
(dmov_conf[adm].sd * dmov_conf[adm].sd_size))
#define DMOV_ID_TO_ADM(id) ((id) / MSM_DMOV_CHANNEL_COUNT)
#define DMOV_ID_TO_CHAN(id) ((id) % MSM_DMOV_CHANNEL_COUNT)
#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)
#ifdef CONFIG_MSM_ADM3
#define DMOV_IRQ_TO_ADM(irq) \
({ \
typeof(irq) _irq = irq; \
((_irq == INT_ADM1_MASTER) || (_irq == INT_ADM1_AARM)); \
})
#else
#define DMOV_IRQ_TO_ADM(irq) 0
#endif
enum {
MSM_DMOV_PRINT_ERRORS = 1,
MSM_DMOV_PRINT_IO = 2,
MSM_DMOV_PRINT_FLOW = 4
};
unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;
#define MSM_DMOV_DPRINTF(mask, format, args...) \
do { \
if ((mask) & msm_dmov_print_mask) \
printk(KERN_ERR format, args); \
} while (0)
#define PRINT_ERROR(format, args...) \
MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);
/*
 * Enable the clocks needed by ADM @adm: the core clock always, plus the
 * optional interface (pclk) and EBI (ebiclk) clocks when present.
 * Any clock enabled before a failure is rolled back.
 * Returns 0 on success or the clk_prepare_enable() error code.
 */
static int msm_dmov_clk_on(int adm)
{
	int rc;

	rc = clk_prepare_enable(dmov_conf[adm].clk);
	if (rc)
		return rc;

	if (dmov_conf[adm].pclk) {
		rc = clk_prepare_enable(dmov_conf[adm].pclk);
		if (rc)
			goto err_core;
	}

	if (dmov_conf[adm].ebiclk) {
		rc = clk_prepare_enable(dmov_conf[adm].ebiclk);
		if (rc)
			goto err_pclk;
	}

	return 0;

err_pclk:
	if (dmov_conf[adm].pclk)
		clk_disable_unprepare(dmov_conf[adm].pclk);
err_core:
	clk_disable_unprepare(dmov_conf[adm].clk);
	return rc;
}
/* Disable the ADM clocks in reverse order of msm_dmov_clk_on(). */
static void msm_dmov_clk_off(int adm)
{
	if (dmov_conf[adm].ebiclk)
		clk_disable_unprepare(dmov_conf[adm].ebiclk);
	if (dmov_conf[adm].pclk)
		clk_disable_unprepare(dmov_conf[adm].pclk);
	clk_disable_unprepare(dmov_conf[adm].clk);
}
/*
 * Delayed work that actually powers the ADM clocks down.  The clocks
 * are only released if the state is still CLK_TO_BE_DIS, i.e. no new
 * command re-enabled them while the work was pending.
 */
static void msm_dmov_clock_work(struct work_struct *work)
{
	struct msm_dmov_conf *conf =
		container_of(to_delayed_work(work), struct msm_dmov_conf, work);
	int adm = DMOV_IRQ_TO_ADM(conf->irq);
	mutex_lock(&conf->lock);
	if (conf->clk_ctl == CLK_TO_BE_DIS) {
		/* No channel may still be running when clocks go off */
		BUG_ON(conf->channel_active);
		msm_dmov_clk_off(adm);
		conf->clk_ctl = CLK_DIS;
	}
	mutex_unlock(&conf->lock);
}
/* Pending-flush request state carried in cmd->toflush. */
enum {
	NOFLUSH = 0,
	GRACEFUL,
	NONGRACEFUL,
};
/*
 * Move the first command on channel @ch's ready list to the active list
 * and write its command pointer to the hardware.  Enables the ADM IRQ
 * when this is the first active command on the instance.
 * Returns the started command, or NULL if the ready list was empty.
 * Caller must hold the list lock.
 */
static struct msm_dmov_cmd *start_ready_cmd(unsigned ch, int adm)
{
	struct msm_dmov_cmd *cmd;

	if (list_empty(&dmov_conf[adm].ready_commands[ch]))
		return NULL;
	cmd = list_entry(dmov_conf[adm].ready_commands[ch].next, typeof(*cmd),
			 list);
	list_del(&cmd->list);
	/* Give the client a last chance to touch the command */
	if (cmd->exec_func)
		cmd->exec_func(cmd);
	list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
	if (!dmov_conf[adm].channel_active)
		enable_irq(dmov_conf[adm].irq);
	dmov_conf[adm].channel_active |= BIT(ch);
	PRINT_IO("msm dmov enqueue command, %x, ch %d\n", cmd->cmdptr, ch);
	writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));

	return cmd;
}
/*
 * Workqueue handler that pushes a staged command towards the hardware.
 * Runs in process context with the ADM mutex held: turns the clocks on
 * if they are off, promotes the oldest staged command for the channel
 * to the ready list and, when the channel's command-pointer FIFO has
 * room, issues it immediately via start_ready_cmd().
 */
static void msm_dmov_enqueue_cmd_ext_work(struct work_struct *work)
{
	struct msm_dmov_cmd *cmd =
		container_of(work, struct msm_dmov_cmd, work);
	unsigned id = cmd->id;
	unsigned status;
	unsigned long flags;
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	mutex_lock(&dmov_conf[adm].lock);
	if (dmov_conf[adm].clk_ctl == CLK_DIS) {
		status = msm_dmov_clk_on(adm);
		if (status != 0)
			goto error;
	}
	dmov_conf[adm].clk_ctl = CLK_EN;

	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);

	/* Oldest staged command on this channel becomes ready */
	cmd = list_entry(dmov_conf[adm].staged_commands[ch].next, typeof(*cmd),
			 list);
	list_del(&cmd->list);
	list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
	if (status & DMOV_STATUS_CMD_PTR_RDY) {
		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
			 id, status);
		cmd = start_ready_cmd(ch, adm);
		/*
		 * We added something to the ready list, and still hold the
		 * list lock. Thus, no need to check for cmd == NULL
		 */
		if (cmd->toflush) {
			/* A flush was requested while the command was staged */
			int flush = (cmd->toflush == GRACEFUL) ? 1 << 31 : 0;
			writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
		}
	} else {
		cmd->toflush = 0;
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
		    !list_empty(&dmov_conf[adm].ready_commands[ch]))
			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
				    "status %x\n", id, status);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
			 "%x\n", id, status);
	}
	if (!dmov_conf[adm].channel_active) {
		/* Nothing running: arm the deferred clock shutdown */
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
	}
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
error:
	mutex_unlock(&dmov_conf[adm].lock);
}
/*
 * Common enqueue path: stage @cmd on its channel's staged list and kick
 * the ordered workqueue, which performs the hardware access (and any
 * clock enabling) in process context.
 */
static void __msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);
	unsigned long flags;

	cmd->id = id;
	cmd->toflush = 0;

	spin_lock_irqsave(&dmov_conf[adm].list_lock, flags);
	list_add_tail(&cmd->list, &dmov_conf[adm].staged_commands[ch]);
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);

	queue_work(dmov_conf[adm].cmd_wq, &cmd->work);
}
/* Queue @cmd on channel @id; any client-provided exec_func is honoured. */
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
	__msm_dmov_enqueue_cmd_ext(id, cmd);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);

/* Legacy enqueue: identical, but the exec_func callback is suppressed. */
void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
	/* Disable callback function (for backwards compatibility) */
	cmd->exec_func = NULL;
	INIT_WORK(&cmd->work, msm_dmov_enqueue_cmd_ext_work);
	__msm_dmov_enqueue_cmd_ext(id, cmd);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
/*
 * Flush channel @id.  If a command is already active on the hardware a
 * flush is written immediately; commands still on the staged list are
 * only marked and get flushed when the worker issues them.
 * @graceful selects the graceful flush type.
 */
void msm_dmov_flush(unsigned int id, int graceful)
{
	unsigned long irq_flags;
	int ch = DMOV_ID_TO_CHAN(id);
	int adm = DMOV_ID_TO_ADM(id);
	int flush = graceful ? DMOV_FLUSH_TYPE : 0;
	struct msm_dmov_cmd *cmd;

	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
	/* XXX not checking if flush cmd sent already */
	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
		writel_relaxed(flush, DMOV_REG(DMOV_FLUSH0(ch), adm));
	}
	list_for_each_entry(cmd, &dmov_conf[adm].staged_commands[ch], list)
		cmd->toflush = graceful ? GRACEFUL : NONGRACEFUL;
	/* spin_unlock_irqrestore has the necessary barrier */
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_flush);
/* On-stack wrapper used by msm_dmov_exec_cmd() for synchronous waits. */
struct msm_dmov_exec_cmdptr_cmd {
	struct msm_dmov_cmd dmov_cmd;	/* embedded command (must be first for container_of) */
	struct completion complete;	/* signalled by the completion callback */
	unsigned id;
	unsigned int result;		/* raw hardware result code */
	struct msm_dmov_errdata err;	/* flush registers captured on error */
};
/*
 * Completion callback for msm_dmov_exec_cmd(): records the result code,
 * copies the error data (unless the result is the clean-completion code
 * 0x80000002) and wakes the waiting caller.
 */
static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
			       unsigned int result,
			       struct msm_dmov_errdata *err)
{
	struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
	cmd->result = result;
	if (result != 0x80000002 && err)
		memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));

	complete(&cmd->complete);
}
/*
 * Synchronously run the command list at @cmdptr on channel @id, waiting
 * (accounted as I/O wait) for completion.
 * Returns 0 on success, or -EIO when the hardware reports anything
 * other than the clean-completion code 0x80000002.
 */
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
{
	struct msm_dmov_exec_cmdptr_cmd cmd;

	PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);

	cmd.dmov_cmd.cmdptr = cmdptr;
	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
	cmd.dmov_cmd.exec_func = NULL;
	cmd.id = id;
	cmd.result = 0;
	/* Command lives on the stack, so the ONSTACK work initializer */
	INIT_WORK_ONSTACK(&cmd.dmov_cmd.work, msm_dmov_enqueue_cmd_ext_work);
	init_completion(&cmd.complete);

	__msm_dmov_enqueue_cmd_ext(id, &cmd.dmov_cmd);
	wait_for_completion_io(&cmd.complete);

	if (cmd.result != 0x80000002) {
		PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
		PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
			    id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
		return -EIO;
	}
	PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_exec_cmd);
/*
 * Snapshot the channel's flush registers for the error callback.
 * NOTE(review): flush[2] is hard-coded to 0 instead of reading a
 * DMOV_FLUSH2 register -- presumably that register is unusable or
 * absent on this hardware; confirm before "fixing".
 */
static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
{
	errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
	errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
	errdata->flush[2] = 0;
	errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
	errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
}
/*
 * Threaded IRQ handler: for every channel flagged in the ISR register,
 * drain all valid results, invoke the completion callbacks (with error
 * data for flush/error results) and start the next ready command
 * whenever the hardware can accept one.  When no channel remains
 * active the IRQ is disabled and a deferred clock shutdown is armed.
 */
static irqreturn_t msm_dmov_isr(int irq, void *dev_id)
{
	unsigned int int_status;
	unsigned int mask;
	unsigned int id;
	unsigned int ch;
	unsigned long irq_flags;
	unsigned int ch_status;
	unsigned int ch_result;
	unsigned int valid = 0;
	struct msm_dmov_cmd *cmd;
	int adm = DMOV_IRQ_TO_ADM(irq);

	mutex_lock(&dmov_conf[adm].lock);
	/* read and clear isr */
	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);

	spin_lock_irqsave(&dmov_conf[adm].list_lock, irq_flags);
	while (int_status) {
		mask = int_status & -int_status;	/* isolate lowest set bit */
		ch = fls(mask) - 1;
		id = DMOV_CHAN_ADM_TO_ID(ch, adm);
		PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
		int_status &= ~mask;
		ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
		if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
			PRINT_FLOW("msm_datamover_irq_handler id %d, "
				   "result not valid %x\n", id, ch_status);
			continue;
		}
		/* Drain every pending result for this channel */
		do {
			valid = 1;
			ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
			if (list_empty(&dmov_conf[adm].active_commands[ch])) {
				PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
					    "with no active command, status %x, result %x\n",
					    id, ch_status, ch_result);
				cmd = NULL;
			} else {
				cmd = list_entry(dmov_conf[adm].
					active_commands[ch].next, typeof(*cmd),
					list);
			}
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
			if (ch_result & DMOV_RSLT_DONE) {
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					   id, ch_status);
				PRINT_IO("msm_datamover_irq_handler id %d, got result "
					 "for %p, result %x\n", id, cmd, ch_result);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, NULL);
				}
			}
			if (ch_result & DMOV_RSLT_FLUSH) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
			}
			if (ch_result & DMOV_RSLT_ERROR) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);
				PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
				/* this does not seem to work, once we get an error */
				/* the datamover will no longer accept commands */
				writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
					       adm));
			}
			rmb();	/* result reads must complete before re-reading status */
			ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
					adm));
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
			if (ch_status & DMOV_STATUS_CMD_PTR_RDY)
				start_ready_cmd(ch, adm);
		} while (ch_status & DMOV_STATUS_RSLT_VALID);
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
		    list_empty(&dmov_conf[adm].ready_commands[ch]))
			dmov_conf[adm].channel_active &= ~(1U << ch);
		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].list_lock, irq_flags);

	if (!dmov_conf[adm].channel_active && valid) {
		disable_irq_nosync(dmov_conf[adm].irq);
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
	}
	mutex_unlock(&dmov_conf[adm].lock);
	return valid ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Late system suspend: if a deferred clock shutdown is still pending,
 * perform it now so the clocks are guaranteed off while asleep.
 */
static int msm_dmov_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	mutex_lock(&dmov_conf[adm].lock);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		msm_dmov_clk_off(adm);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	mutex_unlock(&dmov_conf[adm].lock);
	return 0;
}
/* Runtime PM hooks are debug-only no-ops: clock gating is done internally. */
static int msm_dmov_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int msm_dmov_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static int msm_dmov_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idling...\n");
	return 0;
}
static struct dev_pm_ops msm_dmov_dev_pm_ops = {
.runtime_suspend = msm_dmov_runtime_suspend,
.runtime_resume = msm_dmov_runtime_resume,
.runtime_idle = msm_dmov_runtime_idle,
.suspend = msm_dmov_suspend_late,
};
/*
 * Look up the clocks for the ADM instance of @pdev.  Only the core
 * clock is mandatory; the interface and EBI clocks are optional.
 * Returns 0 on success, -ENOENT when the core clock is missing or the
 * EBI clock rate cannot be set.
 *
 * Fix: the clk_get() references are now released when clk_set_rate()
 * on the EBI clock fails; previously they leaked on that path.
 */
static int msm_dmov_init_clocks(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int ret;

	dmov_conf[adm].clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dmov_conf[adm].clk)) {
		printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
		dmov_conf[adm].clk = NULL;
		return -ENOENT;
	}

	dmov_conf[adm].pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dmov_conf[adm].pclk)) {
		dmov_conf[adm].pclk = NULL;
		/* pclk not present on all SoCs, don't bail on failure */
	}

	dmov_conf[adm].ebiclk = clk_get(&pdev->dev, "mem_clk");
	if (IS_ERR(dmov_conf[adm].ebiclk)) {
		dmov_conf[adm].ebiclk = NULL;
		/* ebiclk not present on all SoCs, don't bail on failure */
	} else {
		ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
		if (ret) {
			/* Release all references taken above */
			clk_put(dmov_conf[adm].ebiclk);
			dmov_conf[adm].ebiclk = NULL;
			if (dmov_conf[adm].pclk) {
				clk_put(dmov_conf[adm].pclk);
				dmov_conf[adm].pclk = NULL;
			}
			clk_put(dmov_conf[adm].clk);
			dmov_conf[adm].clk = NULL;
			return -ENOENT;
		}
	}
	return 0;
}
/*
 * One-time hardware configuration of ADM @adm: program the security
 * domain and shadow mode for the Scorpion-owned channels and the block
 * size for every CRCI.  Compiles to a no-op without CONFIG_MSM_ADM3.
 */
static void config_datamover(int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		struct msm_dmov_chan_conf *chan_conf =
			dmov_conf[adm].chan_conf;
		unsigned conf;
		/* Only configure scorpion channels */
		if (chan_conf[i].sd <= 1) {
			conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
			conf &= ~DMOV_CONF_SD(7);
			conf |= DMOV_CONF_SD(chan_conf[i].sd);
			writel_relaxed(conf | DMOV_CONF_SHADOW_EN,
				       DMOV_REG(DMOV_CONF(i), adm));
		}
	}
	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		struct msm_dmov_crci_conf *crci_conf =
			dmov_conf[adm].crci_conf;
		writel_relaxed(DMOV_CRCI_CTL_BLK_SZ(crci_conf[i].blk_size),
			       DMOV_REG(DMOV_CRCI_CTL(i), adm));
	}
#endif
}
/*
 * Probe one ADM instance: map its registers, create the ordered
 * command workqueue, request its (initially disabled) IRQ, set up the
 * clocks and program per-channel result configuration.
 * NOTE(review): on failure after msm_dmov_init_clocks() succeeded, the
 * error path (out_irq) does not clk_put() the acquired clocks -- looks
 * like a reference leak; confirm before changing.
 */
static int msm_dmov_probe(struct platform_device *pdev)
{
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	int i;
	int ret;
	struct msm_dmov_pdata *pdata = pdev->dev.platform_data;
	struct resource *irqres =
		platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct resource *mres =
		platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (pdata) {
		dmov_conf[adm].sd = pdata->sd;
		dmov_conf[adm].sd_size = pdata->sd_size;
	}
	if (!dmov_conf[adm].sd_size)
		return -ENXIO;

	if (!irqres || !irqres->start)
		return -ENXIO;
	dmov_conf[adm].irq = irqres->start;

	if (!mres || !mres->start)
		return -ENXIO;
	dmov_conf[adm].base = ioremap_nocache(mres->start, resource_size(mres));
	if (!dmov_conf[adm].base)
		return -ENOMEM;

	/* Ordered queue: commands are issued to hardware one at a time */
	dmov_conf[adm].cmd_wq = alloc_ordered_workqueue("dmov%d_wq", 0, adm);
	if (!dmov_conf[adm].cmd_wq) {
		PRINT_ERROR("Couldn't allocate ADM%d workqueue.\n", adm);
		ret = -ENOMEM;
		goto out_map;
	}

	ret = request_threaded_irq(dmov_conf[adm].irq, NULL, msm_dmov_isr,
				   IRQF_ONESHOT, "msmdatamover", NULL);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
			    dmov_conf[adm].irq);
		goto out_wq;
	}
	/* IRQ stays disabled until the first command becomes active */
	disable_irq(dmov_conf[adm].irq);
	ret = msm_dmov_init_clocks(pdev);
	if (ret) {
		PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
		goto out_irq;
	}
	ret = msm_dmov_clk_on(adm);
	if (ret) {
		PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
		goto out_irq;
	}

	config_datamover(adm);
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		INIT_LIST_HEAD(&dmov_conf[adm].staged_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
		INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);

		writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
			       | DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
			       DMOV_REG(DMOV_RSLT_CONF(i), adm));
	}
	wmb();	/* register writes must complete before clocks go off */
	msm_dmov_clk_off(adm);
	return ret;
out_irq:
	free_irq(dmov_conf[adm].irq, NULL);
out_wq:
	destroy_workqueue(dmov_conf[adm].cmd_wq);
out_map:
	iounmap(dmov_conf[adm].base);
	return ret;
}
/* Platform driver glue; probing is driven by board-level platform devices. */
static struct platform_driver msm_dmov_driver = {
	.probe = msm_dmov_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_dmov_dev_pm_ops,
	},
};
/*
 * Module entry point: just register the platform driver.
 * (Collapsed the redundant "ret = ...; if (ret) return ret; return 0;"
 * plumbing and dropped the stale "/&#42; static int __init &#42;/" comment.)
 */
static int __init msm_init_datamover(void)
{
	return platform_driver_register(&msm_dmov_driver);
}
arch_initcall(msm_init_datamover);
| gpl-2.0 |
Blefish/kernel_common | drivers/media/usb/dvb-usb/az6027.c | 627 | 27928 | /* DVB USB compliant Linux driver for the AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)
* receiver.
*
* Copyright (C) 2009 Adams.Xu <adams.xu@azwave.com.cn>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, version 2.
*
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "az6027.h"
#include "stb0899_drv.h"
#include "stb0899_reg.h"
#include "stb0899_cfg.h"
#include "stb6100.h"
#include "stb6100_cfg.h"
#include "dvb_ca_en50221.h"
int dvb_usb_az6027_debug;
module_param_named(debug, dvb_usb_az6027_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* Per-device state: EN50221 CA interface plus a mutex serialising CI ops. */
struct az6027_device_state {
	struct dvb_ca_en50221 ca;
	struct mutex ca_mutex;	/* guards the vendor-request CI accesses */
	u8 power_state;
};
static const struct stb0899_s1_reg az6027_stb0899_s1_init_1[] = {
/* 0x0000000b, SYSREG */
{ STB0899_DEV_ID , 0x30 },
{ STB0899_DISCNTRL1 , 0x32 },
{ STB0899_DISCNTRL2 , 0x80 },
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x99 },
{ STB0899_DISF22RX , 0xa8 },
/* SYSREG ? */
{ STB0899_ACRPRESC , 0x11 },
{ STB0899_ACRDIV1 , 0x0a },
{ STB0899_ACRDIV2 , 0x05 },
{ STB0899_DACR1 , 0x00 },
{ STB0899_DACR2 , 0x00 },
{ STB0899_OUTCFG , 0x00 },
{ STB0899_MODECFG , 0x00 },
{ STB0899_IRQSTATUS_3 , 0xfe },
{ STB0899_IRQSTATUS_2 , 0x03 },
{ STB0899_IRQSTATUS_1 , 0x7c },
{ STB0899_IRQSTATUS_0 , 0xf4 },
{ STB0899_IRQMSK_3 , 0xf3 },
{ STB0899_IRQMSK_2 , 0xfc },
{ STB0899_IRQMSK_1 , 0xff },
{ STB0899_IRQMSK_0 , 0xff },
{ STB0899_IRQCFG , 0x00 },
{ STB0899_I2CCFG , 0x88 },
{ STB0899_I2CRPT , 0x58 },
{ STB0899_IOPVALUE5 , 0x00 },
{ STB0899_IOPVALUE4 , 0x33 },
{ STB0899_IOPVALUE3 , 0x6d },
{ STB0899_IOPVALUE2 , 0x90 },
{ STB0899_IOPVALUE1 , 0x60 },
{ STB0899_IOPVALUE0 , 0x00 },
{ STB0899_GPIO00CFG , 0x82 },
{ STB0899_GPIO01CFG , 0x82 },
{ STB0899_GPIO02CFG , 0x82 },
{ STB0899_GPIO03CFG , 0x82 },
{ STB0899_GPIO04CFG , 0x82 },
{ STB0899_GPIO05CFG , 0x82 },
{ STB0899_GPIO06CFG , 0x82 },
{ STB0899_GPIO07CFG , 0x82 },
{ STB0899_GPIO08CFG , 0x82 },
{ STB0899_GPIO09CFG , 0x82 },
{ STB0899_GPIO10CFG , 0x82 },
{ STB0899_GPIO11CFG , 0x82 },
{ STB0899_GPIO12CFG , 0x82 },
{ STB0899_GPIO13CFG , 0x82 },
{ STB0899_GPIO14CFG , 0x82 },
{ STB0899_GPIO15CFG , 0x82 },
{ STB0899_GPIO16CFG , 0x82 },
{ STB0899_GPIO17CFG , 0x82 },
{ STB0899_GPIO18CFG , 0x82 },
{ STB0899_GPIO19CFG , 0x82 },
{ STB0899_GPIO20CFG , 0x82 },
{ STB0899_SDATCFG , 0xb8 },
{ STB0899_SCLTCFG , 0xba },
{ STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
{ STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
{ STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
{ STB0899_DIRCLKCFG , 0x82 },
{ STB0899_CLKOUT27CFG , 0x7e },
{ STB0899_STDBYCFG , 0x82 },
{ STB0899_CS0CFG , 0x82 },
{ STB0899_CS1CFG , 0x82 },
{ STB0899_DISEQCOCFG , 0x20 },
{ STB0899_GPIO32CFG , 0x82 },
{ STB0899_GPIO33CFG , 0x82 },
{ STB0899_GPIO34CFG , 0x82 },
{ STB0899_GPIO35CFG , 0x82 },
{ STB0899_GPIO36CFG , 0x82 },
{ STB0899_GPIO37CFG , 0x82 },
{ STB0899_GPIO38CFG , 0x82 },
{ STB0899_GPIO39CFG , 0x82 },
{ STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
{ STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
{ STB0899_FILTCTRL , 0x00 },
{ STB0899_SYSCTRL , 0x01 },
{ STB0899_STOPCLK1 , 0x20 },
{ STB0899_STOPCLK2 , 0x00 },
{ STB0899_INTBUFSTATUS , 0x00 },
{ STB0899_INTBUFCTRL , 0x0a },
{ 0xffff , 0xff },
};
static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_DEMOD , 0x00 },
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
{ STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
{ STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
{ STB0899_LDT2 , 0xe3 },
{ STB0899_EQUALREF , 0xb4 },
{ STB0899_TMGRAMP , 0x10 },
{ STB0899_TMGTHD , 0x30 },
{ STB0899_IDCCOMP , 0xfd },
{ STB0899_QDCCOMP , 0xff },
{ STB0899_POWERI , 0x0c },
{ STB0899_POWERQ , 0x0f },
{ STB0899_RCOMP , 0x6c },
{ STB0899_AGCIQIN , 0x80 },
{ STB0899_AGC2I1 , 0x06 },
{ STB0899_AGC2I2 , 0x00 },
{ STB0899_TLIR , 0x30 },
{ STB0899_RTF , 0x7f },
{ STB0899_DSTATUS , 0x00 },
{ STB0899_LDI , 0xbc },
{ STB0899_CFRM , 0xea },
{ STB0899_CFRL , 0x31 },
{ STB0899_NIRM , 0x2b },
{ STB0899_NIRL , 0x80 },
{ STB0899_ISYMB , 0x1d },
{ STB0899_QSYMB , 0xa6 },
{ STB0899_SFRH , 0x2f },
{ STB0899_SFRM , 0x68 },
{ STB0899_SFRL , 0x40 },
{ STB0899_SFRUPH , 0x2f },
{ STB0899_SFRUPM , 0x68 },
{ STB0899_SFRUPL , 0x40 },
{ STB0899_EQUAI1 , 0x02 },
{ STB0899_EQUAQ1 , 0xff },
{ STB0899_EQUAI2 , 0x04 },
{ STB0899_EQUAQ2 , 0x05 },
{ STB0899_EQUAI3 , 0x02 },
{ STB0899_EQUAQ3 , 0xfd },
{ STB0899_EQUAI4 , 0x03 },
{ STB0899_EQUAQ4 , 0x07 },
{ STB0899_EQUAI5 , 0x08 },
{ STB0899_EQUAQ5 , 0xf5 },
{ STB0899_DSTATUS2 , 0x00 },
{ STB0899_VSTATUS , 0x00 },
{ STB0899_VERROR , 0x86 },
{ STB0899_IQSWAP , 0x2a },
{ STB0899_ECNT1M , 0x00 },
{ STB0899_ECNT1L , 0x00 },
{ STB0899_ECNT2M , 0x00 },
{ STB0899_ECNT2L , 0x00 },
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
{ STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
{ STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
{ STB0899_PRVIT , 0xff },
{ STB0899_VITSYNC , 0x19 },
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
{ STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
{ STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */
{ STB0899_RSSYNCDEL , 0x00 },
{ STB0899_TSINHDELH , 0x02 },
{ STB0899_TSINHDELM , 0x00 },
{ STB0899_TSINHDELL , 0x00 },
{ STB0899_TSLLSTKM , 0x1b },
{ STB0899_TSLLSTKL , 0xb3 },
{ STB0899_TSULSTKM , 0x00 },
{ STB0899_TSULSTKL , 0x00 },
{ STB0899_PCKLENUL , 0xbc },
{ STB0899_PCKLENLL , 0xcc },
{ STB0899_RSPCKLEN , 0xbd },
{ STB0899_TSSTATUS , 0x90 },
{ STB0899_ERRCTRL1 , 0xb6 },
{ STB0899_ERRCTRL2 , 0x95 },
{ STB0899_ERRCTRL3 , 0x8d },
{ STB0899_DMONMSK1 , 0x27 },
{ STB0899_DMONMSK0 , 0x03 },
{ STB0899_DEMAPVIT , 0x5c },
{ STB0899_PLPARM , 0x19 },
{ STB0899_PDELCTRL , 0x48 },
{ STB0899_PDELCTRL2 , 0x00 },
{ STB0899_BBHCTRL1 , 0x00 },
{ STB0899_BBHCTRL2 , 0x00 },
{ STB0899_HYSTTHRESH , 0x77 },
{ STB0899_MATCSTM , 0x00 },
{ STB0899_MATCSTL , 0x00 },
{ STB0899_UPLCSTM , 0x00 },
{ STB0899_UPLCSTL , 0x00 },
{ STB0899_DFLCSTM , 0x00 },
{ STB0899_DFLCSTL , 0x00 },
{ STB0899_SYNCCST , 0x00 },
{ STB0899_SYNCDCSTM , 0x00 },
{ STB0899_SYNCDCSTL , 0x00 },
{ STB0899_ISI_ENTRY , 0x00 },
{ STB0899_ISI_BIT_EN , 0x00 },
{ STB0899_MATSTRM , 0xf0 },
{ STB0899_MATSTRL , 0x02 },
{ STB0899_UPLSTRM , 0x45 },
{ STB0899_UPLSTRL , 0x60 },
{ STB0899_DFLSTRM , 0xe3 },
{ STB0899_DFLSTRL , 0x00 },
{ STB0899_SYNCSTR , 0x47 },
{ STB0899_SYNCDSTRM , 0x05 },
{ STB0899_SYNCDSTRL , 0x18 },
{ STB0899_CFGPDELSTATUS1 , 0x19 },
{ STB0899_CFGPDELSTATUS2 , 0x2b },
{ STB0899_BBFERRORM , 0x00 },
{ STB0899_BBFERRORL , 0x01 },
{ STB0899_UPKTERRORM , 0x00 },
{ STB0899_UPKTERRORL , 0x00 },
{ 0xffff , 0xff },
};
static struct stb0899_config az6027_stb0899_config = {
.init_dev = az6027_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = az6027_stb0899_s1_init_3,
.init_s2_fec = stb0899_s2_init_4,
.init_tst = stb0899_s1_init_5,
.demod_address = 0xd0, /* 0x68, 0xd0 >> 1 */
.xtal_freq = 27000000,
.inversion = IQ_SWAP_ON,
.lo_clk = 76500000,
.hi_clk = 99000000,
.esno_ave = STB0899_DVBS2_ESNO_AVE,
.esno_quant = STB0899_DVBS2_ESNO_QUANT,
.avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
.avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
.miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
.uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
.uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
.uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
.sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
.btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
.btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
.crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
.ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
.tuner_get_frequency = stb6100_get_frequency,
.tuner_set_frequency = stb6100_set_frequency,
.tuner_set_bandwidth = stb6100_set_bandwidth,
.tuner_get_bandwidth = stb6100_get_bandwidth,
.tuner_set_rfsiggain = NULL,
};
static struct stb6100_config az6027_stb6100_config = {
.tuner_address = 0xc0,
.refclock = 27000000,
};
/*
 * Synchronous vendor control-IN transfer: reads @blen bytes into @b.
 * Returns 0 on success, -EIO on USB failure, -EAGAIN if interrupted
 * while waiting for the USB mutex.
 * (Removed the dead "ret = -1" initializer -- ret was unconditionally
 * overwritten by usb_control_msg() -- and the stale mutex FIXME, since
 * the mutex is taken below.)
 */
static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req,
			    u16 value, u16 index, u8 *b, int blen)
{
	int ret;

	if (mutex_lock_interruptible(&d->usb_mutex))
		return -EAGAIN;

	ret = usb_control_msg(d->udev,
			      usb_rcvctrlpipe(d->udev, 0),
			      req,
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      value,
			      index,
			      b,
			      blen,
			      2000);

	if (ret < 0) {
		warn("usb in operation failed. (%d)", ret);
		ret = -EIO;
	} else
		ret = 0;

	deb_xfer("in: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index);
	debug_dump(b, blen, deb_xfer);

	mutex_unlock(&d->usb_mutex);
	return ret;
}
/*
 * Synchronous vendor control-OUT transfer of @blen bytes from @b.
 * Returns 0 on success, -EIO if the device accepted fewer bytes than
 * requested, -EAGAIN if interrupted while waiting for the USB mutex.
 */
static int az6027_usb_out_op(struct dvb_usb_device *d,
			     u8 req,
			     u16 value,
			     u16 index,
			     u8 *b,
			     int blen)
{
	int sent;

	deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index);
	debug_dump(b, blen, deb_xfer);

	if (mutex_lock_interruptible(&d->usb_mutex))
		return -EAGAIN;

	sent = usb_control_msg(d->udev,
			       usb_sndctrlpipe(d->udev, 0),
			       req,
			       USB_TYPE_VENDOR | USB_DIR_OUT,
			       value,
			       index,
			       b,
			       blen,
			       2000);
	mutex_unlock(&d->usb_mutex);

	if (sent != blen) {
		warn("usb out operation failed. (%d)", sent);
		return -EIO;
	}
	return 0;
}
/*
 * Turn TS streaming on or off via vendor request 0xBC; @onoff is
 * carried in the wValue field.  Returns the USB-op result (0 on
 * success).
 */
static int az6027_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	int ret;

	deb_info("%s %d", __func__, onoff);

	/* req 0xBC, value = onoff, index 0, no data stage */
	ret = az6027_usb_out_op(adap->dev, 0xBC, onoff, 0, NULL, 0);
	if (ret != 0)
		warn("usb out operation failed. (%d)", ret);

	return ret;
}
/* keys for the enclosed remote control */
static struct rc_map_table rc_map_az6027_table[] = {
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
};
/* remote control stuff (does not work with my box) */
static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
	/* No RC polling implemented; never reports an event. */
	return 0;
}
/*
int az6027_power_ctrl(struct dvb_usb_device *d, int onoff)
{
u8 v = onoff;
return az6027_usb_out_op(d,0xBC,v,3,NULL,1);
}
*/
/*
 * Read one byte of CAM attribute memory at @address via vendor request
 * 0xC1.  Returns the byte read, or a negative errno.
 */
static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
					int slot,
					int address)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;
	u8 req;
	u16 value;
	u16 index;
	int blen;
	u8 *b;

	if (slot != 0)
		return -EINVAL;

	/* Transfer buffer is kmalloc'd, not on the stack */
	b = kmalloc(12, GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	mutex_lock(&state->ca_mutex);

	req = 0xC1;
	value = address;
	index = 0;
	blen = 1;

	ret = az6027_usb_in_op(d, req, value, index, b, blen);
	if (ret < 0) {
		warn("usb in operation failed. (%d)", ret);
		ret = -EINVAL;
	} else {
		ret = b[0];
	}

	mutex_unlock(&state->ca_mutex);
	kfree(b);
	return ret;
}
/*
 * Write one byte to CAM attribute memory at @address via vendor request
 * 0xC2; the data byte travels in the wIndex field.
 */
static int az6027_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
					 int slot,
					 int address,
					 u8 value)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;
	u8 req;
	u16 value1;
	u16 index;
	int blen;

	deb_info("%s %d", __func__, slot);
	if (slot != 0)
		return -EINVAL;

	mutex_lock(&state->ca_mutex);
	req = 0xC2;
	value1 = address;
	index = value;
	blen = 0;

	ret = az6027_usb_out_op(d, req, value1, index, NULL, blen);
	if (ret != 0)
		warn("usb out operation failed. (%d)", ret);

	mutex_unlock(&state->ca_mutex);
	return ret;
}
/*
 * Read one byte from the CAM control (I/O) space at @address via vendor
 * request 0xC3.  The device returns two bytes: b[0] is a status flag
 * (0 means the CI I/O failed), b[1] is the data byte, which is the
 * return value on success.
 */
static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
				      int slot,
				      u8 address)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;
	u8 req;
	u16 value;
	u16 index;
	int blen;
	u8 *b;

	if (slot != 0)
		return -EINVAL;

	b = kmalloc(12, GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	mutex_lock(&state->ca_mutex);

	req = 0xC3;
	value = address;
	index = 0;
	blen = 2;

	ret = az6027_usb_in_op(d, req, value, index, b, blen);
	if (ret < 0) {
		warn("usb in operation failed. (%d)", ret);
		ret = -EINVAL;
	} else {
		if (b[0] == 0)
			warn("Read CI IO error");

		ret = b[1];
		deb_info("read cam data = %x from 0x%x", b[1], value);
	}

	mutex_unlock(&state->ca_mutex);
	kfree(b);
	return ret;
}
/*
 * EN50221 callback: write a CAM control register.
 * Returns 0 on success, -EINVAL on bad slot, or the USB op's error.
 *
 * Cleanup: the original jumped to a "failed:" label that immediately
 * followed the goto — a no-op goto that only obscured control flow;
 * the error path now falls straight through to the unlock.
 */
static int az6027_ci_write_cam_control(struct dvb_ca_en50221 *ca,
				       int slot,
				       u8 address,
				       u8 value)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;
	u8 req;
	u16 value1;
	u16 index;
	int blen;

	if (slot != 0)
		return -EINVAL;

	mutex_lock(&state->ca_mutex);

	/* vendor request 0xC4: address in wValue, data byte in wIndex */
	req = 0xC4;
	value1 = address;
	index = value;
	blen = 0;

	ret = az6027_usb_out_op(d, req, value1, index, NULL, blen);
	if (ret != 0)
		warn("usb out operation failed. (%d)", ret);

	mutex_unlock(&state->ca_mutex);
	return ret;
}
/*
 * Query CAM-ready status via vendor request 0xC8.
 * Returns the status byte (non-zero = ready), -EIO on USB failure,
 * or -ENOMEM on allocation failure.
 */
static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	int ret;
	u8 req;
	u16 value;
	u16 index;
	int blen;
	u8 *b;

	b = kmalloc(12, GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	req = 0xC8;
	value = 0;
	index = 0;
	blen = 1;

	ret = az6027_usb_in_op(d, req, value, index, b, blen);
	if (ret < 0) {
		warn("usb in operation failed. (%d)", ret);
		ret = -EIO;
	} else {
		ret = b[0];
	}

	kfree(b);
	return ret;
}
/*
 * EN50221 callback: pulse the CAM reset line (vendor request 0xC6
 * high, then low), then poll up to 1.5s for the CAM to come ready.
 */
static int az6027_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret, i;
	u8 req;
	u16 value;
	u16 index;
	int blen;

	mutex_lock(&state->ca_mutex);

	/* assert the reset line */
	req = 0xC6;
	value = 1;
	index = 0;
	blen = 0;

	ret = az6027_usb_out_op(d, req, value, index, NULL, blen);
	if (ret != 0) {
		warn("usb out operation failed. (%d)", ret);
		goto failed;
	}

	msleep(500);

	/* release the reset line */
	req = 0xC6;
	value = 0;
	index = 0;
	blen = 0;

	ret = az6027_usb_out_op(d, req, value, index, NULL, blen);
	if (ret != 0) {
		warn("usb out operation failed. (%d)", ret);
		goto failed;
	}

	/*
	 * NOTE(review): CI_CamReady() can return a negative errno, which is
	 * also truthy here and would be taken as "ready" — confirm whether
	 * errors should abort the poll instead.
	 */
	for (i = 0; i < 15; i++) {
		msleep(100);

		if (CI_CamReady(ca, slot)) {
			deb_info("CAM Ready");
			break;
		}
	}
	msleep(5000);	/* extra settle time before first CAM access */

failed:
	mutex_unlock(&state->ca_mutex);
	return ret;
}
/* EN50221 callback: slot shutdown is a no-op on this hardware. */
static int az6027_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
	return 0;
}
/*
 * EN50221 callback: route the transport stream through the CAM
 * (vendor request 0xC7, value 1).
 * Returns 0 on success or the USB op's error code.
 *
 * Cleanup: the original jumped to a "failed:" label that immediately
 * followed the goto — a no-op goto removed here; the error path falls
 * straight through to the unlock.
 */
static int az6027_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;
	u8 req;
	u16 value;
	u16 index;
	int blen;

	deb_info("%s", __func__);

	mutex_lock(&state->ca_mutex);

	req = 0xC7;
	value = 1;
	index = 0;
	blen = 0;

	ret = az6027_usb_out_op(d, req, value, index, NULL, blen);
	if (ret != 0)
		warn("usb out operation failed. (%d)", ret);

	mutex_unlock(&state->ca_mutex);
	return ret;
}
/*
 * EN50221 callback: poll CAM presence (vendor request 0xC5).
 * Returns the POLL_CAM_* flags when a CAM is inserted, 0 when empty,
 * -EIO on USB failure, -ENOMEM on allocation failure.
 */
static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;
	u8 req;
	u16 value;
	u16 index;
	int blen;
	u8 *b;

	b = kmalloc(12, GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	mutex_lock(&state->ca_mutex);

	req = 0xC5;
	value = 0;
	index = 0;
	blen = 1;	/* b[0] == 1 means a CAM is present */

	ret = az6027_usb_in_op(d, req, value, index, b, blen);
	if (ret < 0) {
		warn("usb in operation failed. (%d)", ret);
		ret = -EIO;
	} else
		ret = 0;

	if (!ret && b[0] == 1) {
		/* report the CAM as both present and ready in one step */
		ret = DVB_CA_EN50221_POLL_CAM_PRESENT |
		      DVB_CA_EN50221_POLL_CAM_READY;
	}

	mutex_unlock(&state->ca_mutex);
	kfree(b);
	return ret;
}
/*
 * Tear down the CI layer: release the EN50221 slot (if it was ever
 * registered, indicated by a non-NULL ca.data) and clear the state.
 * Safe to call with a NULL device or private state.
 */
static void az6027_ci_uninit(struct dvb_usb_device *d)
{
	struct az6027_device_state *state;

	deb_info("%s", __func__);

	if (!d)
		return;

	state = (struct az6027_device_state *)d->priv;
	if (!state || !state->ca.data)
		return;

	dvb_ca_en50221_release(&state->ca);
	memset(&state->ca, 0, sizeof(state->ca));
}
/*
 * Register this adapter's single CI slot with the dvb-core EN50221
 * layer, wiring up all the callbacks defined above.
 * Returns 0 on success or the dvb_ca_en50221_init() error.
 */
static int az6027_ci_init(struct dvb_usb_adapter *a)
{
	struct dvb_usb_device *d = a->dev;
	struct az6027_device_state *state = (struct az6027_device_state *)d->priv;
	int ret;

	deb_info("%s", __func__);

	mutex_init(&state->ca_mutex);

	state->ca.owner = THIS_MODULE;
	state->ca.read_attribute_mem = az6027_ci_read_attribute_mem;
	state->ca.write_attribute_mem = az6027_ci_write_attribute_mem;
	state->ca.read_cam_control = az6027_ci_read_cam_control;
	state->ca.write_cam_control = az6027_ci_write_cam_control;
	state->ca.slot_reset = az6027_ci_slot_reset;
	state->ca.slot_shutdown = az6027_ci_slot_shutdown;
	state->ca.slot_ts_enable = az6027_ci_slot_ts_enable;
	state->ca.poll_slot_status = az6027_ci_poll_slot_status;
	state->ca.data = d;	/* also used as the "initialized" marker */

	ret = dvb_ca_en50221_init(&a->dvb_adap,
				  &state->ca,
				  0, /* flags */
				  1);/* n_slots */
	if (ret != 0) {
		err("Cannot initialize CI: Error %d.", ret);
		memset(&state->ca, 0, sizeof(state->ca));
		return ret;
	}

	deb_info("CI initialized.");
	return 0;
}
/*
static int az6027_read_mac_addr(struct dvb_usb_device *d, u8 mac[6])
{
az6027_usb_in_op(d, 0xb7, 6, 0, &mac[0], 6);
return 0;
}
*/
/*
 * Frontend .set_voltage hook: program the LNB supply via the I2C
 * device at address 0x99. Returns 0, or -EINVAL for an unknown
 * voltage request. The i2c_transfer() result is ignored, as before.
 */
static int az6027_set_voltage(struct dvb_frontend *fe,
			      enum fe_sec_voltage voltage)
{
	u8 buf;
	struct dvb_usb_adapter *adap = fe->dvb->priv;
	struct i2c_msg i2c_msg = {
		.addr = 0x99,
		.flags = 0,
		.buf = &buf,
		.len = 1
	};

	/*
	 * Command byte:
	 *   2 -- 18v
	 *   1 -- 13v
	 *   0 -- off
	 */
	switch (voltage) {
	case SEC_VOLTAGE_OFF:
		buf = 0;
		break;
	case SEC_VOLTAGE_13:
		buf = 1;
		break;
	case SEC_VOLTAGE_18:
		buf = 2;
		break;
	default:
		return -EINVAL;
	}

	i2c_transfer(&adap->dev->i2c_adap, &i2c_msg, 1);
	return 0;
}
/*
 * Power on the frontend: vendor request 0xBC, value 1, index 3.
 * Returns 0 on success, -EIO if the USB control transfer fails.
 */
static int az6027_frontend_poweron(struct dvb_usb_adapter *adap)
{
	int ret;

	ret = az6027_usb_out_op(adap->dev, 0xBC, 1 /* power on */, 3, NULL, 0);
	return (ret != 0) ? -EIO : 0;
}
/*
 * Reset the demodulator by toggling its reset line (vendor request
 * 0xC0, index 3): high -> low -> high with 200ms settle times.
 * Returns 0 on success, -EIO on any failed USB transfer.
 */
static int az6027_frontend_reset(struct dvb_usb_adapter *adap)
{
	int ret;
	u8 req;
	u16 value;
	u16 index;
	int blen;

	/* reset demodulator */
	req = 0xC0;
	value = 1; /* high */
	index = 3;
	blen = 0;

	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
	if (ret != 0)
		return -EIO;

	req = 0xC0;
	value = 0; /* low */
	index = 3;
	blen = 0;

	msleep_interruptible(200);

	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
	if (ret != 0)
		return -EIO;

	msleep_interruptible(200);

	req = 0xC0;
	value = 1; /*high */
	index = 3;
	blen = 0;

	ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen);
	if (ret != 0)
		return -EIO;

	msleep_interruptible(200);
	return 0;
}
/*
 * Enable/disable TS passthrough (bypassing the CI slot) via vendor
 * request 0xC7. Returns 0 on success, -EIO on USB failure.
 */
static int az6027_frontend_tsbypass(struct dvb_usb_adapter *adap, int onoff)
{
	if (az6027_usb_out_op(adap->dev, 0xC7, onoff, 0, NULL, 0) != 0)
		return -EIO;

	return 0;
}
/*
 * Attach the STB0899 demod and STB6100 tuner, hook in LNB voltage
 * control and initialize the CI slot. Always returns 0; on failure
 * fe_adap[0].fe is left NULL and a warning is logged.
 */
static int az6027_frontend_attach(struct dvb_usb_adapter *adap)
{
	az6027_frontend_poweron(adap);
	az6027_frontend_reset(adap);

	deb_info("adap = %p, dev = %p\n", adap, adap->dev);
	adap->fe_adap[0].fe = stb0899_attach(&az6027_stb0899_config, &adap->dev->i2c_adap);

	if (adap->fe_adap[0].fe) {
		deb_info("found STB0899 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb0899_config.demod_address);
		if (stb6100_attach(adap->fe_adap[0].fe, &az6027_stb6100_config, &adap->dev->i2c_adap)) {
			deb_info("found STB6100 DVB-S/DVB-S2 frontend @0x%02x", az6027_stb6100_config.tuner_address);
			adap->fe_adap[0].fe->ops.set_voltage = az6027_set_voltage;
			az6027_ci_init(adap);
		} else {
			/* tuner missing: drop the demod reference too */
			adap->fe_adap[0].fe = NULL;
		}
	} else
		warn("no front-end attached\n");

	/* leave TS routed through the CAM by default */
	az6027_frontend_tsbypass(adap, 0);

	return 0;
}
/* Forward declaration: defined below, needed by the probe callback. */
static struct dvb_usb_device_properties az6027_properties;
/* USB disconnect: tear down CI before the generic dvb-usb cleanup. */
static void az6027_usb_disconnect(struct usb_interface *intf)
{
	struct dvb_usb_device *d = usb_get_intfdata(intf);
	az6027_ci_uninit(d);
	dvb_usb_device_exit(intf);
}
/* USB probe: hand the device to the dvb-usb core with our properties. */
static int az6027_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	return dvb_usb_device_init(intf,
				   &az6027_properties,
				   THIS_MODULE,
				   NULL,
				   adapter_nr);
}
/* I2C */
/*
 * I2C algorithm .master_xfer: tunnel I2C transactions to the device
 * over USB vendor requests, dispatching on the slave address:
 *   0x99 - LNB supply controller (same device az6027_set_voltage uses)
 *   0xd0 - demodulator, 16-bit register addressing
 *   0xc0 - tuner, 8-bit register addressing
 * Returns the number of messages consumed, or -ENOMEM/-EAGAIN.
 *
 * NOTE(review): msg[].len is not validated against the 256-byte bounce
 * buffer and the in/out op return codes are ignored — confirm callers
 * only issue small, well-formed transfers.
 */
static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	int i = 0, j = 0, len = 0;
	u16 index;
	u16 value;
	int length;
	u8 req;
	u8 *data;

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
		kfree(data);
		return -EAGAIN;
	}

	if (num > 2)
		warn("more than 2 i2c messages at a time is not handled yet. TODO.");

	for (i = 0; i < num; i++) {

		if (msg[i].addr == 0x99) {
			/* single-byte write to the LNB controller */
			req = 0xBE;
			index = 0;
			value = msg[i].buf[0] & 0x00ff;
			length = 1;
			az6027_usb_out_op(d, req, value, index, data, length);
		}

		if (msg[i].addr == 0xd0) {
			/* write/read request */
			if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
				/* combined write-then-read: reply payload starts at data[5] */
				req = 0xB9;
				index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
				value = msg[i].addr + (msg[i].len << 8);
				length = msg[i + 1].len + 6;
				az6027_usb_in_op(d, req, value, index, data, length);
				len = msg[i + 1].len;
				for (j = 0; j < len; j++)
					msg[i + 1].buf[j] = data[j + 5];

				i++;	/* the read message was consumed too */
			} else {

				/* demod 16bit addr */
				req = 0xBD;
				index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
				value = msg[i].addr + (2 << 8);
				length = msg[i].len - 2;
				len = msg[i].len - 2;
				for (j = 0; j < len; j++)
					data[j] = msg[i].buf[j + 2];
				az6027_usb_out_op(d, req, value, index, data, length);
			}
		}

		if (msg[i].addr == 0xc0) {
			if (msg[i].flags & I2C_M_RD) {
				/* tuner read: reply payload starts at data[5] */
				req = 0xB9;
				index = 0x0;
				value = msg[i].addr;
				length = msg[i].len + 6;
				az6027_usb_in_op(d, req, value, index, data, length);
				len = msg[i].len;
				for (j = 0; j < len; j++)
					msg[i].buf[j] = data[j + 5];
			} else {
				/* tuner write: first buf byte is the register address */
				req = 0xBD;
				index = msg[i].buf[0] & 0x00FF;
				value = msg[i].addr + (1 << 8);
				length = msg[i].len - 1;
				len = msg[i].len - 1;
				for (j = 0; j < len; j++)
					data[j] = msg[i].buf[j + 1];
				az6027_usb_out_op(d, req, value, index, data, length);
			}
		}
	}
	mutex_unlock(&d->i2c_mutex);
	kfree(data);
	return i;
}
/* Advertise plain I2C capability (no SMBus emulation). */
static u32 az6027_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}
/* I2C algorithm glue binding the USB-tunnelled transfer above. */
static struct i2c_algorithm az6027_i2c_algo = {
	.master_xfer   = az6027_i2c_xfer,
	.functionality = az6027_i2c_func,
};
/*
 * Decide whether the device is "cold" (needs firmware upload) by
 * attempting a 6-byte vendor read (request 0xb7): a failed or empty
 * read means cold. Always returns 0 (or -ENOMEM).
 */
static int az6027_identify_state(struct usb_device *udev,
				 struct dvb_usb_device_properties *props,
				 struct dvb_usb_device_description **desc,
				 int *cold)
{
	u8 *b;
	s16 ret;

	b = kmalloc(16, GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	ret = usb_control_msg(udev,
			      usb_rcvctrlpipe(udev, 0),
			      0xb7,
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      6,
			      0,
			      b,
			      6,
			      USB_CTRL_GET_TIMEOUT);

	*cold = ret <= 0;	/* no bytes transferred -> firmware missing */
	kfree(b);
	deb_info("cold: %d\n", *cold);
	return 0;
}
/* USB IDs of all supported boards; indices are referenced by .devices below. */
static struct usb_device_id az6027_usb_table[] = {
	{ USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_AZ6027) },
	{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V1) },
	{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V2) },
	{ USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V1) },
	{ USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) },
	{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT) },
	{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT_V2) },
	{ },
};

MODULE_DEVICE_TABLE(usb, az6027_usb_table);
/* dvb-usb device description: one adapter, one frontend, bulk streaming. */
static struct dvb_usb_device_properties az6027_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = CYPRESS_FX2,
	.firmware = "dvb-usb-az6027-03.fw",
	.no_reconnect = 1,

	.size_of_priv = sizeof(struct az6027_device_state),
	.identify_state = az6027_identify_state,
	.num_adapters = 1,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.streaming_ctrl = az6027_streaming_ctrl,
			.frontend_attach = az6027_frontend_attach,

			/* parameter for the MPEG2-data transfer */
			.stream = {
				.type = USB_BULK,
				.count = 10,
				.endpoint = 0x02,
				.u = {
					.bulk = {
						.buffersize = 4096,
					}
				}
			},
		}},
		}
	},
/*
	.power_ctrl = az6027_power_ctrl,
	.read_mac_address = az6027_read_mac_addr,
*/
	.rc.legacy = {
		.rc_map_table = rc_map_az6027_table,
		.rc_map_size = ARRAY_SIZE(rc_map_az6027_table),
		.rc_interval = 400,
		.rc_query = az6027_rc_query,
	},

	.i2c_algo = &az6027_i2c_algo,

	/* keep in sync with az6027_usb_table above */
	.num_device_descs = 7,
	.devices = {
		{
			.name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)",
			.cold_ids = { &az6027_usb_table[0], NULL },
			.warm_ids = { NULL },
		}, {
			.name = "TERRATEC S7",
			.cold_ids = { &az6027_usb_table[1], NULL },
			.warm_ids = { NULL },
		}, {
			.name = "TERRATEC S7 MKII",
			.cold_ids = { &az6027_usb_table[2], NULL },
			.warm_ids = { NULL },
		}, {
			.name = "Technisat SkyStar USB 2 HD CI",
			.cold_ids = { &az6027_usb_table[3], NULL },
			.warm_ids = { NULL },
		}, {
			.name = "Technisat SkyStar USB 2 HD CI",
			.cold_ids = { &az6027_usb_table[4], NULL },
			.warm_ids = { NULL },
		}, {
			.name = "Elgato EyeTV Sat",
			.cold_ids = { &az6027_usb_table[5], NULL },
			.warm_ids = { NULL },
		}, {
			.name = "Elgato EyeTV Sat",
			.cold_ids = { &az6027_usb_table[6], NULL },
			.warm_ids = { NULL },
		},
		{ NULL },
	}
};
/* usb specific object needed to register this driver with the usb subsystem */
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver az6027_usb_driver = {
	.name		= "dvb_usb_az6027",
	.probe		= az6027_usb_probe,
	.disconnect	= az6027_usb_disconnect,
	.id_table	= az6027_usb_table,
};
module_usb_driver(az6027_usb_driver);
MODULE_AUTHOR("Adams Xu <Adams.xu@azwave.com.cn>");
MODULE_DESCRIPTION("Driver for AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
raphui/linux | drivers/scsi/snic/snic_isr.c | 627 | 5270 | /*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic_io.h"
#include "snic.h"
/*
* snic_isr_msix_wq : MSIx ISR for work queue.
*/
/*
 * snic_isr_msix_wq : MSIx ISR for work queue.
 * Drains all pending WQ completions and returns the consumed credits,
 * unmasking the interrupt and resetting its coalescing timer.
 */
static irqreturn_t
snic_isr_msix_wq(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long nr_done;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	nr_done = snic_wq_cmpl_handler(snic, -1 /* no budget */);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
				  nr_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_wq */
/*
 * MSI-X ISR for the firmware completion queue: drain IO completions
 * and return the consumed credits, unmasking and re-arming the intr.
 */
static irqreturn_t
snic_isr_msix_io_cmpl(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long iocmpl_work_done = 0;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
				  iocmpl_work_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_io_cmpl */
/*
 * MSI-X ISR for the error/notify vector: return all credits, log any
 * queue errors and process link-state changes.
 */
static irqreturn_t
snic_isr_msix_err_notify(int irq, void *data)
{
	struct snic *snic = data;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
	snic_log_q_error(snic);

	/*Handling link events */
	snic_handle_link_event(snic);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_err_notify */
/* Release every MSI-X vector that was successfully requested. */
void
snic_free_intr(struct snic *snic)
{
	int i;

	/* ONLY interrupt mode MSIX is supported */
	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		if (snic->msix[i].requested) {
			free_irq(snic->msix_entry[i].vector,
				 snic->msix[i].devid);
		}
	}
} /* end of snic_free_intr */
/*
 * Register the MSI-X interrupt handlers (WQ, IO completion and
 * error/notify vectors). Returns 0 on success, or the request_irq()
 * error after freeing any vectors that were already requested.
 *
 * Fix: corrected the "requrest_irq" typo in the logged error message
 * (and in the trailing comment).
 */
int
snic_request_intr(struct snic *snic)
{
	int ret = 0, i;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	/*
	 * Currently HW supports single WQ and CQ. So passing devid as snic.
	 * When hardware supports multiple WQs and CQs, one idea is
	 * to pass devid as corresponding WQ or CQ ptr and retrieve snic
	 * from queue ptr.
	 * Except for err_notify, which is always one.
	 */
	sprintf(snic->msix[SNIC_MSIX_WQ].devname,
		"%.11s-scsi-wq",
		snic->name);
	snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
	snic->msix[SNIC_MSIX_WQ].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
		"%.11s-io-cmpl",
		snic->name);
	snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
	snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
		"%.11s-err-notify",
		snic->name);
	snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
	snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;

	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		ret = request_irq(snic->msix_entry[i].vector,
				  snic->msix[i].isr,
				  0,
				  snic->msix[i].devname,
				  snic->msix[i].devid);
		if (ret) {
			SNIC_HOST_ERR(snic->shost,
				      "MSI-X: request_irq(%d) failed %d\n",
				      i,
				      ret);
			snic_free_intr(snic);
			break;
		}
		snic->msix[i].requested = 1;
	}

	return ret;
} /* end of snic_request_intr */
/*
 * Configure MSI-X: n WQ vectors, m IO-completion vectors and one
 * error/notify vector. Returns 0 on success; -EINVAL (with intr mode
 * set to UNKNOWN) if MSI-X cannot be enabled.
 */
int
snic_set_intr_mode(struct snic *snic)
{
	unsigned int n = ARRAY_SIZE(snic->wq);
	unsigned int m = SNIC_CQ_IO_CMPL_MAX;
	unsigned int i;

	/*
	 * We need n WQs, m CQs, and n+m+1 INTRs
	 * (last INTR is used for WQ/CQ errors and notification area
	 */
	BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
		     ARRAY_SIZE(snic->intr));
	SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));

	for (i = 0; i < (n + m + 1); i++)
		snic->msix_entry[i].entry = i;

	if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
		if (!pci_enable_msix(snic->pdev,
				     snic->msix_entry,
				     (n + m + 1))) {
			snic->wq_count = n;
			snic->cq_count = n + m;
			snic->intr_count = n + m + 1;
			snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;

			SNIC_ISR_DBG(snic->shost,
				     "Using MSI-X Interrupts\n");
			svnic_dev_set_intr_mode(snic->vdev,
						VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
} /* end of snic_set_intr_mode */
/* Disable MSI-X and fall back to the INTx interrupt mode setting. */
void
snic_clear_intr_mode(struct snic *snic)
{
	pci_disable_msix(snic->pdev);

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
| gpl-2.0 |
spacex/kernel-centos6 | net/rose/rose_out.c | 883 | 2841 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
static void rose_send_iframe(struct sock *sk, struct sk_buff *skb)
{
	struct rose_sock *rose = rose_sk(sk);

	if (skb == NULL)
		return;

	/* Fill in N(R) (bits 5-7) and N(S) (bits 1-3) of the control byte. */
	skb->data[2] |= (rose->vr << 5) & 0xE0;
	skb->data[2] |= (rose->vs << 1) & 0x0E;

	/* Data is flowing again: restart the idle timer before transmit. */
	rose_start_idletimer(sk);

	rose_transmit_link(skb, rose->neighbour);
}
/*
 * Transmit queued iframes while the send window (modulo ROSE_MODULUS)
 * has room. Each frame is cloned for transmission and the original is
 * moved to the ack queue until acknowledged. No-op unless the
 * connection is in state 3 and the peer is not busy.
 */
void rose_kick(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);
	struct sk_buff *skb, *skbn;
	unsigned short start, end;

	if (rose->state != ROSE_STATE_3)
		return;

	if (rose->condition & ROSE_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

	/* resume from V(S) if frames are already awaiting ack, else V(A) */
	start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
	end   = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS;

	if (start == end)
		return;		/* window full */

	rose->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full.
	 */

	skb  = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			/* clone failed: put the frame back and retry later */
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		rose_send_iframe(sk, skbn);

		rose->vs = (rose->vs + 1) % ROSE_MODULUS;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&rose->ack_queue, skb);

	} while (rose->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	/* sending iframes acknowledged everything received so far */
	rose->vl         = rose->vr;
	rose->condition &= ~ROSE_COND_ACK_PENDING;

	rose_stop_timer(sk);
}
/*
* The following routines are taken from page 170 of the 7th ARRL Computer
* Networking Conference paper, as is the whole state machine.
*/
/*
 * Answer a peer enquiry: RNR while our receiver is busy, RR otherwise,
 * then mark everything received so far as acknowledged.
 */
void rose_enquiry_response(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	rose_write_internal(sk,
			    (rose->condition & ROSE_COND_OWN_RX_BUSY) ?
			    ROSE_RNR : ROSE_RR);

	rose->vl = rose->vr;
	rose->condition &= ~ROSE_COND_ACK_PENDING;

	rose_stop_timer(sk);
}
| gpl-2.0 |
RyanAM/gs5-kernel | fs/ext4/acl.c | 1651 | 10042 | /*
* linux/fs/ext4/acl.c
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"
/*
* Convert from filesystem to in-memory representation.
*/
/*
 * Parse the on-disk xattr value into an in-memory posix_acl, validating
 * header version, entry tags and that the buffer is consumed exactly.
 * Returns NULL for an absent/empty ACL, ERR_PTR(-EINVAL) on malformed
 * input, ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct posix_acl *
ext4_acl_from_disk(const void *value, size_t size)
{
	const char *end = (char *)value + size;
	int n, count;
	struct posix_acl *acl;

	if (!value)
		return NULL;
	if (size < sizeof(ext4_acl_header))
		 return ERR_PTR(-EINVAL);
	if (((ext4_acl_header *)value)->a_version !=
	    cpu_to_le32(EXT4_ACL_VERSION))
		return ERR_PTR(-EINVAL);
	value = (char *)value + sizeof(ext4_acl_header);
	count = ext4_acl_count(size);
	if (count < 0)
		return ERR_PTR(-EINVAL);
	if (count == 0)
		return NULL;
	acl = posix_acl_alloc(count, GFP_NOFS);
	if (!acl)
		return ERR_PTR(-ENOMEM);
	for (n = 0; n < count; n++) {
		ext4_acl_entry *entry =
			(ext4_acl_entry *)value;
		/* bounds-check the short (id-less) part before reading it */
		if ((char *)value + sizeof(ext4_acl_entry_short) > end)
			goto fail;
		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);

		switch (acl->a_entries[n].e_tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			/* these tags carry no qualifier id on disk */
			value = (char *)value +
				sizeof(ext4_acl_entry_short);
			acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
			break;

		case ACL_USER:
		case ACL_GROUP:
			/* full entry including the uid/gid qualifier */
			value = (char *)value + sizeof(ext4_acl_entry);
			if ((char *)value > end)
				goto fail;
			acl->a_entries[n].e_id =
				le32_to_cpu(entry->e_id);
			break;

		default:
			goto fail;
		}
	}
	/* trailing bytes after the last entry indicate corruption */
	if (value != end)
		goto fail;
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
/*
* Convert from in-memory to filesystem representation.
*/
/*
 * Serialize an in-memory posix_acl into a freshly allocated on-disk
 * xattr value, storing the resulting length in *size.
 * Returns the buffer (caller frees), ERR_PTR(-ENOMEM), or
 * ERR_PTR(-EINVAL) for an unknown entry tag.
 */
static void *
ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
{
	ext4_acl_header *ext_acl;
	char *e;
	size_t n;

	*size = ext4_acl_size(acl->a_count);
	ext_acl = kmalloc(sizeof(ext4_acl_header) + acl->a_count *
			sizeof(ext4_acl_entry), GFP_NOFS);
	if (!ext_acl)
		return ERR_PTR(-ENOMEM);
	ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
	e = (char *)ext_acl + sizeof(ext4_acl_header);
	for (n = 0; n < acl->a_count; n++) {
		ext4_acl_entry *entry = (ext4_acl_entry *)e;
		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
		switch (acl->a_entries[n].e_tag) {
		case ACL_USER:
		case ACL_GROUP:
			/* only these tags store the qualifier id on disk */
			entry->e_id = cpu_to_le32(acl->a_entries[n].e_id);
			e += sizeof(ext4_acl_entry);
			break;

		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			e += sizeof(ext4_acl_entry_short);
			break;

		default:
			goto fail;
		}
	}
	return (char *)ext_acl;

fail:
	kfree(ext_acl);
	return ERR_PTR(-EINVAL);
}
/*
* Inode operation get_posix_acl().
*
* inode->i_mutex: don't care
*/
/*
 * Inode operation get_posix_acl(): fetch the ACL of @type, serving from
 * the inode's ACL cache when possible and caching the result otherwise.
 * Returns the ACL, NULL if none, or an ERR_PTR.
 */
struct posix_acl *
ext4_get_acl(struct inode *inode, int type)
{
	int name_index;
	char *value = NULL;
	struct posix_acl *acl;
	int retval;

	if (!test_opt(inode->i_sb, POSIX_ACL))
		return NULL;

	acl = get_cached_acl(inode, type);
	if (acl != ACL_NOT_CACHED)
		return acl;

	switch (type) {
	case ACL_TYPE_ACCESS:
		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		BUG();
	}

	/* first call sizes the xattr, second call reads it */
	retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
	if (retval > 0) {
		value = kmalloc(retval, GFP_NOFS);
		if (!value)
			return ERR_PTR(-ENOMEM);
		retval = ext4_xattr_get(inode, name_index, "", value, retval);
	}
	if (retval > 0)
		acl = ext4_acl_from_disk(value, retval);
	else if (retval == -ENODATA || retval == -ENOSYS)
		acl = NULL;	/* no ACL stored: not an error */
	else
		acl = ERR_PTR(retval);
	kfree(value);

	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);

	return acl;
}
/*
* Set the access or default ACL of an inode.
*
* inode->i_mutex: down unless called from ext4_new_inode
*/
/*
 * Store the access or default ACL of @inode as an xattr inside the
 * given journal handle, updating the ACL cache on success.
 * Returns 0 or a negative errno.
 */
static int
ext4_set_acl(handle_t *handle, struct inode *inode, int type,
	     struct posix_acl *acl)
{
	int name_index;
	void *value = NULL;
	size_t size = 0;
	int error;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	switch (type) {
	case ACL_TYPE_ACCESS:
		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
		if (acl) {
			/* fold the ACL into i_mode when it is equivalent */
			error = posix_acl_equiv_mode(acl, &inode->i_mode);
			if (error < 0)
				return error;
			else {
				inode->i_ctime = ext4_current_time(inode);
				ext4_mark_inode_dirty(handle, inode);
				if (error == 0)
					acl = NULL;	/* mode bits suffice; no xattr needed */
			}
		}
		break;

	case ACL_TYPE_DEFAULT:
		name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
		/* default ACLs exist only on directories */
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		break;

	default:
		return -EINVAL;
	}
	if (acl) {
		value = ext4_acl_to_disk(acl, &size);
		if (IS_ERR(value))
			return (int)PTR_ERR(value);
	}

	/* acl == NULL serializes to an empty value, removing the xattr */
	error = ext4_xattr_set_handle(handle, inode, name_index, "",
				      value, size, 0);

	kfree(value);
	if (!error)
		set_cached_acl(inode, type, acl);

	return error;
}
/*
* Initialize the ACLs of a new inode. Called from ext4_new_inode.
*
* dir->i_mutex: down
* inode->i_mutex: up (access to inode is still exclusive)
*/
/*
 * Initialize the ACLs of a new inode from the parent directory's
 * default ACL (or apply the umask when there is none).
 * Returns 0 or a negative errno.
 */
int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
	struct posix_acl *acl = NULL;
	int error = 0;

	if (!S_ISLNK(inode->i_mode)) {
		if (test_opt(dir->i_sb, POSIX_ACL)) {
			acl = ext4_get_acl(dir, ACL_TYPE_DEFAULT);
			if (IS_ERR(acl))
				return PTR_ERR(acl);
		}
		if (!acl)
			inode->i_mode &= ~current_umask();
	}
	if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
		if (S_ISDIR(inode->i_mode)) {
			/* new directories inherit the default ACL too */
			error = ext4_set_acl(handle, inode,
					     ACL_TYPE_DEFAULT, acl);
			if (error)
				goto cleanup;
		}
		error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
		/*
		 * NOTE(review): this early return skips the release below;
		 * it relies on posix_acl_create() having consumed/released
		 * 'acl' on error — confirm against fs/posix_acl.c.
		 */
		if (error < 0)
			return error;

		if (error > 0) {
			/* This is an extended ACL */
			error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
		}
	}
cleanup:
	posix_acl_release(acl);
	return error;
}
/*
* Does chmod for an inode that may have an Access Control List. The
* inode->i_mode field must be updated to the desired value by the caller
* before calling this function.
* Returns 0 on success, or a negative error number.
*
* We change the ACL rather than storing some ACL entries in the file
* mode permission bits (which would be more efficient), because that
* would break once additional permissions (like ACL_APPEND, ACL_DELETE
* for directories) are added. There are no more bits available in the
* file mode.
*
* inode->i_mutex: down
*/
int
ext4_acl_chmod(struct inode *inode)
{
	struct posix_acl *acl;
	handle_t *handle;
	int retries = 0;
	int error;


	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;
	if (!test_opt(inode->i_sb, POSIX_ACL))
		return 0;
	acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
	/*
	 * NOTE: when acl is NULL this returns PTR_ERR(NULL) == 0, i.e.
	 * success with nothing to do — intentional but easy to misread.
	 */
	if (IS_ERR(acl) || !acl)
		return PTR_ERR(acl);
	/* rebuild the ACL to reflect the new mode bits */
	error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
	if (error)
		return error;
retry:
	handle = ext4_journal_start(inode,
			EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		ext4_std_error(inode->i_sb, error);
		goto out;
	}
	error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
	ext4_journal_stop(handle);
	/* retry once the allocator has flushed some space */
	if (error == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	posix_acl_release(acl);
	return error;
}
/*
* Extended attribute handlers
*/
/*
 * xattr .list: advertise the system.posix_acl_access name.
 * Returns the name's size (including NUL); copies it only when the
 * caller's buffer is large enough. 0 when ACLs are mounted off.
 */
static size_t
ext4_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_len,
			   const char *name, size_t name_len, int type)
{
	const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);

	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return 0;
	if (list && size <= list_len)
		memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
	return size;
}
/* xattr .list: same as above for system.posix_acl_default. */
static size_t
ext4_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_len,
			    const char *name, size_t name_len, int type)
{
	const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);

	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return 0;
	if (list && size <= list_len)
		memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
	return size;
}
/*
 * xattr .get: export the inode's ACL in the userspace xattr format.
 * The name must be empty (the handler prefix is the full name).
 */
static int
ext4_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
		   size_t size, int type)
{
	struct posix_acl *acl;
	int error;

	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return -EOPNOTSUPP;

	acl = ext4_get_acl(dentry->d_inode, type);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
		return -ENODATA;
	error = posix_acl_to_xattr(acl, buffer, size);
	posix_acl_release(acl);

	return error;
}
/*
 * xattr .set: install (or, with a NULL value, remove) an ACL supplied
 * in the userspace xattr format. Requires ownership or CAP_FOWNER;
 * retries the journalled update on transient ENOSPC.
 */
static int
ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
		   size_t size, int flags, int type)
{
	struct inode *inode = dentry->d_inode;
	handle_t *handle;
	struct posix_acl *acl;
	int error, retries = 0;

	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!test_opt(inode->i_sb, POSIX_ACL))
		return -EOPNOTSUPP;
	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (value) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		else if (acl) {
			/* reject structurally invalid ACLs up front */
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	} else
		acl = NULL;	/* NULL value means "remove the ACL" */

retry:
	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		goto release_and_out;
	}
	error = ext4_set_acl(handle, inode, type, acl);
	ext4_journal_stop(handle);
	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

release_and_out:
	posix_acl_release(acl);
	return error;
}
/* xattr handlers binding the system.posix_acl_* names to the ops above. */
const struct xattr_handler ext4_xattr_acl_access_handler = {
	.prefix	= POSIX_ACL_XATTR_ACCESS,
	.flags	= ACL_TYPE_ACCESS,
	.list	= ext4_xattr_list_acl_access,
	.get	= ext4_xattr_get_acl,
	.set	= ext4_xattr_set_acl,
};

const struct xattr_handler ext4_xattr_acl_default_handler = {
	.prefix	= POSIX_ACL_XATTR_DEFAULT,
	.flags	= ACL_TYPE_DEFAULT,
	.list	= ext4_xattr_list_acl_default,
	.get	= ext4_xattr_get_acl,
	.set	= ext4_xattr_set_acl,
};
| gpl-2.0 |
TenchiMasaki/android_kernel_asus_moorefield | arch/x86/platform/olpc/olpc-xo1-pm.c | 2675 | 4809 | /*
* Support for power management features of the OLPC XO-1 laptop
*
* Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
* Copyright (C) 2010 One Laptop per Child
* Copyright (C) 2006 Red Hat, Inc.
* Copyright (C) 2006 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/cs5535.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/mfd/core.h>
#include <linux/suspend.h>
#include <linux/olpc-ec.h>
#include <asm/io.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo1-pm"
/* I/O bases of the CS5536 ACPI and PMS register blocks (set at probe). */
static unsigned long acpi_base;
static unsigned long pms_base;
/* Events allowed to wake the machine; the power button always is. */
static u16 wakeup_mask = CS5536_PM_PWRBTN;

/* Entry point into the Open Firmware BIOS, called from xo1_do_sleep(). */
static struct {
	unsigned long address;
	unsigned short segment;
} ofw_bios_entry = { 0xF0000 + PAGE_OFFSET, __KERNEL_CS };
/* Set bits in the wakeup mask */
/* Set bits in the wakeup mask (@value: CS5536_PM_* event bits to enable). */
void olpc_xo1_pm_wakeup_set(u16 value)
{
	wakeup_mask |= value;
}
EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_set);
/* Disable wakeup sources: clear @value's bits from the mask. */
void olpc_xo1_pm_wakeup_clear(u16 value)
{
	wakeup_mask = wakeup_mask & (u16)~value;
}
EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_clear);
/*
 * Enter suspend-to-RAM.  Saves the SCI wakeup mask (which is clobbered
 * because PM1_EN doubles as the wakeup-event mask), suspends through the
 * low-level assembly helper, and restores the mask on the resume path.
 * Returns 0 on success, -EINVAL for any state other than PM_SUSPEND_MEM.
 */
static int xo1_power_state_enter(suspend_state_t pm_state)
{
unsigned long saved_sci_mask;
/* Only STR is supported */
if (pm_state != PM_SUSPEND_MEM)
return -EINVAL;
/*
 * Save SCI mask (this gets lost since PM1_EN is used as a mask for
 * wakeup events, which is not necessarily the same event set)
 */
saved_sci_mask = inl(acpi_base + CS5536_PM1_STS);
saved_sci_mask &= 0xffff0000;
/* Save CPU state */
do_olpc_suspend_lowlevel();
/* Resume path starts here */
/* Restore SCI mask (using dword access to CS5536_PM1_EN) */
outl(saved_sci_mask, acpi_base + CS5536_PM1_STS);
return 0;
}
/*
 * Hand control to the firmware to put the machine to sleep.
 * NOTE(review): asmlinkage and the register set-up suggest this is called
 * from do_olpc_suspend_lowlevel() assembly - confirm against that file.
 */
asmlinkage int xo1_do_sleep(u8 sleep_state)
{
void *pgd_addr = __va(read_cr3());   /* virtual address of current page tables */
/* Program wakeup mask (using dword access to CS5536_PM1_EN) */
outl(wakeup_mask << 16, acpi_base + CS5536_PM1_STS);
/* Pass the page-directory address in %eax, then make a far-ish call
 * through the ofw_bios_entry descriptor (address in (%edi)). */
__asm__("movl %0,%%eax" : : "r" (pgd_addr));
__asm__("call *(%%edi); cld"
: : "D" (&ofw_bios_entry));
/* Write 0x30 to CMOS/RTC register 0x34 via index/data ports 0x70/0x71;
 * purpose not evident from this file - TODO confirm against firmware docs. */
__asm__("movb $0x34, %al\n\t"
"outb %al, $0x70\n\t"
"movb $0x30, %al\n\t"
"outb %al, $0x71\n\t");
return 0;
}
/*
 * pm_power_off hook: drive the CS5536 power-management registers to cut
 * power.  Installed by xo1_pm_probe() once both register bases are known.
 */
static void xo1_power_off(void)
{
printk(KERN_INFO "OLPC XO-1 power off sequence...\n");
/* Enable all of these controls with 0 delay */
outl(0x40000000, pms_base + CS5536_PM_SCLK);
outl(0x40000000, pms_base + CS5536_PM_IN_SLPCTL);
outl(0x40000000, pms_base + CS5536_PM_WKXD);
outl(0x40000000, pms_base + CS5536_PM_WKD);
/* Clear status bits (possibly unnecessary) */
outl(0x0002ffff, pms_base + CS5536_PM_SSC);
outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
/* Write SLP_EN bit to start the machinery */
outl(0x00002000, acpi_base + CS5536_PM1_CNT);
}
/* Tell the PM core which suspend states we support: STR only. */
static int xo1_power_state_valid(suspend_state_t pm_state)
{
	if (pm_state == PM_SUSPEND_MEM)
		return 1;
	return 0;
}
/* Suspend callbacks registered via suspend_set_ops() in xo1_pm_probe(). */
static const struct platform_suspend_ops xo1_suspend_ops = {
.valid = xo1_power_state_valid,
.enter = xo1_power_state_enter,
};
/*
 * Shared probe for the two MFD cells (PMS and ACPI register blocks).
 * Records each cell's I/O base; once both are known, registers the
 * suspend ops and installs the poweroff hook.
 */
static int xo1_pm_probe(struct platform_device *pdev)
{
struct resource *res;
int err;
/* don't run on non-XOs */
if (!machine_is_olpc())
return -ENODEV;
err = mfd_cell_enable(pdev);
if (err)
return err;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
/* NOTE(review): the MFD cell stays enabled on this error path -
 * confirm whether mfd_cell_disable() should be called here. */
dev_err(&pdev->dev, "can't fetch device resource info\n");
return -EIO;
}
if (strcmp(pdev->name, "cs5535-pms") == 0)
pms_base = res->start;
else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
acpi_base = res->start;
/* If we have both addresses, we can override the poweroff hook */
if (pms_base && acpi_base) {
suspend_set_ops(&xo1_suspend_ops);
pm_power_off = xo1_power_off;
printk(KERN_INFO "OLPC XO-1 support registered\n");
}
return 0;
}
/*
 * Shared remove for both MFD cells: disable the cell, forget its I/O
 * base, and drop the poweroff hook (done unconditionally, so removing
 * either cell disables poweroff support).
 */
static int xo1_pm_remove(struct platform_device *pdev)
{
mfd_cell_disable(pdev);
if (strcmp(pdev->name, "cs5535-pms") == 0)
pms_base = 0;
else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
acpi_base = 0;
pm_power_off = NULL;
return 0;
}
/* Platform drivers for the two MFD cells; both share probe/remove,
 * which dispatch on pdev->name. */
static struct platform_driver cs5535_pms_driver = {
.driver = {
.name = "cs5535-pms",
.owner = THIS_MODULE,
},
.probe = xo1_pm_probe,
.remove = xo1_pm_remove,
};
static struct platform_driver cs5535_acpi_driver = {
.driver = {
.name = "olpc-xo1-pm-acpi",
.owner = THIS_MODULE,
},
.probe = xo1_pm_probe,
.remove = xo1_pm_remove,
};
/*
 * Register both platform drivers; if the second registration fails,
 * roll back the first so no half-initialised state remains.
 */
static int __init xo1_pm_init(void)
{
	int err;

	err = platform_driver_register(&cs5535_pms_driver);
	if (err)
		return err;

	err = platform_driver_register(&cs5535_acpi_driver);
	if (err)
		platform_driver_unregister(&cs5535_pms_driver);

	return err;
}
arch_initcall(xo1_pm_init);
| gpl-2.0 |
C457/android_kernel_samsung_corsica | drivers/s390/char/sclp_config.c | 2931 | 1758 | /*
* drivers/s390/char/sclp_config.c
*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_config"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/sysdev.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
#include "sclp.h"
/* Payload of a configuration-management event buffer (wire format,
 * hence packed). */
struct conf_mgm_data {
u8 reserved;
u8 ev_qualifier; /* which configuration change occurred */
} __attribute__((packed));
/* ev_qualifier values handled by sclp_conf_receiver_fn() */
#define EV_QUAL_CPU_CHANGE 1
#define EV_QUAL_CAP_CHANGE 3
/* The event callback cannot do heavyweight work, so rescans and uevents
 * are deferred to these work items. */
static struct work_struct sclp_cpu_capability_work;
static struct work_struct sclp_cpu_change_work;
/*
 * Workqueue handler for a CPU-capability change: re-derive the jiffies
 * adjustment and emit a KOBJ_CHANGE uevent for every online CPU so
 * userspace can react.
 */
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
struct sys_device *sysdev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n");
get_online_cpus();   /* hold off hotplug while we walk the online mask */
for_each_online_cpu(cpu) {
sysdev = get_cpu_sysdev(cpu);
kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
/* Workqueue handler for a CPU-configuration change: rescan for CPUs.
 * __ref: smp_rescan_cpus() may reference init-section code/data. */
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
smp_rescan_cpus();
}
/*
 * SCLP event callback: the configuration-management payload follows the
 * event buffer header; dispatch on its qualifier to the matching work
 * item.  Unknown qualifiers are silently ignored.
 */
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
{
struct conf_mgm_data *cdata;
cdata = (struct conf_mgm_data *)(evbuf + 1);  /* payload starts right after the header */
switch (cdata->ev_qualifier) {
case EV_QUAL_CPU_CHANGE:
schedule_work(&sclp_cpu_change_work);
break;
case EV_QUAL_CAP_CHANGE:
schedule_work(&sclp_cpu_capability_work);
break;
}
}
/* Registration record: receive configuration-management data events. */
static struct sclp_register sclp_conf_register =
{
.receive_mask = EVTYP_CONFMGMDATA_MASK,
.receiver_fn = sclp_conf_receiver_fn,
};
/* Module init: prepare the deferred-work items, then register with SCLP. */
static int __init sclp_conf_init(void)
{
INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
return sclp_register(&sclp_conf_register);
}
__initcall(sclp_conf_init);
| gpl-2.0 |
SlimDev/kernel_samsung_msm8660-common | drivers/uwb/lc-rc.c | 2931 | 11113 | /*
* Ultra Wide Band
* Life cycle of radio controllers
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
*
* A UWB radio controller is also a UWB device, so it embeds one...
*
* List of RCs comes from the 'struct class uwb_rc_class'.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include "uwb-internal.h"
/* class_find_device() match callback: does this rc carry index *data? */
static int uwb_rc_index_match(struct device *dev, void *data)
{
	const int *index = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	return rc->index == *index;
}
/*
 * Look up a registered radio controller by index.
 *
 * class_find_device() returns the matched device with its reference
 * count raised; we only need the driver-data pointer, so drop that
 * reference before returning (it was previously leaked).  The returned
 * rc is only used by uwb_rc_new_index() as an existence test.
 */
static struct uwb_rc *uwb_rc_find_by_index(int index)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
	if (dev) {
		rc = dev_get_drvdata(dev);
		put_device(dev);	/* drop the class_find_device() reference */
	}

	return rc;
}
/*
 * Pick the lowest currently-unused rc index, wrapping to 0 on signed
 * overflow.  Only called while allocating a new radio controller.
 */
static int uwb_rc_new_index(void)
{
	int index = 0;

	while (uwb_rc_find_by_index(index)) {
		if (++index < 0)
			index = 0;
	}
	return index;
}
/**
 * Release the backing device of a uwb_rc that has been dynamically allocated.
 *
 * device release callback: frees the IE state and the containing uwb_rc
 * (which embeds the uwb_dev that embeds this struct device).
 */
static void uwb_rc_sys_release(struct device *dev)
{
struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);
uwb_rc_ie_release(rc);
kfree(rc);
}
/*
 * Initialise an already-allocated radio controller: set up the embedded
 * uwb_dev, wire the class/release callbacks, and initialise every
 * subsystem's per-rc state (NEH, notification chain, beacon cache, DRP
 * availability, IEs, reservations, PALs).  Does not register anything.
 */
void uwb_rc_init(struct uwb_rc *rc)
{
struct uwb_dev *uwb_dev = &rc->uwb_dev;
uwb_dev_init(uwb_dev);
rc->uwb_dev.dev.class = &uwb_rc_class;
rc->uwb_dev.dev.release = uwb_rc_sys_release;
uwb_rc_neh_create(rc);
rc->beaconing = -1;   /* not beaconing yet */
rc->scan_type = UWB_SCAN_DISABLED;
INIT_LIST_HEAD(&rc->notifs_chain.list);
mutex_init(&rc->notifs_chain.mutex);
INIT_LIST_HEAD(&rc->uwb_beca.list);
mutex_init(&rc->uwb_beca.mutex);
uwb_drp_avail_init(rc);
uwb_rc_ie_init(rc);
uwb_rsv_init(rc);
uwb_rc_pal_init(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_init);
/* Allocate and initialise a radio controller; NULL on allocation failure. */
struct uwb_rc *uwb_rc_alloc(void)
{
	struct uwb_rc *rc = kzalloc(sizeof(*rc), GFP_KERNEL);

	if (rc)
		uwb_rc_init(rc);
	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_alloc);
/* sysfs attributes exported by every radio controller. */
static struct attribute *rc_attrs[] = {
&dev_attr_mac_address.attr,
&dev_attr_scan.attr,
&dev_attr_beacon.attr,
NULL,
};
static struct attribute_group rc_attr_group = {
.attrs = rc_attrs,
};
/*
 * Registration of sysfs specific stuff
 */
/* Create the rc attribute group under the device's kobject. */
static int uwb_rc_sys_add(struct uwb_rc *rc)
{
return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
/* Tear down what uwb_rc_sys_add() created. */
static void __uwb_rc_sys_rm(struct uwb_rc *rc)
{
sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
/**
 * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
 * @rc: the radio controller.
 *
 * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
 * then a random locally administered EUI-48 is generated and set on
 * the device. The probability of address collisions is sufficiently
 * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
 *
 * Returns 0 on success or the negative error from the get/set commands.
 */
static
int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_dev *uwb_dev = &rc->uwb_dev;
char devname[UWB_ADDR_STRSIZE];
struct uwb_mac_addr addr;
result = uwb_rc_mac_addr_get(rc, &addr);
if (result < 0) {
dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
return result;
}
if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
addr.data[0] = 0x02; /* locally administered and unicast */
get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
result = uwb_rc_mac_addr_set(rc, &addr);
if (result < 0) {
uwb_mac_addr_print(devname, sizeof(devname), &addr);
dev_err(dev, "cannot set EUI-48 address %s: %d\n",
devname, result);
return result;
}
}
uwb_dev->mac_addr = addr;   /* cache the (possibly new) address */
return 0;
}
/*
 * Bring up a started radio controller: radio, MAC address, DevAddr,
 * IE subsystem, reservation subsystem, then debugfs hooks.  On failure
 * the subsystems set up so far are unwound (goto-cleanup chain).
 * NOTE(review): radio/MAC/DevAddr setup has no explicit undo here -
 * presumably torn down by the caller's error path; confirm.
 */
static int uwb_rc_setup(struct uwb_rc *rc)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
result = uwb_radio_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup UWB radio: %d\n", result);
goto error;
}
result = uwb_rc_mac_addr_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
goto error;
}
result = uwb_rc_dev_addr_assign(rc);
if (result < 0) {
dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
goto error;
}
result = uwb_rc_ie_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup IE subsystem: %d\n", result);
goto error_ie_setup;
}
result = uwb_rsv_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
goto error_rsv_setup;
}
uwb_dbg_add_rc(rc);
return 0;
error_rsv_setup:
uwb_rc_ie_release(rc);
error_ie_setup:
error:
return result;
}
/**
 * Register a new UWB radio controller
 *
 * Did you call uwb_rc_init() on your rc?
 *
 * We assume that this is being called with a > 0 refcount on
 * it [through ops->{get|put}_device(). We'll take our own, though.
 *
 * @parent_dev is our real device, the one that provides the actual UWB device
 *
 * Sequence: pick an index/name, start the event daemon and hardware,
 * run uwb_rc_setup(), register the uwb device and sysfs attributes,
 * then mark the rc ready.  Each failure unwinds the steps before it.
 */
int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
{
int result;
struct device *dev;
char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
rc->index = uwb_rc_new_index();
dev = &rc->uwb_dev.dev;
dev_set_name(dev, "uwb%d", rc->index);
rc->priv = priv;
init_waitqueue_head(&rc->uwbd.wq);
INIT_LIST_HEAD(&rc->uwbd.event_list);
spin_lock_init(&rc->uwbd.event_list_lock);
uwbd_start(rc);   /* event-processing thread must run before hw start */
result = rc->start(rc);
if (result < 0)
goto error_rc_start;
result = uwb_rc_setup(rc);
if (result < 0) {
dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
goto error_rc_setup;
}
/* -EADDRNOTAVAIL is tolerated: device is added without an address */
result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
if (result < 0 && result != -EADDRNOTAVAIL)
goto error_dev_add;
result = uwb_rc_sys_add(rc);
if (result < 0) {
dev_err(parent_dev, "cannot register UWB radio controller "
"dev attributes: %d\n", result);
goto error_sys_add;
}
uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
dev_info(dev,
"new uwb radio controller (mac %s dev %s) on %s %s\n",
macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
rc->ready = 1;   /* now visible to the lookup helpers below */
return 0;
error_sys_add:
uwb_dev_rm(&rc->uwb_dev);
error_dev_add:
error_rc_setup:
rc->stop(rc);
error_rc_start:
uwbd_stop(rc);
return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_add);
/* Per-device iterator callback: mark one child device as off the air. */
static int uwb_dev_offair_helper(struct device *dev, void *priv)
{
	struct uwb_dev *udev = to_uwb_dev(dev);

	return __uwb_dev_offair(udev, udev->rc);
}
/*
 * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
 *
 * Clears rc->ready first so the lookup helpers stop handing out the rc,
 * then shuts down subsystems in reverse of uwb_rc_add()'s order.  The
 * priv/cmd pointers are nulled under the device lock so in-flight users
 * see a consistent state.
 */
void uwb_rc_rm(struct uwb_rc *rc)
{
rc->ready = 0;
uwb_dbg_del_rc(rc);
uwb_rsv_remove_all(rc);
uwb_radio_shutdown(rc);
rc->stop(rc);
uwbd_stop(rc);
uwb_rc_neh_destroy(rc);
uwb_dev_lock(&rc->uwb_dev);
rc->priv = NULL;
rc->cmd = NULL;
uwb_dev_unlock(&rc->uwb_dev);
mutex_lock(&rc->uwb_beca.mutex);
uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
__uwb_rc_sys_rm(rc);
mutex_unlock(&rc->uwb_beca.mutex);
uwb_rsv_cleanup(rc);
uwb_beca_release(rc);
uwb_dev_rm(&rc->uwb_dev);
}
EXPORT_SYMBOL_GPL(uwb_rc_rm);
/*
 * class_find_device() match callback: match only the target rc, and
 * only while it is ready (not being torn down).  A device without
 * drvdata indicates a bug; warn and skip it.
 */
static int find_rc_try_get(struct device *dev, void *data)
{
	struct uwb_rc *target_rc = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	if (rc == NULL) {
		WARN_ON(1);
		return 0;
	}
	return (rc == target_rc) && (rc->ready != 0);
}
/**
 * Given a radio controller descriptor, validate and refcount it
 *
 * @returns NULL if the rc does not exist or is quiescing; the ptr to
 * it otherwise.
 *
 * The rc reference the caller receives is taken via __uwb_rc_get();
 * the extra device reference that class_find_device() acquires is
 * dropped here instead of being leaked.
 */
struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, target_rc,
				find_rc_try_get);
	if (dev) {
		rc = dev_get_drvdata(dev);
		__uwb_rc_get(rc);
		put_device(dev);	/* drop the class_find_device() reference */
	}

	return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
/*
 * RC get for external refcount acquirers...
 *
 * Increments the refcount of the device and it's backend modules
 *
 * Returns NULL (without taking a reference) if the rc is quiescing.
 */
static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
{
if (rc->ready == 0)
return NULL;
uwb_dev_get(&rc->uwb_dev);
return rc;
}
/*
 * class_find_device() match callback: match the rc whose device's
 * grandparent is @data.  On a match, a reference is taken here via
 * uwb_rc_get() on behalf of the caller (uwb_rc_get_by_grandpa()).
 */
static int find_rc_grandpa(struct device *dev, void *data)
{
struct device *grandpa_dev = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
rc = uwb_rc_get(rc);
return 1;
}
return 0;
}
/**
 * Locate and refcount a radio controller given a common grand-parent
 *
 * @grandpa_dev Pointer to the 'grandparent' device structure.
 * @returns NULL If the rc does not exist or is quiescing; the ptr to
 * it otherwise, properly referenced.
 *
 * The Radio Control interface (or the UWB Radio Controller) is always
 * an interface of a device. The parent is the interface, the
 * grandparent is the device that encapsulates the interface.
 *
 * The rc reference is taken by find_rc_grandpa() via uwb_rc_get();
 * the device reference class_find_device() acquires is dropped here
 * instead of being leaked.
 */
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
				find_rc_grandpa);
	if (dev) {
		rc = dev_get_drvdata(dev);
		put_device(dev);	/* drop the class_find_device() reference */
	}

	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
/**
 * Find a radio controller by device address
 *
 * @returns the pointer to the radio controller, properly referenced
 *
 * (The comment above describes uwb_rc_get_by_dev(), for which this is
 * the class_find_device() match callback.)  On an address match, a
 * reference is taken here via uwb_rc_get() on behalf of the caller.
 */
static int find_rc_dev(struct device *dev, void *data)
{
struct uwb_dev_addr *addr = data;
struct uwb_rc *rc = dev_get_drvdata(dev);
if (rc == NULL) {
WARN_ON(1);   /* drvdata should always be set for registered rcs */
return 0;
}
if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
rc = uwb_rc_get(rc);
return 1;
}
return 0;
}
/*
 * Find and refcount a radio controller by its UWB device address.
 *
 * The rc reference is taken by find_rc_dev() via uwb_rc_get(); the
 * device reference class_find_device() acquires is dropped here
 * instead of being leaked.  Returns NULL if no ready rc matches.
 */
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
				find_rc_dev);
	if (dev) {
		rc = dev_get_drvdata(dev);
		put_device(dev);	/* drop the class_find_device() reference */
	}

	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
/**
 * Drop a reference on a radio controller
 *
 * This is the version that should be done by entities external to the
 * UWB Radio Control stack (ie: clients of the API).
 */
void uwb_rc_put(struct uwb_rc *rc)
{
__uwb_rc_put(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_put);
| gpl-2.0 |
aatjitra/M7 | arch/arm/mach-ixp23xx/core.c | 4723 | 11410 | /*
* arch/arm/mach-ixp23xx/core.c
*
* Core routines for IXP23xx chips
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Based on 2.4 code Copyright 2004 (c) Intel Corporation
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/bitops.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/mach/arch.h>
/*************************************************************************
* Chip specific mappings shared by all IXP23xx systems
*************************************************************************/
/* Static virtual->physical mappings for the IXP23xx on-chip register
 * windows and PCI spaces, installed once at boot by ixp23xx_map_io(). */
static struct map_desc ixp23xx_io_desc[] __initdata = {
{ /* XSI-CPP CSRs */
.virtual = IXP23XX_XSI2CPP_CSR_VIRT,
.pfn = __phys_to_pfn(IXP23XX_XSI2CPP_CSR_PHYS),
.length = IXP23XX_XSI2CPP_CSR_SIZE,
.type = MT_DEVICE,
}, { /* Expansion Bus Config */
.virtual = IXP23XX_EXP_CFG_VIRT,
.pfn = __phys_to_pfn(IXP23XX_EXP_CFG_PHYS),
.length = IXP23XX_EXP_CFG_SIZE,
.type = MT_DEVICE,
}, { /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACS,.... */
.virtual = IXP23XX_PERIPHERAL_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PERIPHERAL_PHYS),
.length = IXP23XX_PERIPHERAL_SIZE,
.type = MT_DEVICE,
}, { /* CAP CSRs */
.virtual = IXP23XX_CAP_CSR_VIRT,
.pfn = __phys_to_pfn(IXP23XX_CAP_CSR_PHYS),
.length = IXP23XX_CAP_CSR_SIZE,
.type = MT_DEVICE,
}, { /* MSF CSRs */
.virtual = IXP23XX_MSF_CSR_VIRT,
.pfn = __phys_to_pfn(IXP23XX_MSF_CSR_PHYS),
.length = IXP23XX_MSF_CSR_SIZE,
.type = MT_DEVICE,
}, { /* PCI I/O Space */
.virtual = IXP23XX_PCI_IO_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_IO_PHYS),
.length = IXP23XX_PCI_IO_SIZE,
.type = MT_DEVICE,
}, { /* PCI Config Space */
.virtual = IXP23XX_PCI_CFG_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_CFG_PHYS),
.length = IXP23XX_PCI_CFG_SIZE,
.type = MT_DEVICE,
}, { /* PCI local CFG CSRs */
.virtual = IXP23XX_PCI_CREG_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_CREG_PHYS),
.length = IXP23XX_PCI_CREG_SIZE,
.type = MT_DEVICE,
}, { /* PCI MEM Space */
.virtual = IXP23XX_PCI_MEM_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_MEM_PHYS),
.length = IXP23XX_PCI_MEM_SIZE,
.type = MT_DEVICE,
}
};
/* Install the static I/O mappings above. */
void __init ixp23xx_map_io(void)
{
iotable_init(ixp23xx_io_desc, ARRAY_SIZE(ixp23xx_io_desc));
}
/***************************************************************************
* IXP23xx Interrupt Handling
***************************************************************************/
/* Trigger classes an IXP23xx interrupt line can be configured as. */
enum ixp23xx_irq_type {
IXP23XX_IRQ_LEVEL, IXP23XX_IRQ_EDGE
};
/* Forward declaration; defined after the irq_chip structures below. */
static void ixp23xx_config_irq(unsigned int, enum ixp23xx_irq_type);
/*
 * irq_chip .irq_set_type for the GPIO interrupts: translate the generic
 * trigger type into the chip's "style" encoding, switch the line's
 * handler between level and edge flow, and program the style register.
 * Only GPIO lines 6-15 can generate interrupts; others get -EINVAL.
 */
static int ixp23xx_irq_set_type(struct irq_data *d, unsigned int type)
{
int line = d->irq - IRQ_IXP23XX_GPIO6 + 6;   /* GPIO line number for this irq */
u32 int_style;
enum ixp23xx_irq_type irq_type;
volatile u32 *int_reg;
/*
 * Only GPIOs 6-15 are wired to interrupts on IXP23xx
 */
if (line < 6 || line > 15)
return -EINVAL;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
int_style = IXP23XX_GPIO_STYLE_TRANSITIONAL;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQ_TYPE_EDGE_RISING:
int_style = IXP23XX_GPIO_STYLE_RISING_EDGE;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQ_TYPE_EDGE_FALLING:
int_style = IXP23XX_GPIO_STYLE_FALLING_EDGE;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQ_TYPE_LEVEL_HIGH:
int_style = IXP23XX_GPIO_STYLE_ACTIVE_HIGH;
irq_type = IXP23XX_IRQ_LEVEL;
break;
case IRQ_TYPE_LEVEL_LOW:
int_style = IXP23XX_GPIO_STYLE_ACTIVE_LOW;
irq_type = IXP23XX_IRQ_LEVEL;
break;
default:
return -EINVAL;
}
ixp23xx_config_irq(d->irq, irq_type);   /* pick level vs edge flow handler */
/* Style bits for lines 8-15 live in GPIT2R, lines 0-7 in GPIT1R. */
if (line >= 8) { /* pins 8-15 */
line -= 8;
int_reg = (volatile u32 *)IXP23XX_GPIO_GPIT2R;
} else { /* pins 0-7 */
int_reg = (volatile u32 *)IXP23XX_GPIO_GPIT1R;
}
/*
 * Clear pending interrupts
 */
*IXP23XX_GPIO_GPISR = (1 << line);
/* Clear the style for the appropriate pin */
*int_reg &= ~(IXP23XX_GPIO_STYLE_MASK <<
(line * IXP23XX_GPIO_STYLE_SIZE));
/* Set the new style */
*int_reg |= (int_style << (line * IXP23XX_GPIO_STYLE_SIZE));
return 0;
}
/*
 * Mask an interrupt: clear its bit in the appropriate INTR_EN register.
 * IRQs >= 56 are shifted by 8 - presumably a hole in the register
 * bit layout; confirm against the IXP23xx datasheet.
 */
static void ixp23xx_irq_mask(struct irq_data *d)
{
volatile unsigned long *intr_reg;
unsigned int irq = d->irq;
if (irq >= 56)
irq += 8;
intr_reg = IXP23XX_INTR_EN1 + (irq / 32);   /* 32 enables per register */
*intr_reg &= ~(1 << (irq % 32));
}
/* Acknowledge a GPIO interrupt by writing its bit to the status
 * register; non-GPIO lines (outside 6-15) have nothing to ack. */
static void ixp23xx_irq_ack(struct irq_data *d)
{
int line = d->irq - IRQ_IXP23XX_GPIO6 + 6;
if ((line < 6) || (line > 15))
return;
*IXP23XX_GPIO_GPISR = (1 << line);
}
/*
 * Level triggered interrupts on GPIO lines can only be cleared when the
 * interrupt condition disappears.
 *
 * Level unmask therefore acks first (harmless for non-GPIO lines),
 * then sets the enable bit; same irq>=56 register-layout skip as in
 * ixp23xx_irq_mask().
 */
static void ixp23xx_irq_level_unmask(struct irq_data *d)
{
volatile unsigned long *intr_reg;
unsigned int irq = d->irq;
ixp23xx_irq_ack(d);
if (irq >= 56)
irq += 8;
intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
/* Edge unmask: just set the enable bit; the ack happened at handler entry. */
static void ixp23xx_irq_edge_unmask(struct irq_data *d)
{
volatile unsigned long *intr_reg;
unsigned int irq = d->irq;
if (irq >= 56)
irq += 8;
intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
/* Level-triggered lines: ack by masking (condition persists until cleared
 * at the source), unmask re-acks then enables. */
static struct irq_chip ixp23xx_irq_level_chip = {
.irq_ack = ixp23xx_irq_mask,
.irq_mask = ixp23xx_irq_mask,
.irq_unmask = ixp23xx_irq_level_unmask,
.irq_set_type = ixp23xx_irq_set_type
};
/* Edge-triggered lines: ack clears the latched GPIO status bit. */
static struct irq_chip ixp23xx_irq_edge_chip = {
.irq_ack = ixp23xx_irq_ack,
.irq_mask = ixp23xx_irq_mask,
.irq_unmask = ixp23xx_irq_edge_unmask,
.irq_set_type = ixp23xx_irq_set_type
};
/* Mask a PCI INTA/INTB interrupt in the XScale interrupt-enable register;
 * the bit position is derived from the irq's offset from IRQ_IXP23XX_INTA. */
static void ixp23xx_pci_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq;
*IXP23XX_PCI_XSCALE_INT_ENABLE &= ~(1 << (IRQ_IXP23XX_INTA + 27 - irq));
}
/* Unmask the corresponding PCI interrupt bit. */
static void ixp23xx_pci_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq;
*IXP23XX_PCI_XSCALE_INT_ENABLE |= (1 << (IRQ_IXP23XX_INTA + 27 - irq));
}
/*
 * TODO: Should this just be done at ASM level?
 *
 * Chained handler for the combined PCI interrupt: read the XScale PCI
 * interrupt status, demultiplex to INTA or INTB, and re-dispatch
 * through the generic IRQ layer with the parent masked around it.
 */
static void pci_handler(unsigned int irq, struct irq_desc *desc)
{
u32 pci_interrupt;
unsigned int irqno;
pci_interrupt = *IXP23XX_PCI_XSCALE_INT_STATUS;
desc->irq_data.chip->irq_ack(&desc->irq_data);
/* See which PCI_INTA, or PCI_INTB interrupted */
if (pci_interrupt & (1 << 26)) {
irqno = IRQ_IXP23XX_INTB;
} else if (pci_interrupt & (1 << 27)) {
irqno = IRQ_IXP23XX_INTA;
} else {
BUG();   /* spurious: chained handler fired with neither bit set */
}
generic_handle_irq(irqno);
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
/* irq_chip for the demultiplexed PCI INTA/INTB lines. */
static struct irq_chip ixp23xx_pci_irq_chip = {
.irq_ack = ixp23xx_pci_irq_mask,
.irq_mask = ixp23xx_pci_irq_mask,
.irq_unmask = ixp23xx_pci_irq_unmask
};
/* Bind an irq number to the level or edge chip + flow handler pair
 * and mark it valid for request_irq(). */
static void ixp23xx_config_irq(unsigned int irq, enum ixp23xx_irq_type type)
{
switch (type) {
case IXP23XX_IRQ_LEVEL:
irq_set_chip_and_handler(irq, &ixp23xx_irq_level_chip,
handle_level_irq);
break;
case IXP23XX_IRQ_EDGE:
irq_set_chip_and_handler(irq, &ixp23xx_irq_edge_chip,
handle_edge_irq);
break;
}
set_irq_flags(irq, IRQF_VALID);
}
/*
 * Boot-time interrupt controller setup: route all sources to IRQ (not
 * FIQ), mask everything, default every line to level-sensitive, set up
 * the PCI INTA/INTB sub-lines, and chain the PCI demux handler.
 */
void __init ixp23xx_init_irq(void)
{
int irq;
/* Route everything to IRQ */
*IXP23XX_INTR_SEL1 = 0x0;
*IXP23XX_INTR_SEL2 = 0x0;
*IXP23XX_INTR_SEL3 = 0x0;
*IXP23XX_INTR_SEL4 = 0x0;
/* Mask all sources */
*IXP23XX_INTR_EN1 = 0x0;
*IXP23XX_INTR_EN2 = 0x0;
*IXP23XX_INTR_EN3 = 0x0;
*IXP23XX_INTR_EN4 = 0x0;
/*
 * Configure all IRQs for level-sensitive operation
 */
/* NOTE(review): '<=' iterates NUM_IXP23XX_RAW_IRQS + 1 lines - confirm
 * whether the bound is intentionally inclusive. */
for (irq = 0; irq <= NUM_IXP23XX_RAW_IRQS; irq++) {
ixp23xx_config_irq(irq, IXP23XX_IRQ_LEVEL);
}
for (irq = IRQ_IXP23XX_INTA; irq <= IRQ_IXP23XX_INTB; irq++) {
irq_set_chip_and_handler(irq, &ixp23xx_pci_irq_chip,
handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
irq_set_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler);
}
/*************************************************************************
* Timer-tick functions for IXP23xx
*************************************************************************/
#define CLOCK_TICKS_PER_USEC (CLOCK_TICK_RATE / USEC_PER_SEC)
/* Counter value at which the next jiffy tick is due; advanced by LATCH
 * in the timer interrupt handler. */
static unsigned long next_jiffy_time;
/* Microseconds elapsed since the last jiffy, derived from the free-running
 * timer counter relative to the last tick boundary. */
static unsigned long
ixp23xx_gettimeoffset(void)
{
unsigned long elapsed;
elapsed = *IXP23XX_TIMER_CONT - (next_jiffy_time - LATCH);
return elapsed / CLOCK_TICKS_PER_USEC;
}
/*
 * Timer tick handler: ack the timer, then account one jiffy for every
 * full LATCH period the counter has advanced past the last tick (so
 * lost ticks are caught up rather than dropped).
 */
static irqreturn_t
ixp23xx_timer_interrupt(int irq, void *dev_id)
{
/* Clear Pending Interrupt by writing '1' to it */
*IXP23XX_TIMER_STATUS = IXP23XX_TIMER1_INT_PEND;
while ((signed long)(*IXP23XX_TIMER_CONT - next_jiffy_time) >= LATCH) {
timer_tick();
next_jiffy_time += LATCH;
}
return IRQ_HANDLED;
}
/* irqaction for the tick; IRQF_TIMER/IRQF_IRQPOLL mark it as the
 * system timer for the IRQ core. */
static struct irqaction ixp23xx_timer_irq = {
.name = "IXP23xx Timer Tick",
.handler = ixp23xx_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
};
/* Program timer 1 for periodic LATCH-length reloads, reset the counter
 * and tick bookkeeping, then install the tick interrupt handler. */
void __init ixp23xx_init_timer(void)
{
/* Clear Pending Interrupt by writing '1' to it */
*IXP23XX_TIMER_STATUS = IXP23XX_TIMER1_INT_PEND;
/* Setup the Timer counter value */
*IXP23XX_TIMER1_RELOAD =
(LATCH & ~IXP23XX_TIMER_RELOAD_MASK) | IXP23XX_TIMER_ENABLE;
*IXP23XX_TIMER_CONT = 0;
next_jiffy_time = LATCH;
/* Connect the interrupt handler and enable the interrupt */
setup_irq(IRQ_IXP23XX_TIMER1, &ixp23xx_timer_irq);
}
/* System timer descriptor used by the ARM time core. */
struct sys_timer ixp23xx_timer = {
.init = ixp23xx_init_timer,
.offset = ixp23xx_gettimeoffset,
};
/*************************************************************************
* IXP23xx Platform Initialization
*************************************************************************/
/* MMIO windows for the two on-chip UARTs. */
static struct resource ixp23xx_uart_resources[] = {
{
.start = IXP23XX_UART1_PHYS,
.end = IXP23XX_UART1_PHYS + 0x0fff,
.flags = IORESOURCE_MEM
}, {
.start = IXP23XX_UART2_PHYS,
.end = IXP23XX_UART2_PHYS + 0x0fff,
.flags = IORESOURCE_MEM
}
};
/* 8250 port descriptions; the +3 membase offset and regshift=2 account
 * for byte-lane placement of the registers on the 32-bit bus -
 * presumably big-endian wiring; confirm against the board docs. */
static struct plat_serial8250_port ixp23xx_uart_data[] = {
{
.mapbase = IXP23XX_UART1_PHYS,
.membase = (char *)(IXP23XX_UART1_VIRT + 3),
.irq = IRQ_IXP23XX_UART1,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = IXP23XX_UART_XTAL,
}, {
.mapbase = IXP23XX_UART2_PHYS,
.membase = (char *)(IXP23XX_UART2_VIRT + 3),
.irq = IRQ_IXP23XX_UART2,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = IXP23XX_UART_XTAL,
},
{ },
};
/* Platform device feeding both ports to the generic 8250 driver. */
static struct platform_device ixp23xx_uart = {
.name = "serial8250",
.id = 0,
.dev.platform_data = ixp23xx_uart_data,
.num_resources = 2,
.resource = ixp23xx_uart_resources,
};
/* On-chip devices registered by ixp23xx_sys_init(). */
static struct platform_device *ixp23xx_devices[] __initdata = {
&ixp23xx_uart,
};
/* Common platform init shared by all IXP23xx boards. */
void __init ixp23xx_sys_init(void)
{
/* by default, the idle code is disabled */
disable_hlt();
/* Purpose of setting these fuse bits is not evident here - confirm
 * against the IXP23xx expansion-unit documentation. */
*IXP23XX_EXP_UNIT_FUSE |= 0xf;
platform_add_devices(ixp23xx_devices, ARRAY_SIZE(ixp23xx_devices));
}
/* machine_desc restart hook. */
void ixp23xx_restart(char mode, const char *cmd)
{
/* Use on-chip reset capability */
*IXP23XX_RESET0 |= IXP23XX_RST_ALL;
}
| gpl-2.0 |
nunogia/Z7Max_NX505J_H129_kernel | drivers/mtd/maps/vmu-flash.c | 4979 | 19904 | /* vmu-flash.c
* Driver for SEGA Dreamcast Visual Memory Unit
*
* Copyright (c) Adrian McMenamin 2002 - 2009
* Copyright (c) Paul Mundt 2001
*
* Licensed under version 2 of the
* GNU General Public Licence
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/maple.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
/* One-block read cache per partition. */
struct vmu_cache {
unsigned char *buffer; /* Cache */
unsigned int block; /* Which block was cached */
unsigned long jiffies_atc; /* When was it cached? */
int valid;   /* nonzero when buffer holds 'block' */
};
/* mtd->priv payload: which maple device and which partition this
 * mtd_info represents. */
struct mdev_part {
struct maple_device *mdev;
int partition;
};
/* Per-partition geometry and cache. */
struct vmupart {
u16 user_blocks;
u16 root_block;
u16 numblocks;
char *name;
struct vmu_cache *pcache;
};
/* Whole-card state stored as maple drvdata. */
struct memcard {
u16 tempA;
u16 tempB;
u32 partitions;
u32 blocklen;
u32 writecnt;   /* write phases per block */
u32 readcnt;    /* read phases per block */
u32 removeable;
int partition;
int read;
unsigned char *blockread;   /* destination for the in-flight phase read */
struct vmupart *parts;
struct mtd_info *mtd;
};
/* Result of translating a byte offset into (block, offset-in-block). */
struct vmu_block {
unsigned int num; /* block number */
unsigned int ofs; /* block offset */
};
/*
 * Translate a byte offset within a partition into a block number and
 * offset inside that block.  Returns a kmalloc'd vmu_block the caller
 * must free, or NULL if the offset is out of range or allocation fails.
 */
static struct vmu_block *ofs_to_block(unsigned long src_ofs,
struct mtd_info *mtd, int partition)
{
struct vmu_block *vblock;
struct maple_device *mdev;
struct memcard *card;
struct mdev_part *mpart;
int num;
mpart = mtd->priv;
mdev = mpart->mdev;
card = maple_get_drvdata(mdev);
if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
goto failed;
num = src_ofs / card->blocklen;
/* Redundant with the range check above, kept as a belt-and-braces guard. */
if (num > card->parts[partition].numblocks)
goto failed;
vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
if (!vblock)
goto failed;
vblock->num = num;
vblock->ofs = src_ofs % card->blocklen;
return vblock;
failed:
return NULL;
}
/* Maple bus callback function for reads */
/* Copies one read phase's payload (after the 12-byte reply header) into
 * the buffer maple_vmu_read_block() parked in card->blockread. */
static void vmu_blockread(struct mapleq *mq)
{
struct maple_device *mdev;
struct memcard *card;
mdev = mq->dev;
card = maple_get_drvdata(mdev);
/* copy the read in data */
if (unlikely(!card->blockread))
return;
memcpy(card->blockread, mq->recvbuf->buf + 12,
card->blocklen/card->readcnt);
}
/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads
 *
 * Reads block 'num' into 'buf' in card->readcnt phases, waiting for the
 * (possibly busy) device before each phase, and mirrors the data into
 * the partition's one-block cache.  Returns 0 or a negative error;
 * busy==2 is treated as "device unplugged mid-read".
 */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
struct mtd_info *mtd)
{
struct memcard *card;
struct mdev_part *mpart;
struct maple_device *mdev;
int partition, error = 0, x, wait;
unsigned char *blockread = NULL;
struct vmu_cache *pcache;
__be32 sendbuf;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
pcache = card->parts[partition].pcache;
pcache->valid = 0;   /* cache is stale until the whole read succeeds */
/* prepare the cache for this block */
if (!pcache->buffer) {
pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
if (!pcache->buffer) {
dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
" to lack of memory\n", mdev->port,
mdev->unit);
error = -ENOMEM;
goto outB;
}
}
/*
 * Reads may be phased - again the hardware spec
 * supports this - though may not be any devices in
 * the wild that implement it, but we will here
 */
for (x = 0; x < card->readcnt; x++) {
/* command word: partition | phase | block number */
sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
if (atomic_read(&mdev->busy) == 1) {
wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ);
if (atomic_read(&mdev->busy) == 1) {
dev_notice(&mdev->dev, "VMU at (%d, %d)"
" is busy\n", mdev->port, mdev->unit);
error = -EAGAIN;
goto outB;
}
}
atomic_set(&mdev->busy, 1);
blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
if (!blockread) {
error = -ENOMEM;
atomic_set(&mdev->busy, 0);
goto outB;
}
card->blockread = blockread;   /* vmu_blockread() fills this */
maple_getcond_callback(mdev, vmu_blockread, 0,
MAPLE_FUNC_MEMCARD);
error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
MAPLE_COMMAND_BREAD, 2, &sendbuf);
/* Very long timeouts seem to be needed when box is stressed */
wait = wait_event_interruptible_timeout(mdev->maple_wait,
(atomic_read(&mdev->busy) == 0 ||
atomic_read(&mdev->busy) == 2), HZ * 3);
/*
 * MTD layer does not handle hotplugging well
 * so have to return errors when VMU is unplugged
 * in the middle of a read (busy == 2)
 */
if (error || atomic_read(&mdev->busy) == 2) {
if (atomic_read(&mdev->busy) == 2)
error = -ENXIO;
atomic_set(&mdev->busy, 0);
card->blockread = NULL;
goto outA;
}
if (wait == 0 || wait == -ERESTARTSYS) {
/* timeout or signal: cancel the queued packet by hand */
card->blockread = NULL;
atomic_set(&mdev->busy, 0);
error = -EIO;
list_del_init(&(mdev->mq->list));
kfree(mdev->mq->sendbuf);
mdev->mq->sendbuf = NULL;
if (wait == -ERESTARTSYS) {
dev_warn(&mdev->dev, "VMU read on (%d, %d)"
" interrupted on block 0x%X\n",
mdev->port, mdev->unit, num);
} else
dev_notice(&mdev->dev, "VMU read on (%d, %d)"
" timed out on block 0x%X\n",
mdev->port, mdev->unit, num);
goto outA;
}
/* copy this phase into both the caller's buffer and the cache */
memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
card->blocklen/card->readcnt);
memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
card->blockread, card->blocklen/card->readcnt);
card->blockread = NULL;
pcache->block = num;
pcache->jiffies_atc = jiffies;
pcache->valid = 1;
kfree(blockread);
}
return error;
outA:
kfree(blockread);
outB:
return error;
}
/* communicate with maple bus for phased writing */
/*
 * Write block 'num' from 'buf' in card->writecnt phases.  Each phase
 * sends a command word (partition | phase | block) followed by one
 * phase's worth of data.  Returns card->blocklen on success or a
 * negative error; busy==2 (device unplugged) is treated as phase done.
 */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
struct mtd_info *mtd)
{
struct memcard *card;
struct mdev_part *mpart;
struct maple_device *mdev;
int partition, error, locking, x, phaselen, wait;
__be32 *sendbuf;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
phaselen = card->blocklen/card->writecnt;   /* bytes per write phase */
sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);  /* +4 for the command word */
if (!sendbuf) {
error = -ENOMEM;
goto fail_nosendbuf;
}
for (x = 0; x < card->writecnt; x++) {
sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
/* wait until the device is not busy doing something else
 * or 1 second - which ever is longer */
if (atomic_read(&mdev->busy) == 1) {
wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ);
if (atomic_read(&mdev->busy) == 1) {
error = -EBUSY;
dev_notice(&mdev->dev, "VMU write at (%d, %d)"
"failed - device is busy\n",
mdev->port, mdev->unit);
goto fail_nolock;
}
}
atomic_set(&mdev->busy, 1);
locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
wait = wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ/10);
if (locking) {   /* packet could not even be queued */
error = -EIO;
atomic_set(&mdev->busy, 0);
goto fail_nolock;
}
if (atomic_read(&mdev->busy) == 2) {
atomic_set(&mdev->busy, 0);   /* device went away; carry on */
} else if (wait == 0 || wait == -ERESTARTSYS) {
/* timed out or signalled: cancel the queued packet by hand */
error = -EIO;
dev_warn(&mdev->dev, "Write at (%d, %d) of block"
" 0x%X at phase %d failed: could not"
" communicate with VMU", mdev->port,
mdev->unit, num, x);
atomic_set(&mdev->busy, 0);
kfree(mdev->mq->sendbuf);
mdev->mq->sendbuf = NULL;
list_del_init(&(mdev->mq->list));
goto fail_nolock;
}
}
kfree(sendbuf);
return card->blocklen;
fail_nolock:
kfree(sendbuf);
fail_nosendbuf:
dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
mdev->unit);
return error;
}
/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
	struct mtd_info *mtd)
{
	struct vmu_block *vblock;
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	unsigned char *buf, ret;
	int partition, error;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* *retval is the real status channel: 0 = OK, 1 = allocation
	 * failure, 2 = device read failure, 3 = offset translation
	 * failure.  NOTE(review): ret is an unsigned char, so negative
	 * errno values stored in it below are truncated - callers must
	 * test *retval, not the returned byte. */
	*retval = 0;
	buf = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buf) {
		*retval = 1;
		ret = -ENOMEM;
		goto finish;
	}
	/* map the linear offset to a (block, offset-within-block) pair */
	vblock = ofs_to_block(ofs, mtd, partition);
	if (!vblock) {
		*retval = 3;
		ret = -ENOMEM;
		goto out_buf;
	}
	/* fetch the whole block, then pick out the one requested byte */
	error = maple_vmu_read_block(vblock->num, buf, mtd);
	if (error) {
		ret = error;
		*retval = 2;
		goto out_vblock;
	}
	ret = buf[vblock->ofs];
out_vblock:
	kfree(vblock);
out_buf:
	kfree(buf);
finish:
	return ret;
}
/* mtd higher order function to read flash */
static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	int index = 0, retval, partition, leftover, numblocks;
	unsigned char cx;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* clamp the request to the end of this partition */
	numblocks = card->parts[partition].numblocks;
	if (from + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - from;
	if (len == 0)
		return -EIO;
	/* Have we cached this bit already? */
	pcache = card->parts[partition].pcache;
	do {
		vblock = ofs_to_block(from + index, mtd, partition);
		if (!vblock)
			return -ENOMEM;
		/* Have we cached this and is the cache valid and timely?
		 * The single-block cache expires after one second (HZ). */
		if (pcache->valid &&
			time_before(jiffies, pcache->jiffies_atc + HZ) &&
			(pcache->block == vblock->num)) {
			/* we have cached it, so do necessary copying */
			leftover = card->blocklen - vblock->ofs;
			if (vblock->ofs + len - index < card->blocklen) {
				/* only a bit of this block to copy */
				memcpy(buf + index,
					pcache->buffer + vblock->ofs,
					len - index);
				index = len;
			} else {
				/* otherwise copy remainder of whole block */
				memcpy(buf + index, pcache->buffer +
					vblock->ofs, leftover);
				index += leftover;
			}
		} else {
			/*
			 * Not cached so read one byte -
			 * but cache the rest of the block
			 */
			cx = vmu_flash_read_char(from + index, &retval, mtd);
			if (retval) {
				/* NOTE(review): on failure cx holds a
				 * truncated errno byte, yet it is returned
				 * as a (positive) int here - confirm
				 * callers treat non-zero as failure */
				*retlen = index;
				kfree(vblock);
				return cx;
			}
			memset(buf + index, cx, 1);
			index++;
		}
		kfree(vblock);
	} while (len > index);
	*retlen = index;
	return 0;
}
/* mtd higher order function to write flash: performs a
 * read-modify-write of each whole block touched by the request */
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int index = 0, partition, error = 0, numblocks;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	unsigned char *buffer;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* clamp the request to the end of this partition */
	numblocks = card->parts[partition].numblocks;
	if (to + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - to;
	if (len == 0) {
		error = -EIO;
		goto failed;
	}

	vblock = ofs_to_block(to, mtd, partition);
	if (!vblock) {
		error = -ENOMEM;
		goto failed;
	}

	/* scratch buffer holding one whole block for read-modify-write */
	buffer = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto fail_buffer;
	}

	do {
		/* Read in the block we are to write to */
		error = maple_vmu_read_block(vblock->num, buffer, mtd);
		if (error)
			goto fail_io;
		/* patch in the caller's bytes for this block */
		do {
			buffer[vblock->ofs] = buf[index];
			vblock->ofs++;
			index++;
			if (index >= len)
				break;
		} while (vblock->ofs < card->blocklen);

		/* write out new buffer */
		error = maple_vmu_write_block(vblock->num, buffer, mtd);
		/* invalidate the cache */
		pcache = card->parts[partition].pcache;
		pcache->valid = 0;

		/* a successful block write returns card->blocklen */
		if (error != card->blocklen)
			goto fail_io;

		/* advance to the start of the next block */
		vblock->num++;
		vblock->ofs = 0;
	} while (len > index);

	kfree(buffer);
	*retlen = index;
	kfree(vblock);
	return 0;

fail_io:
	kfree(buffer);
fail_buffer:
	kfree(vblock);
failed:
	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
	return error;
}
/* mtd sync hook: this driver writes every block out immediately in
 * vmu_flash_write(), so there is no deferred data to flush */
static void vmu_flash_sync(struct mtd_info *mtd)
{
	/* Do nothing here */
}
/* Maple bus callback function to recursively query hardware details.
 * Runs once per partition: it decodes the GETMINFO reply in mq,
 * registers an MTD device for the current partition and, if more
 * partitions exist, re-arms itself and queries the next one. */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* reply payload viewed as 16-bit words: word 12 is the user
	 * block count, word 6 the root block number */
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	/* blocks are numbered 0..root_block, hence root_block + 1 total */
	part_cur->numblocks = card->tempB + 1;
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);

	/* populate the MTD description for this partition */
	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	mtd_cur->type = 8;
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->_write = vmu_flash_write;
	mtd_cur->_read = vmu_flash_read;
	mtd_cur->_sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	/* one-block read cache, zeroed so pcache->valid starts at 0 */
	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = mtd_device_register(mtd_cur, NULL, 0);
	if (error)
		goto fail_mtd_register;

	/* drop this callback; re-armed below only if more partitions */
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	 * Set up a recursive call to the (probably theoretical)
	 * second or more partition
	 */
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		"error is 0x%X\n", mdev->port, mdev->unit, error);
	/* NOTE(review): error is reused here as a loop index, clobbering
	 * the errno that was just printed - intentional but fragile */
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}
/* Handles very basic info about the flash, queries for details.
 * Decodes the card geometry from the maple devinfo words, allocates
 * per-card state, then kicks off an asynchronous GETMINFO whose reply
 * is handled by vmu_queryblocks().  Returns 0 or a negative errno. */
static int __devinit vmu_connect(struct maple_device *mdev)
{
	unsigned long test_flash_data, basic_flash_data;
	int c, error;
	struct memcard *card;
	u32 partnum = 0;

	test_flash_data = be32_to_cpu(mdev->devinfo.function);
	/* Need to count how many bits are set - to find out which
	 * function_data element has details of the memory card
	 */
	c = hweight_long(test_flash_data);
	basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);

	card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
	if (!card) {
		error = -ENOMEM;
		goto fail_nomem;
	}

	/* unpack the geometry fields of the flash descriptor word */
	card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
	card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
	card->writecnt = basic_flash_data >> 12 & 0xF;
	card->readcnt = basic_flash_data >> 8 & 0xF;
	card->removeable = basic_flash_data >> 7 & 1;
	card->partition = 0;

	/*
	 * Not sure there are actually any multi-partition devices in the
	 * real world, but the hardware supports them, so, so will we
	 */
	card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
		GFP_KERNEL);
	if (!card->parts) {
		error = -ENOMEM;
		goto fail_partitions;
	}

	card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
		GFP_KERNEL);
	if (!card->mtd) {
		error = -ENOMEM;
		goto fail_mtd_info;
	}

	maple_set_drvdata(mdev, card);

	/*
	 * We want to trap meminfo not get cond
	 * so set interval to zero, but rely on maple bus
	 * driver to pass back the results of the meminfo
	 */
	maple_getcond_callback(mdev, vmu_queryblocks, 0,
		MAPLE_FUNC_MEMCARD);

	/* Make sure we are clear to go */
	if (atomic_read(&mdev->busy) == 1) {
		wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ);
		if (atomic_read(&mdev->busy) == 1) {
			dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
				mdev->port, mdev->unit);
			error = -EAGAIN;
			goto fail_device_busy;
		}
	}

	atomic_set(&mdev->busy, 1);

	/*
	 * Set up the minfo call: vmu_queryblocks will handle
	 * the information passed back
	 */
	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
		MAPLE_COMMAND_GETMINFO, 2, &partnum);
	if (error) {
		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
			" error is 0x%X\n", mdev->port, mdev->unit, error);
		/*
		 * The packet was never queued, so the callback will not
		 * fire: clear the busy flag we just set and unwind from
		 * fail_device_busy so card->mtd is freed too.  (The old
		 * code jumped to fail_mtd_info, leaking card->mtd and
		 * leaving the device marked busy forever.)
		 */
		atomic_set(&mdev->busy, 0);
		goto fail_device_busy;
	}
	return 0;

fail_device_busy:
	kfree(card->mtd);
fail_mtd_info:
	kfree(card->parts);
fail_partitions:
	kfree(card);
fail_nomem:
	return error;
}
/* Unregister every partition's MTD device and release all of the
 * per-card state when the VMU is unplugged */
static void __devexit vmu_disconnect(struct maple_device *mdev)
{
	struct memcard *card = maple_get_drvdata(mdev);
	struct mdev_part *mpart;
	int part;

	mdev->callback = NULL;
	for (part = 0; part < card->partitions; part++) {
		mpart = card->mtd[part].priv;
		mpart->mdev = NULL;
		mtd_device_unregister(&card->mtd[part]);
		kfree(card->parts[part].name);
	}
	kfree(card->parts);
	kfree(card->mtd);
	kfree(card);
}
/* Callback to handle eccentricities of both mtd subsystem
 * and general flakyness of Dreamcast VMUs: refuse to unload while
 * any partition's MTD device is still in use.
 */
static int vmu_can_unload(struct maple_device *mdev)
{
	struct memcard *card = maple_get_drvdata(mdev);
	int part;

	for (part = 0; part < card->partitions; part++)
		if (card->mtd[part].usecount > 0)
			return 0;
	return 1;
}
/* common prefix for every file-error message below */
#define ERRSTR "VMU at (%d, %d) file error -"

/* Decode and log a file error code reported back by the VMU.
 * recvbuf is the raw reply; word 1 holds the error code. */
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	enum maple_file_errors error = ((int *)recvbuf)[1];

	switch (error) {

	case MAPLE_FILEERR_INVALID_PARTITION:
		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_PHASE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " phase error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_BLOCK:
		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_WRITE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " write error\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
			mdev->port, mdev->unit);
		break;

	case MAPLE_FILEERR_BAD_CRC:
		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
			mdev->port, mdev->unit);
		break;

	default:
		/* unknown code: log it raw in hex */
		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
			mdev->port, mdev->unit, error);
	}
}
/* Bind this driver to a newly detected maple device: install the
 * per-device hooks, then hand off to vmu_connect() for the real work */
static int __devinit probe_maple_vmu(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);
	struct maple_driver *mdrv = to_maple_driver(dev->driver);

	mdev->can_unload = vmu_can_unload;
	mdev->fileerr_handler = vmu_file_error;
	mdev->driver = mdrv;

	/* 0 on success, negative errno on failure */
	return vmu_connect(mdev);
}
/* Unbind: tear down everything vmu_connect()/vmu_queryblocks() set up */
static int __devexit remove_maple_vmu(struct device *dev)
{
	vmu_disconnect(to_maple_dev(dev));
	return 0;
}
/* Maple bus glue: bind to any device advertising the MEMCARD function */
static struct maple_driver vmu_flash_driver = {
	.function = MAPLE_FUNC_MEMCARD,
	.drv = {
		.name = "Dreamcast_visual_memory",
		.probe = probe_maple_vmu,
		.remove = __devexit_p(remove_maple_vmu),
	},
};
/* Register the driver with the maple bus core at module load */
static int __init vmu_flash_map_init(void)
{
	return maple_driver_register(&vmu_flash_driver);
}

/* Deregister from the maple bus core at module unload */
static void __exit vmu_flash_map_exit(void)
{
	maple_driver_unregister(&vmu_flash_driver);
}

module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
| gpl-2.0 |
InsomniaROM/kernel_lge_mako | net/sunrpc/addr.c | 4979 | 9003 | /*
* Copyright 2009, Oracle. All rights reserved.
*
* Convert socket addresses to presentation addresses and universal
* addresses, and vice versa.
*
* Universal addresses are introduced by RFC 1833 and further refined by
* recent RFCs describing NFSv4. The universal address format is part
* of the external (network) interface provided by rpcbind version 3
* and 4, and by NFSv4. Such an address is a string containing a
* presentation format IP address followed by a port number in
* "hibyte.lobyte" format.
*
* IPv6 addresses can also include a scope ID, typically denoted by
* a '%' followed by a device name or a non-negative integer. Refer to
* RFC 4291, Section 2.2 for details on IPv6 presentation formats.
*/
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Write the presentation form of an IPv6 address into @buf without
 * any "%scope" suffix, preferring the RFC 4291 shorthand forms.
 * Returns the length reported by snprintf(), i.e. the untruncated
 * string length excluding the NUL.
 */
static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
				  char *buf, const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}
/*
 * Write the presentation form of an IPv6 address into @buf,
 * appending a "%scope_id" suffix for link-local addresses that
 * carry a non-zero scope.  Returns the resulting string length
 * (excluding the NUL), or 0 on truncation/failure.
 */
static size_t rpc_ntop6(const struct sockaddr *sap,
			char *buf, const size_t buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char scopebuf[IPV6_SCOPE_ID_LEN];
	size_t len;
	int rc;

	len = rpc_ntop6_noscopeid(sap, buf, buflen);
	if (unlikely(len == 0))
		return len;

	/* scope IDs are only meaningful for link-local addresses */
	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return len;
	if (sin6->sin6_scope_id == 0)
		return len;

	rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
			IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
	/*
	 * snprintf() returns the untruncated length: a result equal to
	 * sizeof(scopebuf) also means the output was cut short, so the
	 * check must be >= (the previous > test missed that case).
	 */
	if (unlikely((size_t)rc >= sizeof(scopebuf)))
		return 0;

	len += rc;
	/*
	 * buf must hold len characters plus a terminating NUL, so
	 * len == buflen is already one byte too many for strcat()
	 * (the previous > test allowed a one-byte overflow).
	 */
	if (unlikely(len >= buflen))
		return 0;

	strcat(buf, scopebuf);
	return len;
}
#else /* !IS_ENABLED(CONFIG_IPV6) */
/* IPv6 disabled: no presentation form can be produced */
static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
				  char *buf, const int buflen)
{
	return 0;
}

/* IPv6 disabled: no presentation form can be produced */
static size_t rpc_ntop6(const struct sockaddr *sap,
			char *buf, const size_t buflen)
{
	return 0;
}
#endif /* !IS_ENABLED(CONFIG_IPV6) */
/* Write the dotted-quad form of an AF_INET address into @buf;
 * returns the snprintf() result (untruncated length) */
static int rpc_ntop4(const struct sockaddr *sap,
		     char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}
/**
 * rpc_ntop - construct a presentation address in @buf
 * @sap: socket address
 * @buf: construction area
 * @buflen: size of @buf, in bytes
 *
 * Plants a %NUL-terminated string in @buf and returns the length
 * of the string, excluding the %NUL. Otherwise zero is returned.
 */
size_t rpc_ntop(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	/* dispatch on the address family; anything else is unsupported */
	if (sap->sa_family == AF_INET)
		return rpc_ntop4(sap, buf, buflen);
	if (sap->sa_family == AF_INET6)
		return rpc_ntop6(sap, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_ntop);
/* Parse a dotted-quad IPv4 presentation address into @sap.
 * Returns sizeof(struct sockaddr_in) on success, 0 on failure. */
static size_t rpc_pton4(const char *buf, const size_t buflen,
			struct sockaddr *sap, const size_t salen)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	u8 *addr = (u8 *)&sin->sin_addr.s_addr;

	/* reject oversized input and undersized result buffers up front */
	if (buflen > INET_ADDRSTRLEN || salen < sizeof(struct sockaddr_in))
		return 0;

	memset(sap, 0, sizeof(struct sockaddr_in));

	if (in4_pton(buf, buflen, addr, '\0', NULL) == 0)
		return 0;

	sin->sin_family = AF_INET;
	return sizeof(struct sockaddr_in);
}
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Parse the optional "%<scope>" suffix of a link-local IPv6
 * presentation address (RFC 4291, Section 2.2).  <scope> may be a
 * network device name or a decimal interface index.  On success the
 * scope is stored in @sin6 and 1 is returned; a missing suffix is
 * also success.  Returns 0 on a malformed or unusable scope.
 */
static int rpc_parse_scope_id(struct net *net, const char *buf,
			      const size_t buflen, const char *delim,
			      struct sockaddr_in6 *sin6)
{
	char *p;
	size_t len;

	/* no scope suffix present at all: nothing to do */
	if ((buf + buflen) == delim)
		return 1;

	if (*delim != IPV6_SCOPE_DELIMITER)
		return 0;

	/* scope IDs are only defined for link-local addresses */
	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	len = (buf + buflen) - delim - 1;
	p = kstrndup(delim + 1, len, GFP_KERNEL);
	if (p) {
		unsigned long scope_id = 0;
		struct net_device *dev;

		dev = dev_get_by_name(net, p);
		if (dev != NULL) {
			scope_id = dev->ifindex;
			dev_put(dev);
		} else {
			/*
			 * Not a device name, so try a numeric interface
			 * index.  strict_strtoul() returns 0 on *success*,
			 * so only fail when the parse fails.  (The old
			 * "== 0" test was inverted: it rejected every
			 * valid numeric scope ID and accepted arbitrary
			 * garbage with a scope of zero.)
			 */
			if (strict_strtoul(p, 10, &scope_id) != 0) {
				kfree(p);
				return 0;
			}
		}

		kfree(p);
		sin6->sin6_scope_id = scope_id;
		return 1;
	}

	return 0;
}
/* Parse an IPv6 presentation address (optionally with a "%scope"
 * suffix) into @sap.  Returns sizeof(struct sockaddr_in6) on
 * success, 0 on failure. */
static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen,
			struct sockaddr *sap, const size_t salen)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
	const char *delim;

	/* allow room for the address plus an optional scope suffix */
	if (buflen > (INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN) ||
	    salen < sizeof(struct sockaddr_in6))
		return 0;

	memset(sap, 0, sizeof(struct sockaddr_in6));

	/* in6_pton() stops at the scope delimiter and reports it via delim */
	if (in6_pton(buf, buflen, addr, IPV6_SCOPE_DELIMITER, &delim) == 0)
		return 0;

	if (!rpc_parse_scope_id(net, buf, buflen, delim, sin6))
		return 0;

	sin6->sin6_family = AF_INET6;
	return sizeof(struct sockaddr_in6);
}
#else
/* IPv6 disabled: IPv6 presentation addresses cannot be parsed */
static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen,
			struct sockaddr *sap, const size_t salen)
{
	return 0;
}
#endif
/**
 * rpc_pton - Construct a sockaddr in @sap
 * @net: applicable network namespace
 * @buf: C string containing presentation format IP address
 * @buflen: length of presentation address in bytes
 * @sap: buffer into which to plant socket address
 * @salen: size of buffer in bytes
 *
 * Returns the size of the socket address if successful; otherwise
 * zero is returned.
 *
 * Plants a socket address in @sap and returns the size of the
 * socket address, if successful. Returns zero if an error
 * occurred.
 */
size_t rpc_pton(struct net *net, const char *buf, const size_t buflen,
		struct sockaddr *sap, const size_t salen)
{
	const char *probe = buf;
	const char *end = buf + buflen;

	/* any ':' within the first buflen bytes marks the string as IPv6 */
	while (probe < end) {
		if (*probe == ':')
			return rpc_pton6(net, buf, buflen, sap, salen);
		probe++;
	}
	return rpc_pton4(buf, buflen, sap, salen);
}
EXPORT_SYMBOL_GPL(rpc_pton);
/**
 * rpc_sockaddr2uaddr - Construct a universal address string from @sap.
 * @sap: socket address
 * @gfp_flags: allocation mode
 *
 * Returns a %NUL-terminated string in dynamically allocated memory;
 * otherwise NULL is returned if an error occurred.  Caller must
 * free the returned string.
 */
char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
{
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	unsigned short port;

	switch (sap->sa_family) {
	case AF_INET:
		if (rpc_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return NULL;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		break;
	case AF_INET6:
		/* universal addresses never carry a scope ID */
		if (rpc_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return NULL;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		break;
	default:
		return NULL;
	}

	/* append the RFC 1833 ".hibyte.lobyte" port suffix.
	 * NOTE(review): this check uses > rather than >=, so an exactly
	 * buffer-filling (truncated) result would slip through - harmless
	 * in practice since ".255.255" is bounded, but worth confirming */
	if (snprintf(portbuf, sizeof(portbuf),
		     ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
		return NULL;

	if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
		return NULL;

	return kstrdup(addrbuf, gfp_flags);
}
/**
 * rpc_uaddr2sockaddr - convert a universal address to a socket address.
 * @net: applicable network namespace
 * @uaddr: C string containing universal address to convert
 * @uaddr_len: length of universal address string
 * @sap: buffer into which to plant socket address
 * @salen: size of buffer
 *
 * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
 * rpc_pton() require proper string termination to be successful.
 *
 * Returns the size of the socket address if successful; otherwise
 * zero is returned.
 */
size_t rpc_uaddr2sockaddr(struct net *net, const char *uaddr,
			  const size_t uaddr_len, struct sockaddr *sap,
			  const size_t salen)
{
	/* NOTE(review): sizeof('\0') is sizeof(int) in C, so this
	 * over-allocates by a few bytes instead of the intended +1;
	 * harmless, but "+ 1" would express the intent */
	char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
	unsigned long portlo, porthi;
	unsigned short port;

	if (uaddr_len > RPCBIND_MAXUADDRLEN)
		return 0;

	/* work on a NUL-terminated local copy */
	memcpy(buf, uaddr, uaddr_len);
	buf[uaddr_len] = '\0';

	/* peel off the low port byte (text after the last '.') */
	c = strrchr(buf, '.');
	if (unlikely(c == NULL))
		return 0;
	if (unlikely(strict_strtoul(c + 1, 10, &portlo) != 0))
		return 0;
	if (unlikely(portlo > 255))
		return 0;

	/* then the high port byte */
	*c = '\0';
	c = strrchr(buf, '.');
	if (unlikely(c == NULL))
		return 0;
	if (unlikely(strict_strtoul(c + 1, 10, &porthi) != 0))
		return 0;
	if (unlikely(porthi > 255))
		return 0;

	port = (unsigned short)((porthi << 8) | portlo);

	/* what remains is the bare presentation address */
	*c = '\0';
	if (rpc_pton(net, buf, strlen(buf), sap, salen) == 0)
		return 0;

	switch (sap->sa_family) {
	case AF_INET:
		((struct sockaddr_in *)sap)->sin_port = htons(port);
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
		return sizeof(struct sockaddr_in6);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_uaddr2sockaddr);
| gpl-2.0 |
upndwn4par/android_kernel_lge_hammerhead | drivers/gpio/gpio-pl061.c | 4979 | 9484 | /*
* Copyright (C) 2008, 2009 Provigent Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)
*
* Data sheet: ARM DDI 0190B, September 2000
*/
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <asm/mach/irq.h>
/* PL061 register offsets (per ARM DDI 0190, referenced above):
 * direction, interrupt sense/both-edges/event, interrupt enable,
 * raw/masked status, and interrupt clear */
#define GPIODIR 0x400
#define GPIOIS  0x404
#define GPIOIBE 0x408
#define GPIOIEV 0x40C
#define GPIOIE  0x410
#define GPIORIS 0x414
#define GPIOMIS 0x418
#define GPIOIC  0x41C

/* the PL061 provides eight GPIO lines */
#define PL061_GPIO_NR	8
#ifdef CONFIG_PM
/* snapshot of the PL061 registers saved across suspend/resume */
struct pl061_context_save_regs {
	u8 gpio_data;	/* output levels of lines configured as outputs */
	u8 gpio_dir;	/* GPIODIR: per-line direction bits */
	u8 gpio_is;	/* GPIOIS: interrupt sense */
	u8 gpio_ibe;	/* GPIOIBE: both-edges interrupt */
	u8 gpio_iev;	/* GPIOIEV: interrupt event (level/edge polarity) */
	u8 gpio_ie;	/* GPIOIE: interrupt enable mask */
};
#endif
/* per-controller driver state */
struct pl061_gpio {
	/* Each of the two spinlocks protects a different set of hardware
	 * regiters and data structurs. This decouples the code of the IRQ from
	 * the GPIO code. This also makes the case of a GPIO routine call from
	 * the IRQ code simpler.
	 */
	spinlock_t lock;		/* GPIO registers */

	void __iomem *base;		/* mapped MMIO base of the PL061 */
	int irq_base;			/* first Linux IRQ; <= 0 = no IRQ support */
	struct irq_chip_generic *irq_gc; /* generic irq_chip for the 8 lines */
	struct gpio_chip gc;		/* gpiolib interface */

#ifdef CONFIG_PM
	struct pl061_context_save_regs csave_regs; /* suspend snapshot */
#endif
};
/* Configure one GPIO line as an input by clearing its GPIODIR bit */
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
	unsigned long flags;
	unsigned char dir;

	if (offset >= gc->ngpio)
		return -EINVAL;

	spin_lock_irqsave(&chip->lock, flags);
	dir = readb(chip->base + GPIODIR) & ~(1 << offset);
	writeb(dir, chip->base + GPIODIR);
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;
}
/* Configure one GPIO line as an output driving @value.
 * The value is written both before and after flipping the direction
 * bit (see the inline comment) - the ordering is deliberate. */
static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
		int value)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
	unsigned long flags;
	unsigned char gpiodir;

	if (offset >= gc->ngpio)
		return -EINVAL;

	spin_lock_irqsave(&chip->lock, flags);
	/* data register access is address-masked: offset (1 << (offset + 2))
	 * selects which bit of the data register is written */
	writeb(!!value << offset, chip->base + (1 << (offset + 2)));
	gpiodir = readb(chip->base + GPIODIR);
	gpiodir |= 1 << offset;
	writeb(gpiodir, chip->base + GPIODIR);

	/*
	 * gpio value is set again, because pl061 doesn't allow to set value of
	 * a gpio pin before configuring it in OUT mode.
	 */
	writeb(!!value << offset, chip->base + (1 << (offset + 2)));
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;
}
/* Read the current level of one GPIO line; the data register is
 * accessed through an address-masked offset (1 << (offset + 2)) so
 * only the requested bit is returned */
static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);

	return readb(chip->base + (1 << (offset + 2))) != 0;
}
/* Drive one GPIO line high or low via the address-masked data register */
static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
	unsigned char bits = value ? (1 << offset) : 0;

	writeb(bits, chip->base + (1 << (offset + 2)));
}
/* Map a GPIO offset to its Linux IRQ number; -EINVAL when this
 * controller was probed without IRQ support (irq_base <= 0) */
static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
	int base = chip->irq_base;

	return (base > 0) ? (base + offset) : -EINVAL;
}
/* Program the PL061 sense/event/both-edge registers for the trigger
 * type requested on one line.  Level triggers set GPIOIS; edge
 * triggers clear it and use GPIOIBE/GPIOIEV to select the edge(s). */
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct pl061_gpio *chip = gc->private;
	int offset = d->irq - chip->irq_base;
	unsigned long flags;
	u8 gpiois, gpioibe, gpioiev;

	if (offset < 0 || offset >= PL061_GPIO_NR)
		return -EINVAL;

	/* gc->lock serialises against the generic-chip mask/unmask ops */
	raw_spin_lock_irqsave(&gc->lock, flags);

	gpioiev = readb(chip->base + GPIOIEV);

	gpiois = readb(chip->base + GPIOIS);
	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		/* level-sensitive: GPIOIEV selects the active level */
		gpiois |= 1 << offset;
		if (trigger & IRQ_TYPE_LEVEL_HIGH)
			gpioiev |= 1 << offset;
		else
			gpioiev &= ~(1 << offset);
	} else
		gpiois &= ~(1 << offset);
	writeb(gpiois, chip->base + GPIOIS);

	gpioibe = readb(chip->base + GPIOIBE);
	if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		/* both edges: GPIOIEV is ignored by the hardware here */
		gpioibe |= 1 << offset;
	else {
		gpioibe &= ~(1 << offset);
		/* single edge: GPIOIEV selects rising vs falling */
		if (trigger & IRQ_TYPE_EDGE_RISING)
			gpioiev |= 1 << offset;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			gpioiev &= ~(1 << offset);
	}
	writeb(gpioibe, chip->base + GPIOIBE);

	writeb(gpioiev, chip->base + GPIOIEV);

	raw_spin_unlock_irqrestore(&gc->lock, flags);

	return 0;
}
/* Chained handler for the controller's upstream interrupt: ack all
 * pending lines, then dispatch each one to its per-line virtual IRQ */
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
	unsigned long pending;
	int offset;
	struct pl061_gpio *chip = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);

	chained_irq_enter(irqchip, desc);

	/* read masked status, then clear everything we saw in one go */
	pending = readb(chip->base + GPIOMIS);
	writeb(pending, chip->base + GPIOIC);
	if (pending) {
		for_each_set_bit(offset, &pending, PL061_GPIO_NR)
			generic_handle_irq(pl061_to_irq(&chip->gc, offset));
	}

	chained_irq_exit(irqchip, desc);
}
/* Build a generic irq_chip covering the 8 GPIO lines, with mask/unmask
 * mapped onto the GPIOIE register and our own set_type handler.
 * NOTE(review): irq_alloc_generic_chip() can return NULL on allocation
 * failure, which would oops on the dereference below - confirm whether
 * this path can run under memory pressure. */
static void __init pl061_init_gc(struct pl061_gpio *chip, int irq_base)
{
	struct irq_chip_type *ct;

	chip->irq_gc = irq_alloc_generic_chip("gpio-pl061", 1, irq_base,
					      chip->base, handle_simple_irq);
	chip->irq_gc->private = chip;

	ct = chip->irq_gc->chip_types;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_set_type = pl061_irq_type;
	ct->chip.irq_set_wake = irq_gc_set_wake;
	/* mask/unmask toggle bits in the interrupt-enable register */
	ct->regs.mask = GPIOIE;

	irq_setup_generic_chip(chip->irq_gc, IRQ_MSK(PL061_GPIO_NR),
			       IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
}
/* Probe one PL061 instance on the AMBA bus: map its registers,
 * register a gpio_chip, and (when an irq_base was supplied) wire up
 * the chained interrupt handling and initial line directions. */
static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct pl061_platform_data *pdata;
	struct pl061_gpio *chip;
	int ret, irq, i;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	/* gpio/irq numbering comes from platform data, or is dynamic
	 * (base = -1, no irqs) when probed via device tree */
	pdata = dev->dev.platform_data;
	if (pdata) {
		chip->gc.base = pdata->gpio_base;
		chip->irq_base = pdata->irq_base;
	} else if (dev->dev.of_node) {
		chip->gc.base = -1;
		chip->irq_base = 0;
	} else {
		ret = -ENODEV;
		goto free_mem;
	}

	if (!request_mem_region(dev->res.start,
				resource_size(&dev->res), "pl061")) {
		ret = -EBUSY;
		goto free_mem;
	}

	chip->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (chip->base == NULL) {
		ret = -ENOMEM;
		goto release_region;
	}

	spin_lock_init(&chip->lock);

	chip->gc.direction_input = pl061_direction_input;
	chip->gc.direction_output = pl061_direction_output;
	chip->gc.get = pl061_get_value;
	chip->gc.set = pl061_set_value;
	chip->gc.to_irq = pl061_to_irq;
	chip->gc.ngpio = PL061_GPIO_NR;
	chip->gc.label = dev_name(&dev->dev);
	chip->gc.dev = &dev->dev;
	chip->gc.owner = THIS_MODULE;

	ret = gpiochip_add(&chip->gc);
	if (ret)
		goto iounmap;

	/*
	 * irq_chip support
	 */
	if (chip->irq_base <= 0)
		return 0;

	pl061_init_gc(chip, chip->irq_base);

	writeb(0, chip->base + GPIOIE); /* disable irqs */
	irq = dev->irq[0];
	if (irq < 0) {
		/* NOTE(review): this error path (and any later failure)
		 * jumps to iounmap without undoing the successful
		 * gpiochip_add() above - confirm whether a
		 * gpiochip_remove() is needed here */
		ret = -ENODEV;
		goto iounmap;
	}
	irq_set_chained_handler(irq, pl061_irq_handler);
	irq_set_handler_data(irq, chip);

	/* apply the platform's requested initial direction/level per line */
	for (i = 0; i < PL061_GPIO_NR; i++) {
		if (pdata) {
			if (pdata->directions & (1 << i))
				pl061_direction_output(&chip->gc, i,
						pdata->values & (1 << i));
			else
				pl061_direction_input(&chip->gc, i);
		}
	}

	amba_set_drvdata(dev, chip);

	return 0;

iounmap:
	iounmap(chip->base);
release_region:
	release_mem_region(dev->res.start, resource_size(&dev->res));
free_mem:
	kfree(chip);

	return ret;
}
#ifdef CONFIG_PM
static int pl061_suspend(struct device *dev)
{
struct pl061_gpio *chip = dev_get_drvdata(dev);
int offset;
chip->csave_regs.gpio_data = 0;
chip->csave_regs.gpio_dir = readb(chip->base + GPIODIR);
chip->csave_regs.gpio_is = readb(chip->base + GPIOIS);
chip->csave_regs.gpio_ibe = readb(chip->base + GPIOIBE);
chip->csave_regs.gpio_iev = readb(chip->base + GPIOIEV);
chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
if (chip->csave_regs.gpio_dir & (1 << offset))
chip->csave_regs.gpio_data |=
pl061_get_value(&chip->gc, offset) << offset;
}
return 0;
}
static int pl061_resume(struct device *dev)
{
struct pl061_gpio *chip = dev_get_drvdata(dev);
int offset;
for (offset = 0; offset < PL061_GPIO_NR; offset++) {
if (chip->csave_regs.gpio_dir & (1 << offset))
pl061_direction_output(&chip->gc, offset,
chip->csave_regs.gpio_data &
(1 << offset));
else
pl061_direction_input(&chip->gc, offset);
}
writeb(chip->csave_regs.gpio_is, chip->base + GPIOIS);
writeb(chip->csave_regs.gpio_ibe, chip->base + GPIOIBE);
writeb(chip->csave_regs.gpio_iev, chip->base + GPIOIEV);
writeb(chip->csave_regs.gpio_ie, chip->base + GPIOIE);
return 0;
}
/* suspend/resume double as freeze/restore for hibernation */
static const struct dev_pm_ops pl061_dev_pm_ops = {
	.suspend = pl061_suspend,
	.resume = pl061_resume,
	.freeze = pl061_suspend,
	.restore = pl061_resume,
};
#endif
/* AMBA peripheral ID match table: PL061 primecell */
static struct amba_id pl061_ids[] = {
	{
		.id	= 0x00041061,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },	/* sentinel */
};

MODULE_DEVICE_TABLE(amba, pl061_ids);
/* AMBA bus driver glue; PM callbacks only when CONFIG_PM is set */
static struct amba_driver pl061_gpio_driver = {
	.drv = {
		.name	= "pl061_gpio",
#ifdef CONFIG_PM
		.pm	= &pl061_dev_pm_ops,
#endif
	},
	.id_table	= pl061_ids,
	.probe		= pl061_probe,
};
/* Register with the AMBA bus early (subsys_initcall) so GPIOs are
 * available to other drivers during their own initialisation */
static int __init pl061_gpio_init(void)
{
	return amba_driver_register(&pl061_gpio_driver);
}
subsys_initcall(pl061_gpio_init);

MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("PL061 GPIO driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bigbiff/android_kernel_samsung_sm900t | drivers/usb/host/u132-hcd.c | 4979 | 92925 | /*
* Host Controller Driver for the Elan Digital Systems U132 adapter
*
* Copyright(C) 2006 Elan Digital Systems Limited
* http://www.elandigitalsystems.com
*
* Author and Maintainer - Tony Olech - Elan Digital Systems
* tony.olech@elandigitalsystems.com
*
* This program is free software;you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*
* This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB host drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
* published by O'Reilly
*
* The U132 adapter is a USB to CardBus adapter specifically designed
* for PC cards that contain an OHCI host controller. Typical PC cards
* are the Orange Mobile 3G Option GlobeTrotter Fusion card.
*
* The U132 adapter will *NOT *work with PC cards that do not contain
* an OHCI controller. A simple way to test whether a PC card has an
* OHCI controller as an interface is to insert the PC card directly
* into a laptop(or desktop) with a CardBus slot and if "lspci" shows
* a new USB controller and "lsusb -v" shows a new OHCI Host Controller
* then there is a good chance that the U132 adapter will support the
* PC card.(you also need the specific client driver for the PC card)
*
* Please inform the Author and Maintainer about any PC cards that
* contain OHCI Host Controller and work when directly connected to
* an embedded CardBus slot but do not work when they are connected
* via an ELAN U132 adapter.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
* If you're going to try stuff like this, you need to split
* out shareable stuff (register declarations?) into its own
* file, maybe name <linux/usb/ohci.h>
*/
#include "ohci.h"
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
OHCI_INTR_WDH)
MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
MODULE_DESCRIPTION("U132 USB Host Controller Driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
"t setup");
/* Module-level wait queue; waiters/wakers are outside this chunk - TODO confirm. */
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
 * u132_module_lock exists to protect access to global variables
 *
 */
/* Statically initialised so the mutex is valid before any init code runs. */
static DEFINE_MUTEX(u132_module_lock);
static int u132_exiting;	/* presumably set during module unload - not set in this chunk */
static int u132_instances;	/* live u132 count (decremented in u132_hcd_delete) */
static struct list_head u132_static_list;	/* all u132 instances, linked via u132->u132_list */
/*
 * end of the global variables protected by u132_module_lock
 */
static struct workqueue_struct *workqueue;	/* runs monitor, ring and endp delayed work */
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128	/* one slot per possible USB bus address */
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
/* OHCI TD condition codes (0..15) as human-readable text, indexed by cc. */
static const char *cc_to_text[16] = {
	"No Error ",
	"CRC Error ",
	"Bit Stuff ",
	"Data Togg ",
	"Stall ",
	"DevNotResp ",
	"PIDCheck ",
	"UnExpPID ",
	"DataOver ",
	"DataUnder ",
	"(for hw) ",
	"(for hw) ",
	"BufferOver ",
	"BuffUnder ",
	"(for HCD) ",
	"(for HCD) "
};
/* Per-root-hub-port state shadowing the hardware port status. */
struct u132_port {
	struct u132 *u132;
	int reset;
	int enable;
	int power;
	int Status;
};
/* Maps a USB bus address to the owning udev slot index. */
struct u132_addr {
	u8 address;
};
/* One attached USB device; refcounted, slots live in u132->udev[]. */
struct u132_udev {
	struct kref kref;
	struct usb_device *usb_device;
	u8 enumeration;
	u8 udev_number;
	u8 usb_addr;
	u8 portnumber;
	u8 endp_number_in[16];	/* endp slot (1-based) per IN endpoint number */
	u8 endp_number_out[16];	/* endp slot (1-based) per OUT endpoint number */
};
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)	/* 8-entry urb ring buffer */
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
/* Overflow entry for urbs that do not fit in u132_endp->urb_list. */
struct u132_urbq {
	struct list_head urb_more;
	struct urb *urb;
};
/* Wrapper so the spinlock can be embedded by name in u132_endp. */
struct u132_spin {
	spinlock_t slock;
};
/* Per-endpoint state: a refcounted urb queue plus scheduling bookkeeping. */
struct u132_endp {
	struct kref kref;
	u8 udev_number;
	u8 endp_number;		/* 1-based index into u132->endp[] */
	u8 usb_addr;
	u8 usb_endp;
	struct u132 *u132;
	struct list_head endp_ring;	/* links endpoints sharing the same ring */
	struct u132_ring *ring;
	unsigned toggle_bits:2;
	unsigned active:1;	/* a transfer is currently in flight */
	unsigned delayed:1;
	unsigned input:1;
	unsigned output:1;
	unsigned pipetype:2;
	unsigned dequeueing:1;	/* dequeue requested; completion gives back -EINTR */
	unsigned edset_flush:1;
	unsigned spare_bits:14;
	unsigned long jiffies;	/* earliest reschedule time for interrupt endpoints */
	struct usb_host_endpoint *hep;
	struct u132_spin queue_lock;	/* protects the urb queue fields below */
	u16 queue_size;
	u16 queue_last;
	u16 queue_next;
	struct urb *urb_list[ENDP_QUEUE_SIZE];	/* fixed-size urb ring buffer */
	struct list_head urb_more;	/* overflow list of struct u132_urbq */
	struct delayed_work scheduler;
};
/* One of the MAX_U132_RINGS transfer rings multiplexed over the adapter. */
struct u132_ring {
	unsigned in_use:1;
	unsigned length:7;	/* number of endpoints attached to this ring */
	u8 number;
	struct u132 *u132;
	struct u132_endp *curr_endp;
	struct delayed_work scheduler;
};
/* Top-level adapter state; lives inside usb_hcd->hcd_priv (see u132_to_hcd). */
struct u132 {
	struct kref kref;
	struct list_head u132_list;	/* node on the global u132_static_list */
	struct mutex sw_lock;
	struct mutex scheduler_lock;
	struct u132_platform_data *board;
	struct platform_device *platform_dev;
	struct u132_ring ring[MAX_U132_RINGS];
	int sequence_num;
	int going;	/* 0 running, 1 being removed, >1 removed (see callbacks) */
	int power;
	int reset;
	int num_ports;
	u32 hc_control;		/* cached copies of the OHCI registers below */
	u32 hc_fminterval;
	u32 hc_roothub_status;
	u32 hc_roothub_a;
	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
	int flags;
	unsigned long next_statechange;
	struct delayed_work monitor;	/* periodic register poll (u132_hcd_monitor_work) */
	int num_endpoints;
	struct u132_addr addr[MAX_U132_ADDRS];
	struct u132_udev udev[MAX_U132_UDEVS];
	struct u132_port port[MAX_U132_PORTS];
	struct u132_endp *endp[MAX_U132_ENDPS];
};
/*
* these cannot be inlines because we need the structure offset!!
* Does anyone have a better way?????
*/
/*
 * Accessors for the OHCI register window behind the FTDI/ELAN adapter.
 * No trailing semicolon inside the macro bodies: callers supply their own,
 * so the expansion stays a single statement and remains usable in unbraced
 * if/else arms (the old trailing ';' expanded to a double semicolon).
 */
#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data)
#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data)
#define u132_read_pcimem(u132, member, data) \
	usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data)
#define u132_write_pcimem(u132, member, data) \
	usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data)
/* Recover the owning u132 from a udev slot embedded in u132->udev[]. */
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
	u8 udev_number = udev->udev_number;
	return container_of(udev, struct u132, udev[udev_number]);
}
/* The u132 state lives in the hcd's private area, so these convert freely. */
static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
	return (struct u132 *)(hcd->hcd_priv);
}
static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
	return container_of((void *)u132, struct usb_hcd, hcd_priv);
}
/* Mark the (virtual) host controller halted. */
static inline void u132_disable(struct u132 *u132)
{
	u132_to_hcd(u132)->state = HC_STATE_HALT;
}
#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
/* All the port change bits, shifted into the upper half of the port status. */
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
	USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
	USB_PORT_STAT_C_RESET) << 16)
/*
 * Final kref release for a struct u132: unlink it from the module-global
 * list and drop the usb_hcd reference that owns the containing memory.
 */
static void u132_hcd_delete(struct kref *kref)
{
	struct u132 *u132 = kref_to_u132(kref);
	struct platform_device *pdev = u132->platform_dev;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	u132->going += 1;
	mutex_lock(&u132_module_lock);
	list_del_init(&u132->u132_list);
	u132_instances -= 1;
	mutex_unlock(&u132_module_lock);
	dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
		"2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev);
	usb_put_hcd(hcd);
}
/* kref helpers for the top-level u132 object. */
static inline void u132_u132_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}
static inline void u132_u132_init_kref(struct u132 *u132)
{
	kref_init(&u132->kref);
}
/* Final kref release for a udev slot: reset it for reuse (no free - the
 * slots are embedded in struct u132). */
static void u132_udev_delete(struct kref *kref)
{
	struct u132_udev *udev = kref_to_u132_udev(kref);
	udev->udev_number = 0;
	udev->usb_device = NULL;
	udev->usb_addr = 0;
	udev->enumeration = 0;
}
static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_put(&udev->kref, u132_udev_delete);
}
static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_get(&udev->kref);
}
static inline void u132_udev_init_kref(struct u132 *u132,
	struct u132_udev *udev)
{
	kref_init(&udev->kref);
}
static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
{
	/* Rings have no kref of their own; work items pin the parent u132. */
	kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	/*
	 * The caller holds a u132 reference on behalf of the pending work
	 * item; drop it only when the work could not be (re)queued.  Passing
	 * the delay straight through covers both the delayed (delta > 0) and
	 * the immediate (delta == 0) case identically.
	 */
	if (!queue_delayed_work(workqueue, &ring->scheduler, delta))
		kref_put(&u132->kref, u132_hcd_delete);
}
/* Queue ring work, taking a u132 reference that the work will release. */
static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	kref_get(&u132->kref);
	u132_ring_requeue_work(u132, ring, delta);
}
static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
	/* Drop the work item's reference only if it was actually pending. */
	if (cancel_delayed_work(&ring->scheduler))
		kref_put(&u132->kref, u132_hcd_delete);
}
/*
 * Final kref release for an endpoint: detach it from its ring, clear the
 * udev endpoint-number maps (dropping the udev references the endpoint
 * held), clear its slot in u132->endp[], free it, and release the u132
 * reference taken in u132_endp_init_kref.
 */
static void u132_endp_delete(struct kref *kref)
{
	struct u132_endp *endp = kref_to_u132_endp(kref);
	struct u132 *u132 = endp->u132;
	u8 usb_addr = endp->usb_addr;
	u8 usb_endp = endp->usb_endp;
	u8 address = u132->addr[usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	u8 endp_number = endp->endp_number;
	struct usb_host_endpoint *hep = endp->hep;
	struct u132_ring *ring = endp->ring;
	struct list_head *head = &endp->endp_ring;
	ring->length -= 1;
	if (endp == ring->curr_endp) {
		/* Advance curr_endp to the next endpoint on the ring, if any. */
		if (list_empty(head)) {
			ring->curr_endp = NULL;
			list_del(head);
		} else {
			struct u132_endp *next_endp = list_entry(head->next,
				struct u132_endp, endp_ring);
			ring->curr_endp = next_endp;
			list_del(head);
		}
	} else
		list_del(head);
	if (endp->input) {
		udev->endp_number_in[usb_endp] = 0;
		u132_udev_put_kref(u132, udev);
	}
	if (endp->output) {
		udev->endp_number_out[usb_endp] = 0;
		u132_udev_put_kref(u132, udev);
	}
	u132->endp[endp_number - 1] = NULL;	/* endp_number is 1-based */
	hep->hcpriv = NULL;
	kfree(endp);
	u132_u132_put_kref(u132);
}
static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_put(&endp->kref, u132_endp_delete);
}
static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_get(&endp->kref);
}
static inline void u132_endp_init_kref(struct u132 *u132,
	struct u132_endp *endp)
{
	/* A live endpoint also pins its parent u132 (dropped in _delete). */
	kref_init(&endp->kref);
	kref_get(&u132->kref);
}
/* Queue endpoint work; a successfully queued work item holds an endp ref. */
static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
	unsigned int delta)
{
	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
		kref_get(&endp->kref);
}
static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
{
	if (cancel_delayed_work(&endp->scheduler))
		kref_put(&endp->kref, u132_endp_delete);
}
/* The monitor work item pins the u132; these manage that reference. */
static inline void u132_monitor_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
	if (queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_get(&u132->kref);
}
static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
	/* Re-queue keeping the existing reference; drop it on failure. */
	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_monitor_cancel_work(struct u132 *u132)
{
	if (cancel_delayed_work(&u132->monitor))
		kref_put(&u132->kref, u132_hcd_delete);
}
/*
 * Validate the OHCI revision and snapshot control, roothub.status,
 * roothub.a and every roothub.portstatus register into the u132 cache.
 * Returns 0 on success, a negative errno on access failure or -ENODEV
 * when the revision is neither 1.0 nor 1.1.
 */
static int read_roothub_info(struct u132 *u132)
{
	u32 revision;
	int retval;
	int port;

	retval = u132_read_pcimem(u132, revision, &revision);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device control\n", retval);
		return retval;
	}
	/* Only OHCI revision 1.0 or 1.1 hardware is supported. */
	if (((revision & 0xFF) != 0x10) && ((revision & 0xFF) != 0x11)) {
		dev_err(&u132->platform_dev->dev,
			"device revision is not valid %08X\n", revision);
		return -ENODEV;
	}
	retval = u132_read_pcimem(u132, control, &u132->hc_control);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device control\n", retval);
		return retval;
	}
	retval = u132_read_pcimem(u132, roothub.status,
		&u132->hc_roothub_status);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device reg roothub.status\n",
			retval);
		return retval;
	}
	retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a);
	if (retval) {
		dev_err(&u132->platform_dev->dev,
			"error %d accessing device reg roothub.a\n", retval);
		return retval;
	}
	for (port = 0; port < u132->num_ports; port++) {
		retval = u132_read_pcimem(u132, roothub.portstatus[port],
			&u132->hc_roothub_portstatus[port]);
		if (retval) {
			dev_err(&u132->platform_dev->dev,
				"error %d accessing device roothub.portstatus[%d]\n",
				retval, port);
			return retval;
		}
	}
	return 0;
}
/*
 * Periodic monitor: re-reads the roothub registers every 500ms while the
 * device is alive.  On a read failure the controller is declared dead and
 * the FTDI layer is told the device has gone away.
 */
static void u132_hcd_monitor_work(struct work_struct *work)
{
	struct u132 *u132 = container_of(work, struct u132, monitor.work);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		u132_monitor_put_kref(u132);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		u132_monitor_put_kref(u132);
		return;
	} else {
		int retval;
		mutex_lock(&u132->sw_lock);
		retval = read_roothub_info(u132);
		if (retval) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			u132_disable(u132);
			u132->going = 1;
			mutex_unlock(&u132->sw_lock);
			usb_hc_died(hcd);
			ftdi_elan_gone_away(u132->platform_dev);
			u132_monitor_put_kref(u132);
			return;
		} else {
			u132_monitor_requeue_work(u132, 500);
			mutex_unlock(&u132->sw_lock);
			return;
		}
	}
}
/*
 * Complete @urb with @status: pop it from the endpoint's urb ring buffer
 * (refilling one slot from the overflow list when the ring was full),
 * kick the transfer ring scheduler, drop the endpoint reference taken at
 * enqueue time and hand the urb back to usbcore.
 */
static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	struct u132_ring *ring;
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		/* Ring buffer was full: promote one urb from the overflow list. */
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	mutex_lock(&u132->scheduler_lock);
	ring = endp->ring;
	ring->in_use = 0;
	u132_ring_cancel_work(u132, ring);
	u132_ring_queue_work(u132, ring, 0);
	mutex_unlock(&u132->scheduler_lock);
	u132_endp_put_kref(u132, endp);
	usb_hcd_giveback_urb(hcd, urb, status);
}
/* Device is gone: only drop the endpoint reference; the urb is not given
 * back here (@urb and @status are intentionally unused). */
static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	u132_endp_put_kref(u132, endp);
}
/*
 * Like u132_hcd_giveback_urb but without kicking the ring scheduler or
 * dropping the endpoint reference - used when the transfer machinery is
 * being abandoned rather than advanced.
 */
static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		/* Ring buffer was full: promote one urb from the overflow list. */
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	usb_hcd_giveback_urb(hcd, urb, status);
}
/*
 * Thin wrappers around the FTDI ELAN "edset" primitives: each submits a
 * transfer on the given ring/endpoint and registers @callback for
 * completion.  The long callback signature matches usb_ftdi_elan_edset_*.
 */
static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}
static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}
static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}
static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}
/*
* must not LOCK sw_lock
*
*/
/*
 * Completion callback for an interrupt-IN chunk: copy the received bytes
 * into the urb buffer, then either resubmit for more data, reschedule the
 * endpoint after urb->interval, or give the urb back.  Every path must
 * release scheduler_lock and complete the urb exactly once.
 */
static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		/* Append the received bytes to the urb's transfer buffer. */
		u8 *u = urb->transfer_buffer + urb->actual_length;
		u8 *b = buf;
		int L = len;
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length += len;
		if ((condition_code == TD_CC_NOERROR) &&
			(urb->transfer_buffer_length > urb->actual_length)) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			if (urb->actual_length > 0) {
				int retval;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_single(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_interrupt_recv);
				if (retval != 0)
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
			} else {
				/* No data yet: poll again after urb->interval. */
				ring->in_use = 0;
				endp->active = 0;
				endp->jiffies = jiffies +
					msecs_to_jiffies(urb->interval);
				u132_ring_cancel_work(u132, ring);
				u132_ring_queue_work(u132, ring, 0);
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
			}
			return;
		} else if ((condition_code == TD_DATAUNDERRUN) &&
			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
			/* Acceptable short read: complete successfully. */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else {
			if (condition_code == TD_CC_NOERROR) {
				endp->toggle_bits = toggle_bits;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 1 & toggle_bits);
			} else if (condition_code == TD_CC_STALL) {
				endp->toggle_bits = 0x2;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 0);
			} else {
				endp->toggle_bits = 0x2;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 0);
				dev_err(&u132->platform_dev->dev, "urb=%p givin"
					"g back INTERRUPT %s\n", urb,
					cc_to_text[condition_code]);
			}
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for a bulk-OUT chunk: account the bytes sent and
 * either submit the next chunk or complete the urb.
 */
static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		urb->actual_length += len;
		endp->toggle_bits = toggle_bits;
		if (urb->transfer_buffer_length > urb->actual_length) {
			/* More payload to send: queue the next chunk. */
			int retval;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_output(u132, ring, endp, urb, address,
				endp->toggle_bits, u132_hcd_bulk_output_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for a bulk-IN chunk: copy the received bytes into
 * the urb buffer, then resubmit for more data or complete the urb with a
 * status derived from the OHCI condition code.
 */
static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		/* Append the received bytes to the urb's transfer buffer. */
		u8 *u = urb->transfer_buffer + urb->actual_length;
		u8 *b = buf;
		int L = len;
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length += len;
		if ((condition_code == TD_CC_NOERROR) &&
			(urb->transfer_buffer_length > urb->actual_length)) {
			/* Buffer not full yet: request the next chunk. */
			int retval;
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, endp->toggle_bits,
				u132_hcd_bulk_input_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (condition_code == TD_CC_NOERROR) {
			/* Buffer full: complete (cc_to_error[NOERROR] is 0). */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else if ((condition_code == TD_DATAUNDERRUN) &&
			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
			/* Acceptable short read: complete successfully. */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else if (condition_code == TD_DATAUNDERRUN) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			dev_warn(&u132->platform_dev->dev, "urb=%p(SHORT NOT OK"
				") giving back BULK IN %s\n", urb,
				cc_to_text[condition_code]);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else if (condition_code == TD_CC_STALL) {
			endp->toggle_bits = 0x2;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else {
			endp->toggle_bits = 0x2;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
			dev_err(&u132->platform_dev->dev, "urb=%p giving back B"
				"ULK IN code=%d %s\n", urb, condition_code,
				cc_to_text[condition_code]);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for the zero-length stage of a control transfer:
 * nothing left to do except complete the urb (success unless the device
 * is going away or a dequeue was requested).
 */
static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for the data stage of a control-IN transfer: copy
 * the received bytes, then queue the zero-length status stage (or give
 * the urb back on STALL or other error).
 */
static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer;
		u8 *b = buf;
		int L = len;
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length = len;	/* data stage arrives in one piece */
		if ((condition_code == TD_CC_NOERROR) || ((condition_code ==
			TD_DATAUNDERRUN) && ((urb->transfer_flags &
			URB_SHORT_NOT_OK) == 0))) {
			int retval;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0x3,
				u132_hcd_configure_empty_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (condition_code == TD_CC_STALL) {
			mutex_unlock(&u132->scheduler_lock);
			dev_warn(&u132->platform_dev->dev, "giving back SETUP I"
				"NPUT STALL urb %p\n", urb);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			dev_err(&u132->platform_dev->dev, "giving back SETUP IN"
				"PUT %s urb %p\n", cc_to_text[condition_code],
				urb);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for the zero-length IN stage of a control-OUT
 * transfer: just complete the urb.
 */
static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for the SETUP stage of a control transfer: queue
 * the following stage - a data IN read for control-IN pipes, otherwise a
 * zero-length IN (presumably the status stage - the callback names
 * suggest so; confirm against usb_ftdi_elan_edset_input semantics).
 */
static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		if (usb_pipein(urb->pipe)) {
			int retval;
			struct u132_ring *ring = endp->ring;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0,
				u132_hcd_configure_input_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			int retval;
			struct u132_ring *ring = endp->ring;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0,
				u132_hcd_configure_empty_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		}
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback for the final stage of SET_ADDRESS during
 * enumeration: switch the endpoint over to the device's new address and
 * complete the urb.
 */
static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb,
	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		/* Address 0 no longer maps to this device; use the real one. */
		u132->addr[0].address = 0;
		endp->usb_addr = udev->usb_addr;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * Completion callback after the SET_ADDRESS SETUP packet was sent (still
 * addressed to device 0): queue the zero-length IN that finishes the
 * enumeration exchange.
 */
static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		mutex_unlock(&u132->scheduler_lock);
		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
			ring->number, endp, urb, 0, endp->usb_endp, 0,
			u132_hcd_enumeration_empty_recv);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * ftdi-elan completion callback: the empty (status) packet of an
 * initial control transfer has been sent.  On success the control
 * transfer is finished and the URB is given back with status 0;
 * otherwise the URB is completed with the appropriate error.
 *
 * scheduler_lock protects the device-state checks and is always
 * released before the URB is given back.
 */
static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* device already gone: drop the URB without completion */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* an unlink was requested for this endpoint */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		/* removal in progress: complete with -ENODEV */
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		/* control transfer complete */
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		/* URB was unlinked while the transfer was in flight */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * ftdi-elan completion callback: the data (IN) stage of an initial
 * control transfer has arrived.  The received bytes are copied into
 * the URB's transfer buffer and the empty (status) stage is queued;
 * on error or unlink the URB is completed instead.
 *
 * scheduler_lock protects the device-state checks and is always
 * released before completing the URB or queueing the next stage.
 */
static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* device already gone: drop the URB without completion */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	}
	if (endp->dequeueing) {
		/* an unlink was requested for this endpoint */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	}
	if (u132->going > 0) {
		/* removal in progress: complete with -ENODEV */
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	}
	if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *dst = urb->transfer_buffer;
		int pos;
		int retval;
		/* copy the received bytes into the URB's buffer */
		for (pos = 0; pos < len; pos++)
			dst[pos] = buf[pos];
		urb->actual_length = len;
		mutex_unlock(&u132->scheduler_lock);
		/* queue the status (empty) stage of the control transfer */
		retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
			ring->number, endp, urb, address, endp->usb_endp, 0x3,
			u132_hcd_initial_empty_sent);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	}
	/* URB was unlinked while the transfer was in flight */
	dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
		"unlinked=%d\n", urb, urb->unlinked);
	mutex_unlock(&u132->scheduler_lock);
	u132_hcd_giveback_urb(u132, endp, urb, 0);
}
/*
 * ftdi-elan completion callback: the setup packet of an initial
 * control transfer has been sent.  On success the data (IN) stage is
 * queued via u132_hcd_initial_input_recv; on error or unlink the URB
 * is completed instead.
 *
 * scheduler_lock protects the device-state checks and is always
 * released before completing the URB or queueing the next stage.
 */
static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* device already gone: drop the URB without completion */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* an unlink was requested for this endpoint */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		/* removal in progress: complete with -ENODEV */
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		mutex_unlock(&u132->scheduler_lock);
		/* queue the data (IN) stage of the control transfer */
		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
			ring->number, endp, urb, address, endp->usb_endp, 0,
			u132_hcd_initial_input_recv);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		/* URB was unlinked while the transfer was in flight */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
* this work function is only executed from the work queue
*
*/
/*
 * Per-ring work item: find the next endpoint on the ring with queued
 * URBs that are due (not delayed, or delay expired), make it the
 * ring's current endpoint and kick its endpoint work.  If everything
 * is still delayed, re-arm this ring work with a wakeup delay; the
 * ring kref taken by the queuer is dropped on every exit path except
 * the re-arm.
 *
 * NOTE(review): "wakeup" keeps the LARGEST remaining delay among the
 * delayed endpoints; an earliest-deadline scheduler would use the
 * smallest.  This matches the original code -- confirm intentional.
 */
static void u132_hcd_ring_work_scheduler(struct work_struct *work)
{
	struct u132_ring *ring =
		container_of(work, struct u132_ring, scheduler.work);
	struct u132 *u132 = ring->u132;
	mutex_lock(&u132->scheduler_lock);
	if (ring->in_use) {
		/* a transfer is already active on this ring */
		mutex_unlock(&u132->scheduler_lock);
		u132_ring_put_kref(u132, ring);
		return;
	} else if (ring->curr_endp) {
		struct u132_endp *last_endp = ring->curr_endp;
		struct list_head *scan;
		struct list_head *head = &last_endp->endp_ring;
		unsigned long wakeup = 0;
		/* scan the other endpoints on the ring, starting after
		 * the current one, for due work */
		list_for_each(scan, head) {
			struct u132_endp *endp = list_entry(scan,
				struct u132_endp, endp_ring);
			if (endp->queue_next == endp->queue_last) {
				/* nothing queued on this endpoint */
			} else if ((endp->delayed == 0)
				|| time_after_eq(jiffies, endp->jiffies)) {
				ring->curr_endp = endp;
				u132_endp_cancel_work(u132, last_endp);
				u132_endp_queue_work(u132, last_endp, 0);
				mutex_unlock(&u132->scheduler_lock);
				u132_ring_put_kref(u132, ring);
				return;
			} else {
				unsigned long delta = endp->jiffies - jiffies;
				if (delta > wakeup)
					wakeup = delta;
			}
		}
		/* finally consider the current endpoint itself */
		if (last_endp->queue_next == last_endp->queue_last) {
			/* nothing queued */
		} else if ((last_endp->delayed == 0) || time_after_eq(jiffies,
			last_endp->jiffies)) {
			u132_endp_cancel_work(u132, last_endp);
			u132_endp_queue_work(u132, last_endp, 0);
			mutex_unlock(&u132->scheduler_lock);
			u132_ring_put_kref(u132, ring);
			return;
		} else {
			unsigned long delta = last_endp->jiffies - jiffies;
			if (delta > wakeup)
				wakeup = delta;
		}
		if (wakeup > 0) {
			/* everything delayed: try again later; the ring
			 * kref is kept by the requeued work */
			u132_ring_requeue_work(u132, ring, wakeup);
			mutex_unlock(&u132->scheduler_lock);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			u132_ring_put_kref(u132, ring);
			return;
		}
	} else {
		/* no endpoint has ever been attached to this ring */
		mutex_unlock(&u132->scheduler_lock);
		u132_ring_put_kref(u132, ring);
		return;
	}
}
/*
 * Per-endpoint work item: start the next queued URB on this endpoint
 * if neither the endpoint nor its ring is busy.  Dispatch depends on
 * the pipe type:
 *   - INTERRUPT: single IN transfer via edset_single
 *   - CONTROL:   setup stage via edset_setup; address-0 and
 *                enumeration endpoints get dedicated callbacks
 *   - BULK:      IN via edset_input / OUT via edset_output
 * The endpoint kref taken by the queuer is dropped on every exit path
 * that does not start a transfer.
 */
static void u132_hcd_endp_work_scheduler(struct work_struct *work)
{
	struct u132_ring *ring;
	struct u132_endp *endp =
		container_of(work, struct u132_endp, scheduler.work);
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	ring = endp->ring;
	if (endp->edset_flush) {
		/* a dequeue asked for the hardware ED to be flushed */
		endp->edset_flush = 0;
		if (endp->dequeueing)
			usb_ftdi_elan_edset_flush(u132->platform_dev,
				ring->number, endp);
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (endp->active) {
		/* a transfer is already in flight on this endpoint */
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (ring->in_use) {
		/* another endpoint currently owns the ring */
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (endp->queue_next == endp->queue_last) {
		/* nothing queued */
		mutex_unlock(&u132->scheduler_lock);
		u132_endp_put_kref(u132, endp);
		return;
	} else if (endp->pipetype == PIPE_INTERRUPT) {
		u8 address = u132->addr[endp->usb_addr].address;
		if (ring->in_use) {
			mutex_unlock(&u132->scheduler_lock);
			u132_endp_put_kref(u132, endp);
			return;
		} else {
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_single(u132, ring, endp, urb, address,
				endp->toggle_bits, u132_hcd_interrupt_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		}
	} else if (endp->pipetype == PIPE_CONTROL) {
		u8 address = u132->addr[endp->usb_addr].address;
		if (ring->in_use) {
			mutex_unlock(&u132->scheduler_lock);
			u132_endp_put_kref(u132, endp);
			return;
		} else if (address == 0) {
			/* no address mapping yet: initial control phase */
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_setup(u132, ring, endp, urb, address,
				0x2, u132_hcd_initial_setup_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (endp->usb_addr == 0) {
			/* endpoint still on USB address 0: enumeration
			 * (address assignment) phase */
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
				u132_hcd_enumeration_address_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			/* ordinary control transfer to an addressed device */
			int retval;
			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
				endp->queue_next];
			address = u132->addr[endp->usb_addr].address;
			endp->active = 1;
			ring->curr_endp = endp;
			ring->in_use = 1;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_setup(u132, ring, endp, urb, address,
				0x2, u132_hcd_configure_setup_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		}
	} else {
		/* bulk pipes: direction decided at endpoint creation */
		if (endp->input) {
			u8 address = u132->addr[endp->usb_addr].address;
			if (ring->in_use) {
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
				return;
			} else {
				int retval;
				struct urb *urb = endp->urb_list[
					ENDP_QUEUE_MASK & endp->queue_next];
				endp->active = 1;
				ring->curr_endp = endp;
				ring->in_use = 1;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_input(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_bulk_input_recv);
				if (retval == 0) {
				} else
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
				return;
			}
		} else {	/* output pipe */
			u8 address = u132->addr[endp->usb_addr].address;
			if (ring->in_use) {
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
				return;
			} else {
				int retval;
				struct urb *urb = endp->urb_list[
					ENDP_QUEUE_MASK & endp->queue_next];
				endp->active = 1;
				ring->curr_endp = endp;
				ring->in_use = 1;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_output(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_bulk_output_sent);
				if (retval == 0) {
				} else
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
				return;
			}
		}
	}
}
#ifdef CONFIG_PM
/* Record the software power state of root-hub port 'pn'.  Pure
 * bookkeeping -- no hardware access happens here. */
static void port_power(struct u132 *u132, int pn, int is_on)
{
	u132->port[pn].power = is_on;
}
#endif
/*
 * Track the controller's software power state.  Powering off also
 * marks the HCD as halted; powering on is a no-op if already powered.
 * The hub is inactive unless the port is powered.
 */
static void u132_power(struct u132 *u132, int is_on)
{
	struct usb_hcd *hcd = u132_to_hcd(u132);

	if (!is_on) {
		u132->power = 0;
		hcd->state = HC_STATE_HALT;
		return;
	}
	if (u132->power)
		return;		/* already powered on */
	u132->power = 1;
}
/*
 * Reprogram the OHCI periodic scheduling registers from the cached
 * frame interval: rewrite fminterval with the FIT bit toggled and set
 * periodicstart to 90% of the frame interval.
 * Returns 0 or the error from the first failing register access.
 * Note: the second argument of u132_read/write_pcimem is a register
 * NAME consumed by the macro, distinct from the local variables.
 */
static int u132_periodic_reinit(struct u132 *u132)
{
	int retval;
	u32 fi = u132->hc_fminterval & 0x03fff;
	u32 fit;
	u32 fminterval;
	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	fit = fminterval & FIT;
	/* toggle FIT so the controller latches the new frame interval */
	retval = u132_write_pcimem(u132, fminterval,
		(fit ^ FIT) | u132->hc_fminterval);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, periodicstart,
		((9 * fi) / 10) & 0x3fff);
	if (retval)
		return retval;
	return 0;
}
/*
 * Map an OHCI HostControllerFunctionalState value to a printable
 * name; returns "?" for anything unrecognised.
 */
static char *hcfs2string(int state)
{
	if (state == OHCI_USB_RESET)
		return "reset";
	if (state == OHCI_USB_RESUME)
		return "resume";
	if (state == OHCI_USB_OPER)
		return "operational";
	if (state == OHCI_USB_SUSPEND)
		return "suspend";
	return "?";
}
/*
 * One-time controller initialisation: disable the hardware, mask the
 * master interrupt enable, and discover the number of root-hub ports
 * from roothub.a if not already known.
 * Returns 0, a register-access error, or -EINVAL if the hardware
 * reports more ports than MAX_U132_PORTS.
 */
static int u132_init(struct u132 *u132)
{
	int retval;
	u32 control;
	u132_disable(u132);
	u132->next_statechange = jiffies;
	/* mask the master interrupt enable before touching anything else */
	retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	if (u132->num_ports == 0) {
		/* first run: read the port count from the root hub */
		u32 rh_a = -1;
		retval = u132_read_pcimem(u132, roothub.a, &rh_a);
		if (retval)
			return retval;
		u132->num_ports = rh_a & RH_A_NDP;
		retval = read_roothub_info(u132);
		if (retval)
			return retval;
	}
	if (u132->num_ports > MAX_U132_PORTS)
		return -EINVAL;
	return 0;
}
/* Start an OHCI controller: reset the USB bus and the controller,
 * bring the bus to the OPERATIONAL state, and configure the
 * interrupt masks and root-hub power.
 */
static int u132_run(struct u132 *u132)
{
	int retval;
	u32 control;
	u32 status;
	u32 fminterval;
	u32 periodicstart;
	u32 cmdstatus;
	u32 roothub_a;
	int mask = OHCI_INTR_INIT;
	int first = u132->hc_fminterval == 0;
	int sleep_time = 0;
	int reset_timeout = 30;	/* ... allow extra time */
	u132_disable(u132);
	if (first) {
		/* first run: cache the hardware's frame interval and
		 * derive the largest-data-packet counter from it */
		u32 temp;
		retval = u132_read_pcimem(u132, fminterval, &temp);
		if (retval)
			return retval;
		u132->hc_fminterval = temp & 0x3fff;
		u132->hc_fminterval |= FSMP(u132->hc_fminterval) << 16;
	}
	retval = u132_read_pcimem(u132, control, &u132->hc_control);
	if (retval)
		return retval;
	dev_info(&u132->platform_dev->dev, "resetting from state '%s', control "
		"= %08X\n", hcfs2string(u132->hc_control & OHCI_CTRL_HCFS),
		u132->hc_control);
	/* pick the bus-reset strategy and settle delay from the
	 * controller's current functional state */
	switch (u132->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_OPER:
		sleep_time = 0;
		break;
	case OHCI_USB_SUSPEND:
	case OHCI_USB_RESUME:
		u132->hc_control &= OHCI_CTRL_RWC;
		u132->hc_control |= OHCI_USB_RESUME;
		sleep_time = 10;
		break;
	default:
		u132->hc_control &= OHCI_CTRL_RWC;
		u132->hc_control |= OHCI_USB_RESET;
		sleep_time = 50;
		break;
	}
	retval = u132_write_pcimem(u132, control, u132->hc_control);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	msleep(sleep_time);
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	if (!(roothub_a & RH_A_NPS)) {
		int temp;	/* power down each port */
		for (temp = 0; temp < u132->num_ports; temp++) {
			retval = u132_write_pcimem(u132,
				roothub.portstatus[temp], RH_PS_LSDA);
			if (retval)
				return retval;
		}
	}
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
retry:
	/* issue a host-controller reset and poll until it completes */
	retval = u132_read_pcimem(u132, cmdstatus, &status);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, cmdstatus, OHCI_HCR);
	if (retval)
		return retval;
extra:	{
		retval = u132_read_pcimem(u132, cmdstatus, &status);
		if (retval)
			return retval;
		if (0 != (status & OHCI_HCR)) {
			if (--reset_timeout == 0) {
				dev_err(&u132->platform_dev->dev, "USB HC reset"
					" timed out!\n");
				return -ENODEV;
			} else {
				msleep(5);
				goto extra;
			}
		}
	}
	if (u132->flags & OHCI_QUIRK_INITRESET) {
		retval = u132_write_pcimem(u132, control, u132->hc_control);
		if (retval)
			return retval;
		retval = u132_read_pcimem(u132, control, &control);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, ed_controlhead, 0x00000000);
	if (retval)
		return retval;
	/* NOTE(review): ed_bulkhead is written with 0x11000000 rather
	 * than 0 like ed_controlhead -- confirm this magic value is
	 * intentional for the ftdi-elan transport */
	retval = u132_write_pcimem(u132, ed_bulkhead, 0x11000000);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, hcca, 0x00000000);
	if (retval)
		return retval;
	retval = u132_periodic_reinit(u132);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, periodicstart, &periodicstart);
	if (retval)
		return retval;
	if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) {
		/* scheduling registers did not stick: retry once with
		 * the INITRESET quirk enabled */
		if (!(u132->flags & OHCI_QUIRK_INITRESET)) {
			u132->flags |= OHCI_QUIRK_INITRESET;
			goto retry;
		} else
			dev_err(&u132->platform_dev->dev, "init err(%08x %04x)"
				"\n", fminterval, periodicstart);
	}			/* start controller operations */
	u132->hc_control &= OHCI_CTRL_RWC;
	u132->hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER;
	retval = u132_write_pcimem(u132, control, u132->hc_control);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, cmdstatus, OHCI_BLF);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, cmdstatus, &cmdstatus);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_DRWE);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, intrstatus, mask);
	if (retval)
		return retval;
	/* NOTE(review): all interrupt sources are written to the
	 * intrDISABLE register here (masked off), not enabled --
	 * presumably the ftdi-elan transport polls instead of using
	 * hardware interrupts; confirm */
	retval = u132_write_pcimem(u132, intrdisable,
		OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO |
		OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH |
		OHCI_INTR_SO);
	if (retval)
		return retval;	/* handle root hub init quirks ... */
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	roothub_a &= ~(RH_A_PSM | RH_A_OCPM);
	if (u132->flags & OHCI_QUIRK_SUPERIO) {
		roothub_a |= RH_A_NOCP;
		roothub_a &= ~(RH_A_POTPGT | RH_A_NPS);
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	} else if ((u132->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
		roothub_a |= RH_A_NPS;
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_LPSC);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, roothub.b,
		(roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	/* wait out the power-on-to-power-good time (POTPGT, 2ms units) */
	mdelay((roothub_a >> 23) & 0x1fe);
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	return 0;
}
/*
 * usb_hcd 'stop' entry point: power the controller down under
 * sw_lock.  Does nothing but log if the device has been, or is
 * being, removed.
 */
static void u132_hcd_stop(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p) has b"
			"een removed %d\n", u132, hcd, u132->going);
		return;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
			"ed\n", hcd);
		return;
	}
	mutex_lock(&u132->sw_lock);
	msleep(100);
	u132_power(u132, 0);
	mutex_unlock(&u132->sw_lock);
}
/*
 * usb_hcd 'start' entry point: apply PCI-bridge quirk flags based on
 * the platform data's vendor/device IDs, then start the controller
 * via u132_run() under sw_lock.  On failure the hardware is disabled
 * and 'going' is set so later entry points refuse service.
 * Returns 0, -ENODEV/-ESHUTDOWN when the device is (being) removed,
 * or the error from u132_run().
 */
static int u132_hcd_start(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else if (hcd->self.controller) {
		int retval;
		struct platform_device *pdev =
			to_platform_device(hcd->self.controller);
		u16 vendor = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->vendor;
		u16 device = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->device;
		mutex_lock(&u132->sw_lock);
		msleep(10);
		/* NOTE(review): the AMD756 case ASSIGNS flags while the
		 * Compaq case ORs into them -- confirm the overwrite of
		 * any earlier flags is intentional */
		if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
			u132->flags = OHCI_QUIRK_AMD756;
		} else if (vendor == PCI_VENDOR_ID_OPTI && device == 0xc861) {
			dev_err(&u132->platform_dev->dev, "WARNING: OPTi workar"
				"ounds unavailable\n");
		} else if (vendor == PCI_VENDOR_ID_COMPAQ && device == 0xa0f8)
			u132->flags |= OHCI_QUIRK_ZFMICRO;
		retval = u132_run(u132);
		if (retval) {
			/* start failed: disable and mark as going away */
			u132_disable(u132);
			u132->going = 1;
		}
		msleep(100);
		mutex_unlock(&u132->sw_lock);
		return retval;
	} else {
		dev_err(&u132->platform_dev->dev, "platform_device missing\n");
		return -ENODEV;
	}
}
/*
 * usb_hcd 'reset' entry point: run the one-time controller
 * initialisation under sw_lock.  On failure the hardware is disabled
 * and 'going' is set so later entry points refuse service.
 * Returns 0, -ENODEV/-ESHUTDOWN when the device is (being) removed,
 * or the error from u132_init().
 */
static int u132_hcd_reset(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	int retval;

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	}
	mutex_lock(&u132->sw_lock);
	retval = u132_init(u132);
	if (retval) {
		u132_disable(u132);
		u132->going = 1;
	}
	mutex_unlock(&u132->sw_lock);
	return retval;
}
/*
 * Allocate and initialise a new interrupt endpoint, register it with
 * the HCD layer and the u132 bookkeeping, attach it to ring 0, and
 * queue the first URB delayed by urb->interval milliseconds.
 * Returns 0, -ENOMEM, or the error from usb_hcd_link_urb_to_ep().
 * Note: the endp is kmalloc'd, so every field used later must be (and
 * is) initialised explicitly below.
 */
static int create_endpoint_and_queue_int(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	/* interrupt endpoints always live on ring 0 */
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	urb->hcpriv = u132;
	/* interrupt URBs are scheduled after the polling interval */
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
	return 0;
}
/*
 * Append an interrupt URB to an existing endpoint's queue, marking it
 * delayed by urb->interval milliseconds.  If the fixed-size ring is
 * full the URB spills onto the kmalloc'd overflow list.
 * Returns 0 or -ENOMEM.  Caller holds the endpoint queue lock.
 */
static int queue_int_on_old_endpoint(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp, u8 address)
{
	struct u132_urbq *urbq;

	urb->hcpriv = u132;
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	if (endp->queue_size < ENDP_QUEUE_SIZE) {
		/* room in the fixed ring: append directly */
		endp->queue_size += 1;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		return 0;
	}
	/* ring full: spill onto the overflow list */
	urbq = kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
	if (urbq == NULL)
		return -ENOMEM;
	urbq->urb = urb;
	list_add_tail(&urbq->urb_more, &endp->urb_more);
	endp->queue_size += 1;
	return 0;
}
/*
 * Allocate and initialise a new bulk endpoint, register it with the
 * HCD layer and the u132 bookkeeping, attach it to the direction's
 * ring (3 for IN, 2 for OUT), and queue the first URB immediately.
 * Returns 0, -ENOMEM, or the error from usb_hcd_link_urb_to_ep().
 * Note: the endp is kmalloc'd, so every field used later must be (and
 * is) initialised explicitly below.
 */
static int create_endpoint_and_queue_bulk(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	int ring_number;
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		ring_number = 3;	/* bulk IN ring */
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		ring_number = 2;	/* bulk OUT ring */
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	ring = endp->ring = &u132->ring[ring_number - 1];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	urb->hcpriv = u132;
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, 0);
	return 0;
}
/*
 * Append a bulk URB to an existing endpoint's queue.  If the
 * fixed-size ring is full the URB spills onto the kmalloc'd overflow
 * list.  Returns 0 or -ENOMEM.  Caller holds the endpoint queue lock.
 */
static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp, u8 address)
{
	struct u132_urbq *urbq;

	urb->hcpriv = u132;
	if (endp->queue_size < ENDP_QUEUE_SIZE) {
		/* room in the fixed ring: append directly */
		endp->queue_size += 1;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		return 0;
	}
	/* ring full: spill onto the overflow list */
	urbq = kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
	if (urbq == NULL)
		return -ENOMEM;
	urbq->urb = urb;
	list_add_tail(&urbq->urb_more, &endp->urb_more);
	endp->queue_size += 1;
	return 0;
}
/*
 * Allocate and initialise a new control endpoint on ring 0, register
 * it with the HCD layer and the u132 bookkeeping, and queue the first
 * URB immediately.
 * Returns 0, -ENOMEM, or the error from usb_hcd_link_urb_to_ep().
 *
 * Refactor: the original duplicated ~20 identical lines in the
 * usb_addr == 0 and usb_addr > 0 branches; only the udev kref /
 * enumeration handling actually differed, so the branches are merged.
 */
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	u8 address;
	struct u132_udev *udev;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	/* control endpoints always live on ring 0 */
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	u132_endp_get_kref(u132, endp);
	address = u132->addr[usb_addr].address;
	udev = &u132->udev[address];
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->input = 1;	/* control endpoints go both ways */
	endp->output = 1;
	endp->pipetype = usb_pipetype(urb->pipe);
	if (usb_addr == 0) {
		/* default address: first contact with the device */
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
	} else {
		/* addressed device: mark enumeration as complete */
		u132_udev_get_kref(u132, udev);
		udev->enumeration = 2;
	}
	udev->endp_number_in[usb_endp] = endp_number;
	udev->endp_number_out[usb_endp] = endp_number;
	urb->hcpriv = u132;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, 0);
	return 0;
}
/*
 * Append a control URB to an existing endpoint's queue.  For OUT
 * transfers to the default address 0 this is where the driver
 * allocates an internal device slot and patches the address into the
 * setup packet (byte 2 -- presumably rewriting a SET_ADDRESS request;
 * TODO confirm).  Returns 0, -ENOMEM, or -EINVAL when no device slot
 * is free.  Caller holds the endpoint queue lock.
 */
static int queue_control_on_old_endpoint(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp)
{
	if (usb_addr == 0) {
		if (usb_pipein(urb->pipe)) {
			/* IN at address 0: just queue it */
			urb->hcpriv = u132;
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		} else {	/* usb_pipeout(urb->pipe) */
			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
			int I = MAX_U132_UDEVS;
			int i = 0;
			/* find a free internal device slot; udev[0] is
			 * intentionally never used (address 0 itself) */
			while (--I > 0) {
				struct u132_udev *udev = &u132->udev[++i];
				if (udev->usb_device) {
					continue;
				} else {
					udev->enumeration = 1;
					u132->addr[0].address = i;
					endp->udev_number = i;
					udev->udev_number = i;
					udev->usb_addr = usb_dev->devnum;
					u132_udev_init_kref(u132, udev);
					udev->endp_number_in[usb_endp] =
						endp->endp_number;
					u132_udev_get_kref(u132, udev);
					udev->endp_number_out[usb_endp] =
						endp->endp_number;
					udev->usb_device = usb_dev;
					/* patch the assigned address into
					 * byte 2 of the setup packet */
					((u8 *) (urb->setup_packet))[2] =
						addr->address = i;
					u132_udev_get_kref(u132, udev);
					break;
				}
			}
			if (I == 0) {
				dev_err(&u132->platform_dev->dev, "run out of d"
					"evice space\n");
				return -EINVAL;
			}
			urb->hcpriv = u132;
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		}
	} else {		/*(usb_addr > 0) */
		/* addressed device: mark enumeration complete and queue */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		urb->hcpriv = u132;
		if (udev->enumeration != 2)
			udev->enumeration = 2;
		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
				urb;
		} else {
			struct u132_urbq *urbq =
				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
			if (urbq == NULL) {
				endp->queue_size -= 1;
				return -ENOMEM;
			} else {
				list_add_tail(&urbq->urb_more, &endp->urb_more);
				urbq->urb = urb;
			}
		}
		return 0;
	}
}
/*
 * usb_hcd entry point: queue an URB.
 *
 * Dispatches on pipe type: INTERRUPT and BULK URBs are appended to an
 * existing endpoint queue or trigger creation of a new endpoint;
 * ISOCHRONOUS is rejected (unsupported by the hardware); everything
 * else is handled as a CONTROL transfer.
 * Returns 0 on success or a negative errno.
 *
 * Fix: removed a dead hex-dump of the control setup packet that was
 * formatted into a local buffer but never printed or used (leftover
 * debug code: urb_size/b/i/data/d/m/l and the formatting loop).
 */
static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
	gfp_t mem_flags)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (irqs_disabled()) {
		if (__GFP_WAIT & mem_flags) {
			/* sleeping allocation requested from atomic context */
			printk(KERN_ERR "invalid context for function that migh"
				"t sleep\n");
			return -EINVAL;
		}
	}
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		return -ESHUTDOWN;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		struct usb_device *usb_dev = urb->dev;
		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				/* endpoint exists: append under its lock */
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_int_on_old_endpoint(
						u132, udev, urb,
						usb_dev, endp,
						usb_addr, usb_endp,
						address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp,
						msecs_to_jiffies(urb->interval))
						;
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else {	/*(endp == NULL) */
				return create_endpoint_and_queue_int(u132, udev,
					urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
			}
		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			dev_err(&u132->platform_dev->dev, "the hardware does no"
				"t support PIPE_ISOCHRONOUS\n");
			return -EINVAL;
		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				/* endpoint exists: append under its lock */
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_bulk_on_old_endpoint(
						u132, udev, urb,
						usb_dev, endp,
						usb_addr, usb_endp,
						address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp, 0);
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_bulk(u132,
					udev, urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
		} else {
			/* control transfer */
			struct u132_endp *endp = urb->ep->hcpriv;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_control_on_old_endpoint(
						u132, urb, usb_dev,
						endp, usb_addr,
						usb_endp);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp, 0);
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_control(u132,
					urb, usb_dev, usb_addr, usb_endp,
					mem_flags);
		}
	}
}
/*
 * Remove @urb from the endpoint's urb_more overflow chain and give it back
 * to the USB core with status 0.
 *
 * Called with endp->queue_lock held by u132_endp_urb_dequeue() after the
 * URB was not found in the urb_list ring.
 *
 * Fix: the matching struct u132_urbq node was unlinked but never freed,
 * leaking one allocation per dequeued overflow URB.  The sibling path in
 * u132_endp_urb_dequeue() kfree()s the node it pops from this chain, so
 * we must do the same here.
 *
 * Returns 0 on success, -EINVAL if the URB is not on the chain.
 */
static int dequeue_from_overflow_chain(struct u132 *u132,
	struct u132_endp *endp, struct urb *urb)
{
	struct list_head *scan;
	struct list_head *head = &endp->urb_more;
	list_for_each(scan, head) {
		struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
			urb_more);
		if (urbq->urb == urb) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			list_del(scan);
			kfree(urbq);	/* was leaked before this fix */
			endp->queue_size -= 1;
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, 0);
			return 0;
		} else
			continue;
	}
	dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
		"[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
		"\n", urb, endp->endp_number, endp, endp->ring->number,
		endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
		endp->usb_endp, endp->usb_addr, endp->queue_size,
		endp->queue_next, endp->queue_last);
	return -EINVAL;
}
/*
 * Unlink @urb from @endp and give it back to the USB core with @status.
 *
 * Three cases, all decided under endp->queue_lock:
 *  - URB is at the head of the urb_list ring: if the hardware is working
 *    on it, flag a flush and let the scheduler work item finish the job;
 *    otherwise abandon it directly;
 *  - URB is elsewhere in the ring: shift the following entries down one
 *    slot to close the gap, refill the freed slot from the overflow chain
 *    if possible, then give the URB back here;
 *  - URB is on the urb_more overflow chain: delegate to
 *    dequeue_from_overflow_chain().
 *
 * Returns 0 on success or a negative errno.
 */
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	int rc;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return rc;
	}
	if (endp->queue_size == 0) {
		/* nothing queued at all - the URB cannot be here */
		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
			endp->endp_number, endp, endp->ring->number,
			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
			endp->usb_endp, endp->usb_addr);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return -EINVAL;
	}
	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
		if (endp->active) {
			/* hardware may be transferring this URB right now:
			   ask the work item to flush it instead of racing */
			endp->dequeueing = 1;
			endp->edset_flush = 1;
			u132_endp_queue_work(u132, endp, 0);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return 0;
		} else {
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			u132_hcd_abandon_urb(u132, endp, urb, status);
			return 0;
		}
	} else {
		u16 queue_list = 0;
		u16 queue_size = endp->queue_size;
		u16 queue_scan = endp->queue_next;
		struct urb **urb_slot = NULL;
		/* first loop: locate the URB in the ring (head already
		   checked above, so start scanning at queue_next + 1) */
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan]) {
				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
					queue_scan];
				break;
			} else
				continue;
		}
		/* second loop: close the gap by shifting each later entry
		   one slot towards the head; continues from queue_scan */
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan];
			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
				queue_scan];
		}
		if (urb_slot) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			endp->queue_size -= 1;
			if (list_empty(&endp->urb_more)) {
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
			} else {
				/* refill the freed tail slot from the
				   overflow chain */
				struct list_head *next = endp->urb_more.next;
				struct u132_urbq *urbq = list_entry(next,
					struct u132_urbq, urb_more);
				list_del(next);
				*urb_slot = urbq->urb;
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				kfree(urbq);
			} urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, status);
			return 0;
		} else if (list_empty(&endp->urb_more)) {
			dev_err(&u132->platform_dev->dev, "urb=%p not found in "
				"endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
				"=%d size=%d next=%04X last=%04X\n", urb,
				endp->endp_number, endp, endp->ring->number,
				endp->input ? 'I' : ' ',
				endp->output ? 'O' : ' ', endp->usb_endp,
				endp->usb_addr, endp->queue_size,
				endp->queue_next, endp->queue_last);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return -EINVAL;
		} else {
			/* not in the ring - try the overflow chain */
			int retval;
			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
			retval = dequeue_from_overflow_chain(u132, endp,
				urb);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return retval;
		}
	}
}
/*
 * hc_driver ->urb_dequeue: resolve the URB's pipe to our per-endpoint
 * bookkeeping structure and delegate the actual unlink to
 * u132_endp_urb_dequeue().
 */
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	struct u132_udev *udev;
	struct u132_endp *endp;
	u8 usb_addr, usb_endp, address, endp_number;

	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	usb_addr = usb_pipedevice(urb->pipe);
	usb_endp = usb_pipeendpoint(urb->pipe);
	address = u132->addr[usb_addr].address;
	udev = &u132->udev[address];
	/* IN and OUT endpoints are tracked in separate lookup tables */
	endp_number = usb_pipein(urb->pipe) ?
		udev->endp_number_in[usb_endp] :
		udev->endp_number_out[usb_endp];
	endp = u132->endp[endp_number - 1];
	return u132_endp_urb_dequeue(u132, endp, urb, status);
}
/*
 * hc_driver ->endpoint_disable: drop our reference on the per-endpoint
 * structure attached to @hep, unless the device is already gone.
 */
static void u132_endpoint_disable(struct usb_hcd *hcd,
	struct usb_host_endpoint *hep)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	struct u132_endp *endp;

	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
			") has been removed %d\n", u132, hcd, hep,
			u132->going);
		return;
	}
	endp = hep->hcpriv;
	if (endp)
		u132_endp_put_kref(u132, endp);
}
/*
 * hc_driver ->get_frame_number: not implemented for this hardware;
 * always reports frame 0 after a 100 ms delay (unchanged TODO).
 */
static int u132_get_frame(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	}
	dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
	msleep(100);
	return 0;	/* placeholder frame number */
}
/*
 * Build a USB hub descriptor for the virtual root hub from the OHCI
 * roothub.a / roothub.b registers.
 * Returns 0, or a negative errno from the register reads.
 */
static int u132_roothub_descriptor(struct u132 *u132,
	struct usb_hub_descriptor *desc)
{
	int retval;
	u16 temp;
	u32 rh_a = -1;
	u32 rh_b = -1;
	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
	if (retval)
		return retval;
	desc->bDescriptorType = 0x29;	/* USB hub descriptor type */
	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = u132->num_ports;
	temp = 1 + (u132->num_ports / 8);	/* DeviceRemovable bitmap bytes */
	desc->bDescLength = 7 + 2 * temp;
	temp = 0;
	/* translate OHCI roothub.a bits into wHubCharacteristics */
	if (rh_a & RH_A_NPS)		/* no power switching */
		temp |= 0x0002;
	if (rh_a & RH_A_PSM)		/* per-port power switching */
		temp |= 0x0001;
	if (rh_a & RH_A_NOCP)		/* no overcurrent protection */
		temp |= 0x0010;
	else if (rh_a & RH_A_OCPM)	/* per-port overcurrent reporting */
		temp |= 0x0008;
	desc->wHubCharacteristics = cpu_to_le16(temp);
	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
	if (retval)
		return retval;
	/* removable-device bitmap comes from roothub.b's DR field */
	memset(desc->u.hs.DeviceRemovable, 0xff,
	       sizeof(desc->u.hs.DeviceRemovable));
	desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
	if (u132->num_ports > 7) {
		desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
		desc->u.hs.DeviceRemovable[2] = 0xff;
	} else
		desc->u.hs.DeviceRemovable[1] = 0xff;
	return 0;
}
/*
 * Read the OHCI roothub.status register and store it little-endian
 * into *desc.  Returns the register-read status.
 */
static int u132_roothub_status(struct u132 *u132, __le32 *desc)
{
	u32 rh_status = -1;
	int retval = u132_read_pcimem(u132, roothub.status, &rh_status);

	*desc = cpu_to_le32(rh_status);
	return retval;
}
/*
 * Read the OHCI per-port status register for 1-based port @wIndex and
 * store it little-endian into *desc.  Returns -EINVAL for an out-of-range
 * index, otherwise the register-read status.
 *
 * Fix: the "port status change" log used to test *(u16 *)(desc + 2),
 * which is 8 bytes past the 4-byte status word - an out-of-bounds read
 * of the caller's buffer that fired on garbage.  The change bits live in
 * the upper 16 bits of the status word just read, so test those instead.
 */
static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
{
	int port;
	u32 rh_portstatus = -1;
	int ret_portstatus;

	if (wIndex == 0 || wIndex > u132->num_ports)
		return -EINVAL;
	port = wIndex - 1;
	ret_portstatus = u132_read_pcimem(u132,
		roothub.portstatus[port], &rh_portstatus);
	*desc = cpu_to_le32(rh_portstatus);
	if (rh_portstatus >> 16) {	/* any change bit set? */
		dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
			"ge = %08X\n", port, rh_portstatus);
	}
	return ret_portstatus;
}
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
#define PORT_RESET_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
/*
 * Drive a reset on root-hub port @port_index (0-based) and poll until the
 * reset completes or the PORT_RESET_MSEC budget (measured in OHCI frame
 * numbers, which tick at 1 ms) is exhausted.
 * Returns 0 on success, -ENODEV if the port stays in reset, or a negative
 * errno from a register access.
 */
static int u132_roothub_portreset(struct u132 *u132, int port_index)
{
	int retval;
	u32 fmnumber;
	u16 now;
	u16 reset_done;
	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	if (retval)
		return retval;
	now = fmnumber;
	reset_done = now + PORT_RESET_MSEC;	/* deadline, wrap-aware */
	do {
		u32 portstat;
		/* NOTE(review): 'now' is never refreshed inside this inner
		   loop, so tick_before() evaluates the same way on every
		   pass - the loop effectively spins until PRS clears.
		   Verify against the original driver's intent. */
		do {
			retval = u132_read_pcimem(u132,
				roothub.portstatus[port_index], &portstat);
			if (retval)
				return retval;
			if (RH_PS_PRS & portstat)
				continue;	/* reset still in progress */
			else
				break;
		} while (tick_before(now, reset_done));
		if (RH_PS_PRS & portstat)
			return -ENODEV;
		if (RH_PS_CCS & portstat) {	/* device still connected */
			if (RH_PS_PRSC & portstat) {
				/* acknowledge reset-status-change (W1C) */
				retval = u132_write_pcimem(u132,
					roothub.portstatus[port_index],
					RH_PS_PRSC);
				if (retval)
					return retval;
			}
		} else
			break;	/* start the next reset,
				sleep till it's probably done */
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			RH_PS_PRS);
		if (retval)
			return retval;
		msleep(PORT_RESET_HW_MSEC);
		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
		if (retval)
			return retval;
		now = fmnumber;
	} while (tick_before(now, reset_done));
	return 0;
}
/*
 * SetPortFeature for the virtual root hub: map the USB feature selector
 * onto the corresponding OHCI port-status set bit and write it.
 * Returns 0 on success, -EINVAL for a bad port index, -EPIPE for an
 * unsupported feature, or a negative errno from the register write.
 */
static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	int port_index;
	struct u132_port *port;

	if (wIndex == 0 || wIndex > u132->num_ports)
		return -EINVAL;
	port_index = wIndex - 1;
	port = &u132->port[port_index];
	port->Status &= ~(1 << wValue);
	switch (wValue) {
	case USB_PORT_FEAT_SUSPEND:
		return u132_write_pcimem(u132,
			roothub.portstatus[port_index], RH_PS_PSS);
	case USB_PORT_FEAT_POWER:
		return u132_write_pcimem(u132,
			roothub.portstatus[port_index], RH_PS_PPS);
	case USB_PORT_FEAT_RESET:
		return u132_roothub_portreset(u132, port_index);
	default:
		return -EPIPE;
	}
}
/*
 * ClearPortFeature for the virtual root hub: map the USB feature selector
 * onto the OHCI port-status bit that clears it (most are write-1-to-clear
 * change bits), then write that bit.
 * Returns 0 on success, -EINVAL for a bad port index, -EPIPE for an
 * unsupported feature, or a negative errno from the register write.
 */
static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		u32 temp;
		int retval;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			temp = RH_PS_CCS;	/* ClearPortEnable */
			break;
		case USB_PORT_FEAT_C_ENABLE:
			temp = RH_PS_PESC;	/* enable-status change */
			break;
		case USB_PORT_FEAT_SUSPEND:
			temp = RH_PS_POCI;	/* resume the port */
			if ((u132->hc_control & OHCI_CTRL_HCFS)
				!= OHCI_USB_OPER) {
				dev_err(&u132->platform_dev->dev, "TODO resume_"
					"root_hub\n");
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			temp = RH_PS_PSSC;	/* suspend-status change */
			break;
		case USB_PORT_FEAT_POWER:
			temp = RH_PS_LSDA;	/* ClearPortPower */
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			temp = RH_PS_CSC;	/* connect-status change */
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			temp = RH_PS_OCIC;	/* overcurrent change */
			break;
		case USB_PORT_FEAT_C_RESET:
			temp = RH_PS_PRSC;	/* reset-status change */
			break;
		default:
			return -EPIPE;
		}
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			temp);
		if (retval)
			return retval;
		return 0;
	}
}
/* the virtual root hub timer IRQ checks for hub status*/
static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct u132 *u132 = hcd_to_u132(hcd);
if (u132->going > 1) {
dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
"ed %d\n", hcd, u132->going);
return -ENODEV;
} else if (u132->going > 0) {
dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
"ed\n", hcd);
return -ESHUTDOWN;
} else {
int i, changed = 0, length = 1;
if (u132->flags & OHCI_QUIRK_AMD756) {
if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
dev_err(&u132->platform_dev->dev, "bogus NDP, r"
"ereads as NDP=%d\n",
u132->hc_roothub_a & RH_A_NDP);
goto done;
}
}
if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
buf[0] = changed = 1;
else
buf[0] = 0;
if (u132->num_ports > 7) {
buf[1] = 0;
length++;
}
for (i = 0; i < u132->num_ports; i++) {
if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
RH_PS_PRSC)) {
changed = 1;
if (i < 7)
buf[0] |= 1 << (i + 1);
else
buf[1] |= 1 << (i - 7);
continue;
}
if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
continue;
if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
continue;
}
done:
return changed ? length : 0;
}
}
/*
 * hc_driver ->hub_control: dispatch root-hub control requests to the
 * u132_roothub_* helpers under sw_lock.
 * Unsupported requests stall (-EPIPE); a failed register access disables
 * the controller and marks it as going away (the 'error' label).
 */
static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
	u16 wIndex, char *buf, u16 wLength)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0;
		mutex_lock(&u132->sw_lock);
		switch (typeReq) {
		case ClearHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;	/* nothing to do, accept silently */
			default:
				goto stall;
			}
			break;
		case SetHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;	/* nothing to do, accept silently */
			default:
				goto stall;
			}
			break;
		case ClearPortFeature:{
				retval = u132_roothub_clearportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		case GetHubDescriptor:{
				retval = u132_roothub_descriptor(u132,
					(struct usb_hub_descriptor *)buf);
				if (retval)
					goto error;
				break;
			}
		case GetHubStatus:{
				retval = u132_roothub_status(u132,
					(__le32 *) buf);
				if (retval)
					goto error;
				break;
			}
		case GetPortStatus:{
				retval = u132_roothub_portstatus(u132,
					(__le32 *) buf, wIndex);
				if (retval)
					goto error;
				break;
			}
		case SetPortFeature:{
				retval = u132_roothub_setportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		default:
			goto stall;
		error:	/* register access failed: take the HC down */
			u132_disable(u132);
			u132->going = 1;
			break;
		stall:	/* unsupported request */
			retval = -EPIPE;
			break;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	}
}
/*
 * hc_driver ->start_port_reset: nothing to do for this hardware beyond
 * the usual removal-state checks.
 */
static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	}
	return 0;
}
#ifdef CONFIG_PM
/*
 * hc_driver ->bus_suspend: only the removal-state checks; the hardware
 * itself needs no action here.
 */
static int u132_bus_suspend(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	}
	return 0;
}
/*
 * hc_driver ->bus_resume: only the removal-state checks; the hardware
 * itself needs no action here.
 */
static int u132_bus_resume(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	}
	return 0;
}
#else
#define u132_bus_suspend NULL
#define u132_bus_resume NULL
#endif
/* hc_driver ops for the ELAN U132.  There is no hardware interrupt line -
 * events arrive over the ftdi-elan USB transport - hence .irq = NULL. */
static struct hc_driver u132_hc_driver = {
	.description = hcd_name,
	.hcd_priv_size = sizeof(struct u132),
	.irq = NULL,	/* no IRQ: polled via work items */
	.flags = HCD_USB11 | HCD_MEMORY,
	.reset = u132_hcd_reset,
	.start = u132_hcd_start,
	.stop = u132_hcd_stop,
	.urb_enqueue = u132_urb_enqueue,
	.urb_dequeue = u132_urb_dequeue,
	.endpoint_disable = u132_endpoint_disable,
	.get_frame_number = u132_get_frame,
	.hub_status_data = u132_hub_status_data,
	.hub_control = u132_hub_control,
	.bus_suspend = u132_bus_suspend,
	.bus_resume = u132_bus_resume,
	.start_port_reset = u132_start_port_reset,
};
/*
* This function may be called by the USB core whilst the "usb_all_devices_rwsem"
* is held for writing, thus this module must not call usb_remove_hcd()
* synchronously - but instead should immediately stop activity to the
* device and asynchronously call usb_remove_hcd()
*/
/*
 * Platform ->remove: cancel all outstanding work items under sw_lock,
 * bump the 'going' state so new work refuses to run, then tear down the
 * HCD and drop the final reference.  Must not be re-entered - a second
 * caller sees going > 1 and bails out.
 */
static int __devexit u132_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132;
	int rings, endps;

	if (!hcd)
		return 0;
	u132 = hcd_to_u132(hcd);
	if (u132->going++ > 1) {
		dev_err(&u132->platform_dev->dev, "already being remove"
			"d\n");
		return -ENODEV;
	}
	dev_err(&u132->platform_dev->dev, "removing device u132"
		".%d\n", u132->sequence_num);
	msleep(100);
	mutex_lock(&u132->sw_lock);
	u132_monitor_cancel_work(u132);
	for (rings = MAX_U132_RINGS - 1; rings >= 0; rings--)
		u132_ring_cancel_work(u132, &u132->ring[rings]);
	for (endps = MAX_U132_ENDPS - 1; endps >= 0; endps--) {
		struct u132_endp *endp = u132->endp[endps];

		if (endp)
			u132_endp_cancel_work(u132, endp);
	}
	u132->going += 1;
	printk(KERN_INFO "removing device u132.%d\n",
		u132->sequence_num);
	mutex_unlock(&u132->sw_lock);
	usb_remove_hcd(hcd);
	u132_u132_put_kref(u132);
	return 0;
}
/*
 * One-time initialisation of the struct u132 software state: locks,
 * scheduler work items, and zeroed port/address/udev/endpoint tables.
 * Called from u132_probe() before the HCD is registered.
 */
static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
{
	int i;

	u132->board = pdev->dev.platform_data;
	u132->platform_dev = pdev;
	u132->power = 0;
	u132->reset = 0;
	mutex_init(&u132->sw_lock);
	mutex_init(&u132->scheduler_lock);
	for (i = 0; i < MAX_U132_RINGS; i++) {
		struct u132_ring *ring = &u132->ring[i];

		ring->u132 = u132;
		ring->number = i + 1;	/* rings are numbered from 1 */
		ring->length = 0;
		ring->curr_endp = NULL;
		INIT_DELAYED_WORK(&ring->scheduler,
			u132_hcd_ring_work_scheduler);
	}
	mutex_lock(&u132->sw_lock);
	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
	for (i = 0; i < MAX_U132_PORTS; i++) {
		struct u132_port *port = &u132->port[i];

		port->u132 = u132;
		port->reset = 0;
		port->enable = 0;
		port->power = 0;
		port->Status = 0;
	}
	for (i = 0; i < MAX_U132_ADDRS; i++)
		u132->addr[i].address = 0;
	for (i = 0; i < MAX_U132_UDEVS; i++) {
		struct u132_udev *udev = &u132->udev[i];
		int in = ARRAY_SIZE(udev->endp_number_in);
		int out = ARRAY_SIZE(udev->endp_number_out);

		udev->usb_device = NULL;
		udev->udev_number = 0;
		udev->usb_addr = 0;
		udev->portnumber = 0;
		while (in-- > 0)
			udev->endp_number_in[in] = 0;
		while (out-- > 0)
			udev->endp_number_out[out] = 0;
	}
	for (i = 0; i < MAX_U132_ENDPS; i++)
		u132->endp[i] = NULL;
	mutex_unlock(&u132->sw_lock);
}
/*
 * Platform ->probe: sanity-check the controller over the ftdi-elan
 * transport (mask interrupts, read control and roothub.a), create and
 * register the HCD, and kick off the monitor work item.
 * Returns 0 on success or a negative errno.
 */
static int __devinit u132_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	int retval;
	u32 control;
	u32 rh_a = -1;
	u32 num_ports;
	msleep(100);
	if (u132_exiting > 0)
		return -ENODEV;	/* module is on its way out */
	/* quiesce the controller before touching anything else */
	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, control, &control);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
	if (retval)
		return retval;
	num_ports = rh_a & RH_A_NDP; /* refuse to confuse usbcore */
	if (pdev->dev.dma_mask)
		return -EINVAL;	/* this transport cannot do DMA */
	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
			);
		ftdi_elan_gone_away(pdev);
		return -ENOMEM;
	} else {
		struct u132 *u132 = hcd_to_u132(hcd);
		retval = 0;
		hcd->rsrc_start = 0;
		/* register this instance on the module-wide list */
		mutex_lock(&u132_module_lock);
		list_add_tail(&u132->u132_list, &u132_static_list);
		u132->sequence_num = ++u132_instances;
		mutex_unlock(&u132_module_lock);
		u132_u132_init_kref(u132);
		u132_initialise(u132, pdev);
		hcd->product_desc = "ELAN U132 Host Controller";
		/* no IRQ line: irqnum 0, no IRQF flags */
		retval = usb_add_hcd(hcd, 0, 0);
		if (retval != 0) {
			dev_err(&u132->platform_dev->dev, "init error %d\n",
				retval);
			u132_u132_put_kref(u132);
			return retval;
		} else {
			/* start polling the root hub for status changes */
			u132_monitor_queue_work(u132, 100);
			return 0;
		}
	}
}
#ifdef CONFIG_PM
/* for this device there's no useful distinction between the controller
* and its root hub, except that the root hub only gets direct PM calls
* when CONFIG_USB_SUSPEND is enabled.
*/
/*
 * Platform ->suspend: on a freeze event delegate to the bus-suspend hook;
 * on a real suspend/hibernate power down all root-hub ports.
 * Other PM events are explicitly ignored (previously the switch silently
 * fell through with no default case).
 * Returns 0 on success or a negative errno.
 */
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0, ports;
		switch (state.event) {
		case PM_EVENT_FREEZE:
			retval = u132_bus_suspend(hcd);
			break;
		case PM_EVENT_SUSPEND:
		case PM_EVENT_HIBERNATE:
			ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 0);
			}
			break;
		default:
			/* e.g. PM_EVENT_ON: nothing to do */
			break;
		}
		return retval;
	}
}
/*
 * Platform ->resume: if the ports were powered down by u132_suspend()
 * (port 0's power flag is clear) power them all back up; otherwise this
 * was a freeze, so delegate to the bus-resume hook.
 */
static int u132_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	int ports;

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	}
	if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	}
	if (u132->port[0].power)
		return u132_bus_resume(hcd);
	for (ports = MAX_U132_PORTS - 1; ports >= 0; ports--)
		port_power(u132, ports, 1);
	return 0;
}
#else
#define u132_suspend NULL
#define u132_resume NULL
#endif
/*
* this driver is loaded explicitly by ftdi_u132
*
* the platform_driver struct is static because it is per type of module
*/
/* Platform driver glue; devices are created for us by the ftdi_u132
 * transport module (see the comment above). */
static struct platform_driver u132_platform_driver = {
	.probe = u132_probe,
	.remove = __devexit_p(u132_remove),
	.suspend = u132_suspend,
	.resume = u132_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
/*
 * Module init: set up module-wide state, create the driver workqueue and
 * register the platform driver.
 *
 * Fixes: the create_singlethread_workqueue() result was never checked
 * (a NULL workqueue would be dereferenced by the first queued work item),
 * and the workqueue leaked if platform_driver_register() failed.
 */
static int __init u132_hcd_init(void)
{
	int retval;

	INIT_LIST_HEAD(&u132_static_list);
	u132_instances = 0;
	u132_exiting = 0;
	mutex_init(&u132_module_lock);
	if (usb_disabled())
		return -ENODEV;
	printk(KERN_INFO "driver %s\n", hcd_name);
	workqueue = create_singlethread_workqueue("u132");
	if (!workqueue)
		return -ENOMEM;
	retval = platform_driver_register(&u132_platform_driver);
	if (retval)
		destroy_workqueue(workqueue);
	return retval;
}
module_init(u132_hcd_init);
/*
 * Module exit: flag that we are exiting (so probe refuses new devices),
 * unregister every remaining platform device, then the driver itself,
 * and finally wait for all instances to drop before draining and
 * destroying the workqueue.
 */
static void __exit u132_hcd_exit(void)
{
	struct u132 *u132;
	struct u132 *temp;
	mutex_lock(&u132_module_lock);
	u132_exiting += 1;	/* makes u132_probe() return -ENODEV */
	mutex_unlock(&u132_module_lock);
	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
		platform_device_unregister(u132->platform_dev);
	}
	platform_driver_unregister(&u132_platform_driver);
	printk(KERN_INFO "u132-hcd driver deregistered\n");
	/* instances drop to zero as the last krefs are released */
	wait_event(u132_hcd_wait, u132_instances == 0);
	flush_workqueue(workqueue);
	destroy_workqueue(workqueue);
}
module_exit(u132_hcd_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:u132_hcd");
| gpl-2.0 |
SlimLP-Y300/chil360-kernel | drivers/net/wan/hd64572.c | 4979 | 18001 | /*
* Hitachi (now Renesas) SCA-II HD64572 driver for Linux
*
* Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* Source of information: HD64572 SCA-II User's Manual
*
* We use the following SCA memory map:
*
* Packet buffer descriptor rings - starting from card->rambase:
* rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
* tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
* rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
* tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
*
* Packet data buffers - starting from card->rambase + buff_offset:
* rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
* tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include "hd64572.h"
#define NAPI_WEIGHT 16
#define get_msci(port) (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
#define sca_in(reg, card) readb(card->scabase + (reg))
#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
#define sca_inw(reg, card) readw(card->scabase + (reg))
#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
#define sca_inl(reg, card) readl(card->scabase + (reg))
#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
static int sca_poll(struct napi_struct *napi, int budget);
static inline port_t* dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
static inline void enable_intr(port_t *port)
{
	/* enable DMIB and MSCI RXINTA interrupts */
	u32 bits = port->chan ? 0x08002200 : 0x00080022;

	sca_outl(sca_inl(IER0, port->card) | bits, IER0, port->card);
}
static inline void disable_intr(port_t *port)
{
	/* mask out all of this channel's interrupt enable bits in IER0 */
	u32 keep = port->chan ? 0x00FF00FF : 0xFF00FF00;

	sca_outl(sca_inl(IER0, port->card) & keep, IER0, port->card);
}
/*
 * Turn a per-ring descriptor index into an absolute descriptor number in
 * card RAM, following the memory map in the header comment: per channel,
 * the RX ring precedes the TX ring.  The index is wrapped modulo the ring
 * size so callers may pass "X + 1" etc. without wrapping themselves.
 */
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
	u16 rx_buffs = port->card->rx_ring_buffers;
	u16 tx_buffs = port->card->tx_ring_buffers;
	desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
	return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
}
/* Byte offset of a packet descriptor from card->rambase. */
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
	/* Descriptor offset always fits in 16 bits */
	return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
/* CPU-visible (ioremapped) address of a packet descriptor in card RAM. */
static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
					     int transmit)
{
	return (pkt_desc __iomem *)(port->card->rambase +
				    desc_offset(port, desc, transmit));
}
/* Byte offset (from card->rambase) of the HDLC_MAX_MRU-sized packet data
 * buffer belonging to the given descriptor; data buffers start at
 * card->buff_offset, after all descriptor rings. */
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
	return port->card->buff_offset +
	       desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
/*
 * Propagate the DCD pin state from the MSCI ST3 register to the netdev
 * carrier flag.  ST3_DCD set means DCD is *inactive*, i.e. no carrier.
 */
static inline void sca_set_carrier(port_t *port)
{
	u8 st3 = sca_in(get_msci(port) + ST3, port->card);

	if (st3 & ST3_DCD) {
#ifdef DEBUG_LINK
		printk(KERN_DEBUG "%s: sca_set_carrier off\n",
		       port->netdev.name);
#endif
		netif_carrier_off(port->netdev);
	} else {
#ifdef DEBUG_LINK
		printk(KERN_DEBUG "%s: sca_set_carrier on\n",
		       port->netdev.name);
#endif
		netif_carrier_on(port->netdev);
	}
}
/*
 * Initialise one logical channel: build the RX/TX descriptor chains in
 * card RAM, reset both DMA controllers, program the current/end
 * descriptor addresses, enable RX DMA and register the NAPI context.
 * TX DMA is enabled later, when there is something to send.
 */
static void sca_init_port(port_t *port)
{
	card_t *card = port->card;
	u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
	int transmit, i;

	port->rxin = 0;
	port->txin = 0;
	port->txlast = 0;

	/* build circular descriptor chains for RX (transmit=0) and TX */
	for (transmit = 0; transmit < 2; transmit++) {
		u16 buffs = transmit ? card->tx_ring_buffers
			: card->rx_ring_buffers;

		for (i = 0; i < buffs; i++) {
			pkt_desc __iomem *desc = desc_address(port, i, transmit);
			u16 chain_off = desc_offset(port, i + 1, transmit);
			u32 buff_off = buffer_offset(port, i, transmit);

			writel(chain_off, &desc->cp);	/* chain pointer */
			writel(buff_off, &desc->bp);	/* buffer pointer */
			writew(0, &desc->len);
			writeb(0, &desc->stat);
		}
	}

	/* DMA disable - to halt state */
	sca_out(0, DSR_RX(port->chan), card);
	sca_out(0, DSR_TX(port->chan), card);
	/* software ABORT - to initial state */
	sca_out(DCR_ABORT, DCR_RX(port->chan), card);
	sca_out(DCR_ABORT, DCR_TX(port->chan), card);

	/* current desc addr */
	sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
	/* NOTE(review): tx_ring_buffers is used to place the RX ring's end
	   (EDAL) marker; desc_offset() wraps modulo rx_ring_buffers, so this
	   only lands on the last RX descriptor when the two ring sizes
	   match - verify against how the card sets up its rings. */
	sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
		 dmac_rx + EDAL, card);
	sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
	sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);

	/* clear frame end interrupt counter */
	sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
	sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);

	/* Receive */
	sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
	sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
	sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
	sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */

	/* Transmit */
	sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
	sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */

	sca_set_carrier(port);
	netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
}
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t *card = port->card;

	/* only DCD transitions are of interest here */
	if (!(sca_in(msci + ST1, card) & ST1_CDCD))
		return;
	/* Reset MSCI CDCD status bit */
	sca_out(ST1_CDCD, msci + ST1, card);
	sca_set_carrier(port);
}
/*
 * Copy one received frame out of the card's on-board buffer RAM into a
 * freshly allocated skb and hand it to the stack via netif_receive_skb()
 * (we are in NAPI context).  On allocation failure the frame is dropped
 * and only rx_dropped is bumped.
 */
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
			  u16 rxin)
{
	struct net_device *dev = port->netdev;
	struct sk_buff *skb;
	u16 len;
	u32 buff;

	len = readw(&desc->len);
	skb = dev_alloc_skb(len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	buff = buffer_offset(port, rxin, 0);
	memcpy_fromio(skb->data, card->rambase + buff, len);

	skb_put(skb, len);
#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->protocol = hdlc_type_trans(skb, dev);
	netif_receive_skb(skb);
}
/* Receive DMA service */
/*
 * Drain completed RX descriptors for this channel, passing good frames to
 * sca_rx() and folding errors into the netdev stats, up to @budget frames
 * (NAPI).  Returns the number of frames delivered.
 */
static inline int sca_rx_done(port_t *port, int budget)
{
	struct net_device *dev = port->netdev;
	u16 dmac = get_dmac_rx(port);
	card_t *card = port->card;
	u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
	int received = 0;

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(port->chan), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (received < budget) {
		u32 desc_off = desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_inl(dmac + CDAL, card);

		/* the DMA engine's current descriptor pointer still sits on
		   this slot, so no (further) frame has completed */
		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			/* frame with errors, or the tail of an oversized
			   frame whose earlier fragments we discarded */
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else {
			sca_rx(card, port, desc, port->rxin);
			received++;
		}

		/* Set new error descriptor address */
		sca_outl(desc_off, dmac + EDAL, card);
		port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(port->chan), card);
	return received;
}
/* Transmit DMA service */
/*
 * Reclaim transmitted descriptors under port->lock, update TX statistics
 * and wake the queue if at least one slot was freed.
 */
static inline void sca_tx_done(port_t *port)
{
	struct net_device *dev = port->netdev;
	card_t *card = port->card;
	u8 dsr;
	unsigned freed = 0;

	spin_lock(&port->lock);

	dsr = sca_in(DSR_TX(port->chan), card); /* read DMA Status */
	/* Reset DSR status bits */
	sca_out((dsr & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(port->chan), card);

	for (;;) {
		pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
		u8 stat = readb(&desc->stat);

		if (!(stat & ST_TX_OWNRSHP))
			break; /* not yet transmitted */
		if (stat & ST_TX_UNDRRUN) {
			dev->stats.tx_errors++;
			dev->stats.tx_fifo_errors++;
		} else {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += readw(&desc->len);
		}
		writeb(0, &desc->stat); /* Free descriptor */
		freed++;
		port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
	}

	if (freed)
		netif_wake_queue(dev);
	spin_unlock(&port->lock);
}
/*
 * NAPI poll handler: service whichever of the channel's MSCI / TX-DMA /
 * RX-DMA conditions are pending in ISR0, and re-enable interrupts once
 * the RX work fits inside the budget.
 */
static int sca_poll(struct napi_struct *napi, int budget)
{
	port_t *port = container_of(napi, port_t, napi);
	u32 pending = sca_inl(ISR0, port->card);
	int work_done = 0;

	if (pending & (port->chan ? 0x08000000 : 0x00080000))
		sca_msci_intr(port);

	if (pending & (port->chan ? 0x00002000 : 0x00000020))
		sca_tx_done(port);

	if (pending & (port->chan ? 0x00000200 : 0x00000002))
		work_done = sca_rx_done(port, budget);

	if (work_done < budget) {
		napi_complete(napi);
		enable_intr(port);
	}
	return work_done;
}
/*
 * Hard interrupt handler: for each channel with pending ISR0 bits, mask
 * its interrupts and hand the work to NAPI.
 */
static irqreturn_t sca_intr(int irq, void *dev_id)
{
	card_t *card = dev_id;
	u32 isr0 = sca_inl(ISR0, card);
	int handled = 0;
	int i;

	for (i = 0; i < 2; i++) {
		port_t *port = get_port(card, i);
		u32 chan_bits = i ? 0x08002200 : 0x00080022;

		if (!port || !(isr0 & chan_bits))
			continue;
		handled = 1;
		disable_intr(port);
		napi_schedule(&port->napi);
	}
	return IRQ_RETVAL(handled);
}
/*
 * Program the MSCI baud-rate generator and loopback bit for this channel.
 * Baud Rate = CLOCK_BASE / TMC / 2^BR, so we search for the largest BR
 * (smallest divisor 2^BR) that still lets an 8-bit TMC hit the requested
 * clock_rate, then write the achieved rate back into port->settings.
 */
static void sca_set_port(port_t *port)
{
	card_t* card = port->card;
	u16 msci = get_msci(port);
	u8 md2 = sca_in(msci + MD2, card);
	unsigned int tmc, br = 10, brv = 1024;


	if (port->settings.clock_rate > 0) {
		/* Try lower br for better accuracy*/
		do {
			br--;
			brv >>= 1; /* brv = 2^9 = 512 max in specs */

			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
			tmc = CLOCK_BASE / brv / port->settings.clock_rate;
		}while (br > 1 && tmc <= 128);

		if (tmc < 1) {
			tmc = 1;
			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
			brv = 1;
		} else if (tmc > 255)
			tmc = 256; /* tmc=0 means 256 - low baud rates */

		/* report the rate actually achieved */
		port->settings.clock_rate = CLOCK_BASE / brv / tmc;
	} else {
		br = 9; /* Minimum clock rate */
		tmc = 256;	/* 8bit = 0 */
		port->settings.clock_rate = CLOCK_BASE / (256 * 512);
	}

	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
	port->tmc = tmc;

	/* baud divisor - time constant*/
	sca_out(port->tmc, msci + TMCR, card);
	sca_out(port->tmc, msci + TMCT, card);

	/* Set BRG bits */
	sca_out(port->rxs, msci + RXS, card);
	sca_out(port->txs, msci + TXS, card);

	if (port->settings.loopback)
		md2 |= MD2_LOOPBACK;	/* Loopback mode */
	else
		md2 &= ~MD2_LOOPBACK;

	sca_out(md2, msci + MD2, card);

}
/* Bring the channel up: translate the generic encoding/parity settings
 * into MSCI mode-register values, program the full register sequence
 * (reset first, enables last — this order matters for the hardware),
 * then enable interrupts, NAPI and the Tx queue.
 */
static void sca_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t* card = port->card;
	u16 msci = get_msci(port);
	u8 md0, md2;

	/* Map generic line encoding to MD2 register bits */
	switch(port->encoding) {
	case ENCODING_NRZ:	md2 = MD2_NRZ;		break;
	case ENCODING_NRZI:	md2 = MD2_NRZI;		break;
	case ENCODING_FM_MARK:	md2 = MD2_FM_MARK;	break;
	case ENCODING_FM_SPACE:	md2 = MD2_FM_SPACE;	break;
	default:		md2 = MD2_MANCHESTER;
	}

	if (port->settings.loopback)
		md2 |= MD2_LOOPBACK;

	/* Map generic CRC/parity setting to MD0 register bits (HDLC mode) */
	switch(port->parity) {
	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
	case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
	}

	sca_out(CMD_RESET, msci + CMD, card);
	sca_out(md0, msci + MD0, card);
	sca_out(0x00, msci + MD1, card); /* no address field check */
	sca_out(md2, msci + MD2, card);
	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
	/* Skip the rest of underrun frame */
	sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
	sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
	sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
	sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
	sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
	sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/

/* We're using the following interrupts:
   - RXINTA (DCD changes only)
   - DMIB (EOM - single frame transfer complete)
*/
	sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);

	/* Restore baud-rate settings computed by sca_set_port() */
	sca_out(port->tmc, msci + TMCR, card);
	sca_out(port->tmc, msci + TMCT, card);
	sca_out(port->rxs, msci + RXS, card);
	sca_out(port->txs, msci + TXS, card);
	sca_out(CMD_TX_ENABLE, msci + CMD, card);
	sca_out(CMD_RX_ENABLE, msci + CMD, card);

	sca_set_carrier(port);
	enable_intr(port);
	napi_enable(&port->napi);
	netif_start_queue(dev);
}
/* Bring the channel down: hardware reset of the MSCI channel, then
 * interrupts off, NAPI stopped and the Tx queue stopped.  The reset is
 * issued first so the hardware stops generating events before the
 * software paths are torn down.
 */
static void sca_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);

	/* reset channel */
	sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
	disable_intr(port);
	napi_disable(&port->napi);
	netif_stop_queue(dev);
}
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
if (encoding != ENCODING_NRZ &&
encoding != ENCODING_NRZI &&
encoding != ENCODING_FM_MARK &&
encoding != ENCODING_FM_SPACE &&
encoding != ENCODING_MANCHESTER)
return -EINVAL;
if (parity != PARITY_NONE &&
parity != PARITY_CRC16_PR0 &&
parity != PARITY_CRC16_PR1 &&
parity != PARITY_CRC32_PR1_CCITT &&
parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
dev_to_port(dev)->encoding = encoding;
dev_to_port(dev)->parity = parity;
return 0;
}
#ifdef DEBUG_RINGS
/* Debug helper: dump the Rx/Tx descriptor rings, the MSCI register set
 * and the interrupt status registers to the kernel log.  Compiled in
 * only when DEBUG_RINGS is defined.
 */
static void sca_dump_rings(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port->card;
	u16 cnt;

	/* Rx ring: current/end DMA addresses, DMA status, next slot in use */
	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
	       sca_inl(get_dmac_rx(port) + CDAL, card),
	       sca_inl(get_dmac_rx(port) + EDAL, card),
	       sca_in(DSR_RX(port->chan), card), port->rxin,
	       sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
	for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
		pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
	pr_cont("\n");

	/* Tx ring: same, plus the last descriptor handed to the hardware */
	printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
	       "last=%u %sactive",
	       sca_inl(get_dmac_tx(port) + CDAL, card),
	       sca_inl(get_dmac_tx(port) + EDAL, card),
	       sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
	       sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
	for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
		pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
	pr_cont("\n");

	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
	       " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
	       sca_in(get_msci(port) + MD0, card),
	       sca_in(get_msci(port) + MD1, card),
	       sca_in(get_msci(port) + MD2, card),
	       sca_in(get_msci(port) + ST0, card),
	       sca_in(get_msci(port) + ST1, card),
	       sca_in(get_msci(port) + ST2, card),
	       sca_in(get_msci(port) + ST3, card),
	       sca_in(get_msci(port) + ST4, card),
	       sca_in(get_msci(port) + FST, card),
	       sca_in(get_msci(port) + CST0, card),
	       sca_in(get_msci(port) + CST1, card));

	printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
	       sca_inl(ISR0, card), sca_inl(ISR1, card));
}
#endif /* DEBUG_RINGS */
/* Transmit one frame.
 *
 * Copies the skb into the card's packet RAM at the next free ring slot,
 * marks the descriptor ready (ST_TX_EOM), advances EDAL so the DMA engine
 * picks it up, and kicks the Tx DMA.  The queue is stopped when fewer than
 * two free descriptors remain, so by the time we get here the slot after
 * txin is guaranteed free (BUG_ON below asserts that invariant).
 */
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port->card;
	pkt_desc __iomem *desc;
	u32 buff, len;

	spin_lock_irq(&port->lock);

	desc = desc_address(port, port->txin + 1, 1);
	BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */

#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif

	/* Copy the frame into card RAM and publish the descriptor */
	desc = desc_address(port, port->txin, 1);
	buff = buffer_offset(port, port->txin, 1);
	len = skb->len;
	memcpy_toio(card->rambase + buff, skb->data, len);

	writew(len, &desc->len);
	writeb(ST_TX_EOM, &desc->stat);

	port->txin = (port->txin + 1) % card->tx_ring_buffers;
	sca_outl(desc_offset(port, port->txin, 1),
		 get_dmac_tx(port) + EDAL, card);

	sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */

	desc = desc_address(port, port->txin + 1, 1);
	if (readb(&desc->stat)) /* allow 1 packet gap */
		netif_stop_queue(dev);

	spin_unlock_irq(&port->lock);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Probe how much of the card's packet RAM actually works.
 *
 * Fills the RAM (rounded down to a 32-bit multiple) from end to start
 * with a position-dependent pattern, then reads it back from the start;
 * the first mismatch marks the end of usable RAM.  Returns the number of
 * verified bytes.
 *
 * The zero-size guard prevents a u32 underflow in the do-while below:
 * with ramsize < 4 the first "i -= 4" would wrap to 0xFFFFFFFC and
 * write far outside the mapped region.
 */
static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
				    u32 ramsize)
{
	/* Round RAM size to 32 bits, fill from end to start */
	u32 i = ramsize &= ~3;

	if (i == 0)
		return 0;

	do {
		i -= 4;
		writel(i ^ 0x12345678, rambase + i);
	} while (i > 0);

	for (i = 0; i < ramsize ; i += 4) {
		if (readl(rambase + i) != (i ^ 0x12345678))
			break;
	}

	return i;
}
/* One-time chip initialization: program bus wait states, put all four
 * DMA channels (Rx/Tx on both ports) into the halted state, then enable
 * the DMA master.  The master is disabled while the channels are being
 * halted so no transfer can start mid-setup.
 */
static void __devinit sca_init(card_t *card, int wait_states)
{
	sca_out(wait_states, WCRL, card); /* Wait Control */
	sca_out(wait_states, WCRM, card);
	sca_out(wait_states, WCRH, card);

	sca_out(0, DMER, card);	/* DMA Master disable */
	sca_out(0x03, PCR, card); /* DMA priority */
	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
	sca_out(0, DSR_TX(0), card);
	sca_out(0, DSR_RX(1), card);
	sca_out(0, DSR_TX(1), card);
	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}
| gpl-2.0 |
jfdsmabalot/kernel_samsung_msm8974pro | drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4979 | 38145 | /*
* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@petalogix.com>.
*
* 2007-2009 (c) Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
#define XEL_TSR_OFFSET 0x07FC /* Tx status */
#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
#define XEL_RXBUFF_OFFSET 0x1000 /* Receive Buffer */
#define XEL_RPLR_OFFSET 0x100C /* Rx packet length */
#define XEL_RSR_OFFSET 0x17FC /* Rx status */
#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
/* MDIO Address Register Bit Masks */
#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
#define XEL_MDIOADDR_PHYADR_SHIFT 5
#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
/* MDIO Write Data Register Bit Masks */
#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
/* MDIO Read Data Register Bit Masks */
#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
/* MDIO Control Register Bit Masks */
#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
/* Transmit Status Register (TSR) Bit Masks */
#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
* in the HW spec */
/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
/* Receive Status Register (RSR) */
#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
/* Transmit Packet Length Register (TPLR) */
#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
/* Receive Packet Length Register (RPLR) */
#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
#define XEL_HEADER_OFFSET 12 /* Offset to length field */
#define XEL_HEADER_SHIFT 16 /* Shift value for length */
/* General Ethernet Definitions */
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
/**
 * struct net_local - Our private per device data
 * @ndev:		instance of the network device
 * @tx_ping_pong:	indicates whether Tx Pong buffer is configured in HW
 * @rx_ping_pong:	indicates whether Rx Pong buffer is configured in HW
 * @next_tx_buf_to_use:	next Tx buffer to write to (offset from base_addr;
 *			toggled by XEL_BUFFER_OFFSET in ping-pong mode)
 * @next_rx_buf_to_use:	next Rx buffer to read from (offset from base_addr)
 * @base_addr:		base address of the Emaclite device
 * @reset_lock:		lock used for synchronization of the Tx path and
 *			device reset (see xemaclite_tx_timeout)
 * @deferred_skb:	holds an skb (for transmission at a later time) when the
 *			Tx buffer is not free
 * @phy_dev:		pointer to the PHY device
 * @phy_node:		pointer to the PHY device node
 * @mii_bus:		pointer to the MII bus
 * @mdio_irqs:		IRQs table for MDIO bus
 * @last_link:		last link status (speed/duplex/link hashed together)
 * @has_mdio:		indicates whether MDIO is included in the HW
 */
struct net_local {

	struct net_device *ndev;

	bool tx_ping_pong;
	bool rx_ping_pong;
	u32 next_tx_buf_to_use;
	u32 next_rx_buf_to_use;
	void __iomem *base_addr;

	spinlock_t reset_lock;
	struct sk_buff *deferred_skb;

	struct phy_device *phy_dev;
	struct device_node *phy_node;

	struct mii_bus *mii_bus;
	int mdio_irqs[PHY_MAX_ADDR];

	int last_link;
	bool has_mdio;
};
/*************************/
/* EmacLite driver calls */
/*************************/
/**
 * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device
 * @drvdata:	Pointer to the Emaclite device private data
 *
 * This function enables the Tx and Rx interrupts for the Emaclite device along
 * with the Global Interrupt Enable.  The pong-buffer interrupts are only
 * enabled when the corresponding buffer is configured in hardware.
 */
static void xemaclite_enable_interrupts(struct net_local *drvdata)
{
	u32 reg_data;

	/* Enable the Tx interrupts for the first Buffer */
	reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
	out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
		 reg_data | XEL_TSR_XMIT_IE_MASK);

	/* Enable the Tx interrupts for the second Buffer if
	 * configured in HW */
	if (drvdata->tx_ping_pong != 0) {
		reg_data = in_be32(drvdata->base_addr +
				   XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
		out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
			 XEL_TSR_OFFSET,
			 reg_data | XEL_TSR_XMIT_IE_MASK);
	}

	/* Enable the Rx interrupts for the first buffer */
	out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
		 XEL_RSR_RECV_IE_MASK);

	/* Enable the Rx interrupts for the second Buffer if
	 * configured in HW */
	if (drvdata->rx_ping_pong != 0) {
		out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
			 XEL_RSR_OFFSET,
			 XEL_RSR_RECV_IE_MASK);
	}

	/* Enable the Global Interrupt Enable */
	out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);
}
/**
 * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device
 * @drvdata:	Pointer to the Emaclite device private data
 *
 * This function disables the Tx and Rx interrupts for the Emaclite device,
 * along with the Global Interrupt Enable.
 */
static void xemaclite_disable_interrupts(struct net_local *drvdata)
{
	u32 reg_data;

	/* Disable the Global Interrupt Enable */
	/* NOTE(review): this writes the GIE *enable* mask rather than 0 to
	 * the GIER — that looks like it would leave global interrupts on.
	 * Verify against the EmacLite register spec before changing. */
	out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);

	/* Disable the Tx interrupts for the first buffer */
	reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
	out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
		 reg_data & (~XEL_TSR_XMIT_IE_MASK));

	/* Disable the Tx interrupts for the second Buffer
	 * if configured in HW */
	if (drvdata->tx_ping_pong != 0) {
		reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
				   XEL_TSR_OFFSET);
		out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
			 XEL_TSR_OFFSET,
			 reg_data & (~XEL_TSR_XMIT_IE_MASK));
	}

	/* Disable the Rx interrupts for the first buffer */
	reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
	out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
		 reg_data & (~XEL_RSR_RECV_IE_MASK));

	/* Disable the Rx interrupts for the second buffer
	 * if configured in HW */
	if (drvdata->rx_ping_pong != 0) {
		reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
				   XEL_RSR_OFFSET);
		out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
			 XEL_RSR_OFFSET,
			 reg_data & (~XEL_RSR_RECV_IE_MASK));
	}
}
/**
 * xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address
 * @src_ptr:	Void pointer to the 16-bit aligned source address
 * @dest_ptr:	Pointer to the 32-bit aligned destination address
 * @length:	Number bytes to write from source to destination
 *
 * This function writes data from a 16-bit aligned buffer to a 32-bit aligned
 * address in the EmacLite device.  The destination is only ever stored with
 * full 32-bit word writes (assembled in align_buffer); a trailing partial
 * word is padded with zeroes — do not replace this with memcpy, which would
 * change the access width seen by the device buffer.
 */
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
				    unsigned length)
{
	u32 align_buffer;
	u32 *to_u32_ptr;
	u16 *from_u16_ptr, *to_u16_ptr;

	to_u32_ptr = dest_ptr;
	from_u16_ptr = src_ptr;
	align_buffer = 0;

	/* Assemble two 16-bit halves per word, then store the word */
	for (; length > 3; length -= 4) {
		to_u16_ptr = (u16 *)&align_buffer;
		*to_u16_ptr++ = *from_u16_ptr++;
		*to_u16_ptr++ = *from_u16_ptr++;

		/* Output a word */
		*to_u32_ptr++ = align_buffer;
	}
	if (length) {
		u8 *from_u8_ptr, *to_u8_ptr;

		/* Set up to output the remaining data: gather the last 1-3
		 * bytes into a zeroed word so the final store is still a
		 * full 32-bit write */
		align_buffer = 0;
		to_u8_ptr = (u8 *) &align_buffer;
		from_u8_ptr = (u8 *) from_u16_ptr;

		/* Output the remaining data */
		for (; length > 0; length--)
			*to_u8_ptr++ = *from_u8_ptr++;

		*to_u32_ptr = align_buffer;
	}
}
/**
 * xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer
 * @src_ptr:	Pointer to the 32-bit aligned source address
 * @dest_ptr:	Pointer to the 16-bit aligned destination address
 * @length:	Number bytes to read from source to destination
 *
 * This function reads data from a 32-bit aligned address in the EmacLite
 * device to a 16-bit aligned buffer.  The source is only ever accessed
 * with full 32-bit word loads; the word is then scattered to the
 * destination in 16-bit (or trailing 8-bit) units.
 */
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
				   unsigned length)
{
	u16 *to_u16_ptr, *from_u16_ptr;
	u32 *from_u32_ptr;
	u32 align_buffer;

	from_u32_ptr = src_ptr;
	to_u16_ptr = (u16 *) dest_ptr;

	for (; length > 3; length -= 4) {
		/* Copy each word into the temporary buffer */
		align_buffer = *from_u32_ptr++;
		from_u16_ptr = (u16 *)&align_buffer;

		/* Read data from source */
		*to_u16_ptr++ = *from_u16_ptr++;
		*to_u16_ptr++ = *from_u16_ptr++;
	}

	if (length) {
		u8 *to_u8_ptr, *from_u8_ptr;

		/* Set up to read the remaining data */
		to_u8_ptr = (u8 *) to_u16_ptr;
		align_buffer = *from_u32_ptr++;
		from_u8_ptr = (u8 *) &align_buffer;

		/* Read the remaining data.  Both pointers must advance on
		 * every iteration; the previous code lacked the increments,
		 * so only the first trailing byte was ever copied and the
		 * last 1-3 bytes of the frame were corrupted. */
		for (; length > 0; length--)
			*to_u8_ptr++ = *from_u8_ptr++;
	}
}
/**
 * xemaclite_send_data - Send an Ethernet frame
 * @drvdata:	Pointer to the Emaclite device private data
 * @data:	Pointer to the data to be sent
 * @byte_count:	Total frame size, including header
 *
 * This function checks if the Tx buffer of the Emaclite device is free to send
 * data. If so, it fills the Tx buffer with data for transmission. Otherwise, it
 * returns an error.
 *
 * Return:	0 upon success or -1 if the buffer(s) are full.
 *
 * Note:	The maximum Tx packet size can not be more than Ethernet header
 *		(14 Bytes) + Maximum MTU (1500 bytes). This is excluding FCS.
 */
static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
			       unsigned int byte_count)
{
	u32 reg_data;
	void __iomem *addr;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	/* If the length is too large, truncate it */
	if (byte_count > ETH_FRAME_LEN)
		byte_count = ETH_FRAME_LEN;

	/* Check if the expected buffer is available */
	reg_data = in_be32(addr + XEL_TSR_OFFSET);
	if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
	     XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {

		/* Switch to next buffer if configured, so the next send
		 * starts on the other (ping-pong) buffer */
		if (drvdata->tx_ping_pong != 0)
			drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
	} else if (drvdata->tx_ping_pong != 0) {
		/* If the expected buffer is full, try the other buffer,
		 * if it is configured in HW */
		addr = (void __iomem __force *)((u32 __force)addr ^
						 XEL_BUFFER_OFFSET);
		reg_data = in_be32(addr + XEL_TSR_OFFSET);

		if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
		     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
			return -1; /* Buffers were full, return failure */
	} else
		return -1; /* Buffer was full, return failure */

	/* Write the frame to the buffer */
	xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);

	out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK));

	/* Update the Tx Status Register to indicate that there is a
	 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
	 * is used by the interrupt handler to check whether a frame
	 * has been transmitted */
	reg_data = in_be32(addr + XEL_TSR_OFFSET);
	reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
	out_be32(addr + XEL_TSR_OFFSET, reg_data);

	return 0;
}
/**
 * xemaclite_recv_data - Receive a frame
 * @drvdata:	Pointer to the Emaclite device private data
 * @data:	Address where the data is to be received
 *
 * This function is intended to be called from the interrupt context or
 * with a wrapper which waits for the receive frame to be available.
 *
 * The frame length is not reported by the hardware directly: it is derived
 * from the frame's EtherType/length field (and, for IP, the IP total-length
 * field), falling back to the maximum frame size for unknown protocols.
 *
 * Return:	Total number of bytes received, or 0 if no frame was available
 */
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
{
	void __iomem *addr;
	u16 length, proto_type;
	u32 reg_data;

	/* Determine the expected buffer address */
	addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);

	/* Verify which buffer has valid data */
	reg_data = in_be32(addr + XEL_RSR_OFFSET);

	if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
		if (drvdata->rx_ping_pong != 0)
			drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET;
	} else {
		/* The instance is out of sync, try other buffer if other
		 * buffer is configured, return 0 otherwise. If the instance is
		 * out of sync, do not update the 'next_rx_buf_to_use' since it
		 * will correct on subsequent calls */
		if (drvdata->rx_ping_pong != 0)
			addr = (void __iomem __force *)((u32 __force)addr ^
							 XEL_BUFFER_OFFSET);
		else
			return 0;	/* No data was available */

		/* Verify that buffer has valid data */
		reg_data = in_be32(addr + XEL_RSR_OFFSET);
		if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
		     XEL_RSR_RECV_DONE_MASK)
			return 0;	/* No data was available */
	}

	/* Get the protocol type of the ethernet frame that arrived */
	proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
			XEL_RPLR_LENGTH_MASK);

	/* Check if received ethernet frame is a raw ethernet frame
	 * or an IP packet or an ARP packet */
	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {

		if (proto_type == ETH_P_IP) {
			/* IP: take the length from the IP header */
			length = ((ntohl(in_be32(addr +
					XEL_HEADER_IP_LENGTH_OFFSET +
					XEL_RXBUFF_OFFSET)) >>
					XEL_HEADER_SHIFT) &
					XEL_RPLR_LENGTH_MASK);
			length += ETH_HLEN + ETH_FCS_LEN;

		} else if (proto_type == ETH_P_ARP)
			length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
		else
			/* Field contains type other than IP or ARP, use max
			 * frame size and let user parse it */
			length = ETH_FRAME_LEN + ETH_FCS_LEN;
	} else
		/* Use the length in the frame, plus the header and trailer */
		length = proto_type + ETH_HLEN + ETH_FCS_LEN;

	/* Read from the EmacLite device */
	xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
				data, length);

	/* Acknowledge the frame */
	reg_data = in_be32(addr + XEL_RSR_OFFSET);
	reg_data &= ~XEL_RSR_RECV_DONE_MASK;
	out_be32(addr + XEL_RSR_OFFSET, reg_data);

	return length;
}
/**
 * xemaclite_update_address - Update the MAC address in the device
 * @drvdata:	Pointer to the Emaclite device private data
 * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
 *
 * Tx must be idle and Rx should be idle for deterministic results.
 * It is recommended that this function should be called after the
 * initialization and before transmission of any packets from the device.
 * The MAC address can be programmed using any of the two transmit
 * buffers (if configured).
 */
static void xemaclite_update_address(struct net_local *drvdata,
				     u8 *address_ptr)
{
	void __iomem *addr;
	u32 reg_data;

	/* Determine the expected Tx buffer address */
	addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;

	/* The MAC address is loaded by placing it in the Tx buffer and
	 * issuing a "program MAC address" command via the TSR */
	xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);

	out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN);

	/* Update the MAC address in the EmacLite */
	reg_data = in_be32(addr + XEL_TSR_OFFSET);
	out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR);

	/* Wait for EmacLite to finish with the MAC address update.
	 * NOTE(review): this busy-wait has no timeout; a hung core would
	 * spin forever here — presumably acceptable for this IP block,
	 * but worth confirming. */
	while ((in_be32(addr + XEL_TSR_OFFSET) &
		XEL_TSR_PROG_MAC_ADDR) != 0)
		;
}
/**
 * xemaclite_set_mac_address - Set the MAC address for this device
 * @dev:	Pointer to the network device instance
 * @address:	Void pointer to the sockaddr structure holding the new address
 *
 * Copies the hardware address from the sockaddr structure into the
 * net_device and programs it into the EmacLite hardware.  The address
 * may only be changed while the interface is down.
 *
 * Return:	-EBUSY if the interface is running, 0 on success
 */
static int xemaclite_set_mac_address(struct net_device *dev, void *address)
{
	struct sockaddr *saddr = address;
	struct net_local *lp = netdev_priv(dev);

	/* Refuse to change the address while the interface is up */
	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
	xemaclite_update_address(lp, dev->dev_addr);
	return 0;
}
/**
 * xemaclite_tx_timeout - Callback for Tx Timeout
 * @dev:	Pointer to the network device
 *
 * This function is called when Tx time out occurs for Emaclite device.
 * It resets the device (by toggling its interrupts), drops any deferred
 * skb and restarts the Tx queue.
 */
static void xemaclite_tx_timeout(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
		TX_TIMEOUT * 1000UL / HZ);

	dev->stats.tx_errors++;

	/* Reset the device */
	spin_lock_irqsave(&lp->reset_lock, flags);

	/* Shouldn't really be necessary, but shouldn't hurt */
	netif_stop_queue(dev);

	xemaclite_disable_interrupts(lp);
	xemaclite_enable_interrupts(lp);

	/* Drop the frame we could not send; the stack will retransmit */
	if (lp->deferred_skb) {
		dev_kfree_skb(lp->deferred_skb);
		lp->deferred_skb = NULL;
		dev->stats.tx_errors++;
	}

	/* To exclude tx timeout */
	dev->trans_start = jiffies; /* prevent tx timeout */

	/* We're all ready to go. Start the queue */
	netif_wake_queue(dev);
	spin_unlock_irqrestore(&lp->reset_lock, flags);
}
/**********************/
/* Interrupt Handlers */
/**********************/
/**
 * xemaclite_tx_handler - Interrupt handler for frames sent
 * @dev:	Pointer to the network device
 *
 * Accounts the completed transmission and, if an skb was deferred because
 * both Tx buffers were busy, retries sending it now that a buffer has
 * freed up; on success the queue is woken again.
 */
static void xemaclite_tx_handler(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	dev->stats.tx_packets++;

	if (!lp->deferred_skb)
		return;

	/* Still no free buffer: keep the skb deferred and wait for the
	 * next Tx-complete interrupt */
	if (xemaclite_send_data(lp,
				(u8 *) lp->deferred_skb->data,
				lp->deferred_skb->len) != 0)
		return;

	dev->stats.tx_bytes += lp->deferred_skb->len;
	dev_kfree_skb_irq(lp->deferred_skb);
	lp->deferred_skb = NULL;
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
/**
 * xemaclite_rx_handler- Interrupt handler for frames received
 * @dev:	Pointer to the network device
 *
 * This function allocates memory for a socket buffer, fills it with data
 * received and hands it over to the TCP/IP stack.  The skb is always
 * allocated for the maximum frame size because the actual length is only
 * known after xemaclite_recv_data() has parsed the frame headers.
 */
static void xemaclite_rx_handler(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	unsigned int align;
	u32 len;

	len = ETH_FRAME_LEN + ETH_FCS_LEN;
	skb = netdev_alloc_skb(dev, len + ALIGNMENT);
	if (!skb) {
		/* Couldn't get memory. */
		dev->stats.rx_dropped++;
		dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n");
		return;
	}

	/*
	 * A new skb should have the data halfword aligned, but this code is
	 * here just in case that isn't true. Calculate how many
	 * bytes we should reserve to get the data to start on a word
	 * boundary */
	align = BUFFER_ALIGN(skb->data);
	if (align)
		skb_reserve(skb, align);

	/* Align the IP header (14-byte Ethernet header + 2) */
	skb_reserve(skb, 2);

	len = xemaclite_recv_data(lp, (u8 *) skb->data);

	if (!len) {
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
		return;
	}

	skb_put(skb, len);	/* Tell the skb how much data we got */

	skb->protocol = eth_type_trans(skb, dev);
	skb_checksum_none_assert(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	if (!skb_defer_rx_timestamp(skb))
		netif_rx(skb);	/* Send the packet upstream */
}
/**
 * xemaclite_interrupt - Interrupt handler for this driver
 * @irq:	Irq of the Emaclite device
 * @dev_id:	Void pointer to the network device instance used as callback
 *		reference
 *
 * This function handles the Tx and Rx interrupts of the EmacLite device.
 * Both ping and pong buffers are checked on every invocation; Tx
 * completion is detected by BUSY having cleared while the software-only
 * ACTIVE flag (set in xemaclite_send_data) is still set.
 */
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
	bool tx_complete = false;
	struct net_device *dev = dev_id;
	struct net_local *lp = netdev_priv(dev);
	void __iomem *base_addr = lp->base_addr;
	u32 tx_status;

	/* Check if there is Rx Data available */
	if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) ||
			(in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
			 & XEL_RSR_RECV_DONE_MASK))
		xemaclite_rx_handler(dev);

	/* Check if the Transmission for the first buffer is completed */
	tx_status = in_be32(base_addr + XEL_TSR_OFFSET);
	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {

		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
		out_be32(base_addr + XEL_TSR_OFFSET, tx_status);

		tx_complete = true;
	}

	/* Check if the Transmission for the second buffer is completed */
	tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
	if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
		(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {

		tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
		out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
			 tx_status);

		tx_complete = true;
	}

	/* If there was a Tx interrupt, call the Tx Handler */
	if (tx_complete != 0)
		xemaclite_tx_handler(dev);

	return IRQ_HANDLED;
}
/**********************/
/* MDIO Bus functions */
/**********************/
/**
 * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
 * @lp:		Pointer to the Emaclite device private data
 *
 * This function waits till the device is ready to accept a new MDIO
 * request (MDIOSTS clear), polling once per millisecond for up to two
 * jiffies.
 *
 * Return:	0 for success or -ETIMEDOUT for a timeout
 *
 * The previous code compared "end - jiffies <= 0" with a signed long
 * against the unsigned jiffies counter; the subtraction was performed
 * as unsigned, so the condition only triggered when end == jiffies
 * exactly and the timeout could be missed entirely if jiffies skipped
 * past end (and misbehaved at jiffies wraparound).  time_after() is the
 * wraparound-safe kernel idiom.
 */
static int xemaclite_mdio_wait(struct net_local *lp)
{
	unsigned long end = jiffies + 2;

	/* wait for the MDIO interface to not be busy or timeout
	   after some time.
	*/
	while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
			XEL_MDIOCTRL_MDIOSTS_MASK) {
		if (time_after(jiffies, end)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}
/**
 * xemaclite_mdio_read - Read from a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to read from
 *
 * This function waits till the device is ready to accept a new MDIO
 * request and then writes the phy address to the MDIO Address register
 * and reads data from MDIO Read Data register, when its available.
 *
 * Return:	Value read from the MII management register, or -ETIMEDOUT
 *		if the MDIO interface stayed busy
 */
static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;
	u32 rc;

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and set the OP bit in the
	 * MDIO Address register. Set the Status bit in the MDIO Control
	 * register to start a MDIO read transaction.
	 */
	ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
		 XEL_MDIOADDR_OP_MASK |
		 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
	out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
		 ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);

	/* Wait for the read transaction itself to complete */
	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);

	dev_dbg(&lp->ndev->dev,
		"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
		phy_id, reg, rc);

	return rc;
}
/**
 * xemaclite_mdio_write - Write to a given MII management register
 * @bus:	the mii_bus struct
 * @phy_id:	the phy address
 * @reg:	register number to write to
 * @val:	value to write to the register number specified by reg
 *
 * This function waits till the device is ready to accept a new MDIO
 * request and then writes the val to the MDIO Write Data register.
 *
 * Return:	0 on success, or -ETIMEDOUT if the MDIO interface stayed busy
 */
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
				u16 val)
{
	struct net_local *lp = bus->priv;
	u32 ctrl_reg;

	dev_dbg(&lp->ndev->dev,
		"xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
		phy_id, reg, val);

	if (xemaclite_mdio_wait(lp))
		return -ETIMEDOUT;

	/* Write the PHY address, register number and clear the OP bit in the
	 * MDIO Address register and then write the value into the MDIO Write
	 * Data register. Finally, set the Status bit in the MDIO Control
	 * register to start a MDIO write transaction.
	 */
	ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
	out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
		 ~XEL_MDIOADDR_OP_MASK &
		 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
	out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
	out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
		 ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);

	return 0;
}
/**
 * xemaclite_mdio_reset - Reset the mdio bus.
 * @bus:	Pointer to the MII bus
 *
 * This function is required(?) as per Documentation/networking/phy.txt.
 * There is no reset in this device; this function always returns 0.
 */
static int xemaclite_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
/**
 * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
 * @lp:		Pointer to the Emaclite device private data
 * @dev:	Pointer to the device structure (bus parent)
 *
 * This function enables MDIO bus in the Emaclite device and registers a
 * mii_bus.
 *
 * Fixes over the previous version: the reference taken by of_get_parent()
 * is now dropped on every path (it was never released, leaking the node);
 * the return value of of_address_to_resource() is checked instead of
 * using a potentially uninitialized resource; and lp->mii_bus is only set
 * once registration has succeeded, so it never points at a freed bus.
 *
 * Return:	0 upon success or a negative error upon failure
 */
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
	struct mii_bus *bus;
	int rc;
	struct resource res;
	struct device_node *np = of_get_parent(lp->phy_node);

	/* Don't register the MDIO bus if the phy_node or its parent node
	 * can't be found.
	 */
	if (!np)
		return -ENODEV;

	/* Enable the MDIO bus by asserting the enable bit in MDIO Control
	 * register.
	 */
	out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
		 XEL_MDIOCTRL_MDIOEN_MASK);

	bus = mdiobus_alloc();
	if (!bus) {
		rc = -ENOMEM;
		goto err_put_node;
	}

	/* The parent node's address makes the bus id unique */
	rc = of_address_to_resource(np, 0, &res);
	if (rc)
		goto err_free_bus;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
		 (unsigned long long)res.start);
	bus->priv = lp;
	bus->name = "Xilinx Emaclite MDIO";
	bus->read = xemaclite_mdio_read;
	bus->write = xemaclite_mdio_write;
	bus->reset = xemaclite_mdio_reset;
	bus->parent = dev;
	bus->irq = lp->mdio_irqs; /* preallocated IRQ table */

	rc = of_mdiobus_register(bus, np);
	if (rc)
		goto err_free_bus;

	lp->mii_bus = bus;
	of_node_put(np);
	return 0;

err_free_bus:
	mdiobus_free(bus);
err_put_node:
	of_node_put(np);
	return rc;
}
/**
 * xemaclite_adjust_link - Link state callback for the Emaclite device
 * @ndev:	pointer to net_device struct
 *
 * There's nothing in the Emaclite device to be configured when the link
 * state changes, so this callback only reports transitions: the speed,
 * duplex and link bits are hashed together and compared against the last
 * reported state.
 */
void xemaclite_adjust_link(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	int link_state = phy->speed | (phy->duplex << 1) | phy->link;

	/* Nothing changed since the last report — stay quiet */
	if (lp->last_link == link_state)
		return;

	lp->last_link = link_state;
	phy_print_status(phy);
}
/**
 * xemaclite_open - Open the network device
 * @dev: Pointer to the network device
 *
 * This function sets the MAC address, requests an IRQ and enables interrupts
 * for the Emaclite device and starts the Tx queue.
 * It also connects to the phy device, if MDIO is included in Emaclite device.
 *
 * Return: 0 on success, or a negative error (-ENODEV if the PHY can't be
 * connected, or the failure code from request_irq()).
 */
static int xemaclite_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int retval;

	/* Just to be safe, stop the device first */
	xemaclite_disable_interrupts(lp);

	if (lp->phy_node) {
		u32 bmcr;

		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     xemaclite_adjust_link, 0,
					     PHY_INTERFACE_MODE_MII);
		if (!lp->phy_dev) {
			dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}

		/* EmacLite doesn't support giga-bit speeds */
		lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
		lp->phy_dev->advertising = lp->phy_dev->supported;

		/* Don't advertise 1000BASE-T Full/Half duplex speeds */
		phy_write(lp->phy_dev, MII_CTRL1000, 0);

		/* Advertise only 10 and 100mbps full/half duplex speeds */
		phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);

		/* Restart auto negotiation */
		bmcr = phy_read(lp->phy_dev, MII_BMCR);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		phy_write(lp->phy_dev, MII_BMCR, bmcr);

		phy_start(lp->phy_dev);
	}

	/* Set the MAC address each time opened */
	xemaclite_update_address(lp, dev->dev_addr);

	/* Grab the IRQ */
	retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
	if (retval) {
		dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
			dev->irq);
		/* undo the PHY connection made above before bailing out */
		if (lp->phy_dev)
			phy_disconnect(lp->phy_dev);
		lp->phy_dev = NULL;

		return retval;
	}

	/* Enable Interrupts */
	xemaclite_enable_interrupts(lp);

	/* We're ready to go */
	netif_start_queue(dev);

	return 0;
}
/**
 * xemaclite_close - Close the network device
 * @dev: Pointer to the network device
 *
 * This function stops the Tx queue, disables interrupts and frees the IRQ for
 * the Emaclite device.
 * It also disconnects the phy device associated with the Emaclite device.
 *
 * Return: always 0.
 */
static int xemaclite_close(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	xemaclite_disable_interrupts(lp);
	free_irq(dev->irq, dev);

	/* tear down the PHY connection established in xemaclite_open() */
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	return 0;
}
/**
 * xemaclite_send - Transmit a frame
 * @orig_skb: Pointer to the socket buffer to be transmitted
 * @dev: Pointer to the network device
 *
 * This function checks if the Tx buffer of the Emaclite device is free to send
 * data. If so, it fills the Tx buffer with data from socket buffer data,
 * updates the stats and frees the socket buffer. The Tx completion is signaled
 * by an interrupt. If the Tx buffer isn't free, then the socket buffer is
 * deferred and the Tx queue is stopped so that the deferred socket buffer can
 * be transmitted when the Emaclite device is free to transmit data.
 *
 * Return: 0, always.
 */
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *new_skb;
	unsigned int len;
	unsigned long flags;

	len = orig_skb->len;
	new_skb = orig_skb;

	/* reset_lock serialises FIFO access against the ISR */
	spin_lock_irqsave(&lp->reset_lock, flags);
	if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
		/* If the Emaclite Tx buffer is busy, stop the Tx queue and
		 * defer the skb for transmission during the ISR, after the
		 * current transmission is complete */
		netif_stop_queue(dev);
		lp->deferred_skb = new_skb;
		/* Take the time stamp now, since we can't do this in an ISR. */
		skb_tx_timestamp(new_skb);
		spin_unlock_irqrestore(&lp->reset_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&lp->reset_lock, flags);

	skb_tx_timestamp(new_skb);

	dev->stats.tx_bytes += len;
	dev_kfree_skb(new_skb);

	return 0;
}
/**
 * xemaclite_remove_ndev - Free the network device
 * @ndev: Pointer to the network device to be freed (may be NULL)
 *
 * Unmaps the Emaclite register window, if it was mapped, and releases
 * the net_device.  Safe to call with a NULL @ndev.
 */
static void xemaclite_remove_ndev(struct net_device *ndev)
{
	struct net_local *lp;

	if (!ndev)
		return;

	lp = netdev_priv(ndev);
	if (lp->base_addr)
		iounmap((void __iomem __force *) (lp->base_addr));
	free_netdev(ndev);
}
/**
* get_bool - Get a parameter from the OF device
* @ofdev: Pointer to OF device structure
* @s: Property to be retrieved
*
* This function looks for a property in the device node and returns the value
* of the property if its found or 0 if the property is not found.
*
* Return: Value of the parameter if the parameter is found, or 0 otherwise
*/
static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
if (p) {
return (bool)*p;
} else {
dev_warn(&ofdev->dev, "Parameter %s not found,"
"defaulting to false\n", s);
return 0;
}
}
static struct net_device_ops xemaclite_netdev_ops;
/**
 * xemaclite_of_probe - Probe method for the Emaclite device.
 * @ofdev: Pointer to OF device structure
 *
 * This function probes for the Emaclite device in the device tree.
 * It initializes the driver data structure and the hardware, sets the MAC
 * address and registers the network device.
 * It also registers a mii_bus for the Emaclite device, if MDIO is included
 * in the device.
 *
 * Return: 0, if the driver is bound to the Emaclite device, or
 * a negative error if there is failure.
 */
static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
{
	struct resource r_irq; /* Interrupt resources */
	struct resource r_mem; /* IO mem resources */
	struct net_device *ndev = NULL;
	struct net_local *lp = NULL;
	struct device *dev = &ofdev->dev;
	const void *mac_address;

	int rc = 0;

	dev_info(dev, "Device Tree Probing\n");

	/* Get iospace for the device */
	rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
	if (rc) {
		dev_err(dev, "invalid address\n");
		return rc;
	}

	/* Get IRQ for the device.  of_irq_to_resource() returns 0 on
	 * failure here; the original code returned that 0, making a
	 * failed probe look successful.  Return a real error instead.
	 */
	rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
	if (!rc) {
		dev_err(dev, "no IRQ found\n");
		return -ENXIO;
	}

	/* Create an ethernet device instance */
	ndev = alloc_etherdev(sizeof(struct net_local));
	if (!ndev)
		return -ENOMEM;

	dev_set_drvdata(dev, ndev);
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	ndev->irq = r_irq.start;
	ndev->mem_start = r_mem.start;
	ndev->mem_end = r_mem.end;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	if (!request_mem_region(ndev->mem_start,
				ndev->mem_end - ndev->mem_start + 1,
				DRIVER_NAME)) {
		dev_err(dev, "Couldn't lock memory region at %p\n",
			(void *)ndev->mem_start);
		rc = -EBUSY;
		goto error2;
	}

	/* Get the virtual base address for the device */
	lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
	if (NULL == lp->base_addr) {
		dev_err(dev, "EmacLite: Could not allocate iomem\n");
		rc = -EIO;
		goto error1;
	}

	spin_lock_init(&lp->reset_lock);
	lp->next_tx_buf_to_use = 0x0;
	lp->next_rx_buf_to_use = 0x0;
	lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
	lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
	mac_address = of_get_mac_address(ofdev->dev.of_node);

	if (mac_address)
		/* Set the MAC address. */
		memcpy(ndev->dev_addr, mac_address, 6);
	else
		dev_warn(dev, "No MAC address found\n");

	/* Clear the Tx CSR's in case this is a restart */
	out_be32(lp->base_addr + XEL_TSR_OFFSET, 0);
	out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);

	/* Set the MAC address in the EmacLite device */
	xemaclite_update_address(lp, ndev->dev_addr);

	lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	rc = xemaclite_mdio_setup(lp, &ofdev->dev);
	if (rc)
		dev_warn(&ofdev->dev, "error registering MDIO bus\n");

	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	ndev->netdev_ops = &xemaclite_netdev_ops;
	ndev->flags &= ~IFF_MULTICAST;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* Finally, register the device */
	rc = register_netdev(ndev);
	if (rc) {
		dev_err(dev,
			"Cannot register network device, aborting\n");
		goto error1;
	}

	dev_info(dev,
		 "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
		 (unsigned int __force)ndev->mem_start,
		 (unsigned int __force)lp->base_addr, ndev->irq);
	return 0;

error1:
	/* error1 falls through to error2: xemaclite_remove_ndev() also
	 * iounmaps lp->base_addr when it was mapped */
	release_mem_region(ndev->mem_start, resource_size(&r_mem));

error2:
	xemaclite_remove_ndev(ndev);
	return rc;
}
/**
 * xemaclite_of_remove - Unbind the driver from the Emaclite device.
 * @of_dev: Pointer to OF device structure
 *
 * This function is called if a device is physically removed from the system or
 * if the driver module is being unloaded. It frees any resources allocated to
 * the device.
 *
 * Return: 0, always.
 */
static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
{
	struct device *dev = &of_dev->dev;
	struct net_device *ndev = dev_get_drvdata(dev);

	struct net_local *lp = netdev_priv(ndev);

	/* Un-register the mii_bus, if configured */
	if (lp->has_mdio) {
		mdiobus_unregister(lp->mii_bus);
		kfree(lp->mii_bus->irq);
		mdiobus_free(lp->mii_bus);
		lp->mii_bus = NULL;
	}

	unregister_netdev(ndev);

	/* drop the phy-handle reference taken during probe */
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);

	xemaclite_remove_ndev(ndev);
	dev_set_drvdata(dev, NULL);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the ISR by hand with the device IRQ masked, so
 * frames can be drained (e.g. by netconsole) in contexts where normal
 * interrupt delivery is unavailable. */
static void
xemaclite_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	xemaclite_interrupt(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif
/* net_device callbacks; forward-declared above so the probe routine can
 * reference this table before its definition here. */
static struct net_device_ops xemaclite_netdev_ops = {
	.ndo_open		= xemaclite_open,
	.ndo_stop		= xemaclite_close,
	.ndo_start_xmit		= xemaclite_send,
	.ndo_set_mac_address	= xemaclite_set_mac_address,
	.ndo_tx_timeout		= xemaclite_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xemaclite_poll_controller,
#endif
};
/* Match table for OF platform binding */
static struct of_device_id xemaclite_of_match[] __devinitdata = {
	{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
	{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
	{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
	{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
	{ .compatible = "xlnx,xps-ethernetlite-3.00.a", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);

/* platform driver glue: bind/unbind via the OF match table above */
static struct platform_driver xemaclite_of_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = xemaclite_of_match,
	},
	.probe		= xemaclite_of_probe,
	.remove		= __devexit_p(xemaclite_of_remove),
};

/* expands to module init/exit that register/unregister the driver */
module_platform_driver(xemaclite_of_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
tommytarts/QuantumKernelM8-GPe | drivers/usb/gadget/net2280.c | 4979 | 76768 | /*
* Driver for the PLX NET2280 USB device controller.
* Specs and errata are available from <http://www.plxtech.com>.
*
* PLX Technology Inc. (formerly NetChip Technology) supported the
* development of this driver.
*
*
* CODE STATUS HIGHLIGHTS
*
* This driver should work well with most "gadget" drivers, including
* the File Storage, Serial, and Ethernet/RNDIS gadget drivers
* as well as Gadget Zero and Gadgetfs.
*
* DMA is enabled by default. Drivers using transfer queues might use
* DMA chaining to remove IRQ latencies between transfers. (Except when
* short OUT transfers happen.) Drivers can use the req->no_interrupt
* hint to completely eliminate some IRQs, if a later IRQ is guaranteed
* and DMA chaining is enabled.
*
* Note that almost all the errata workarounds here are only needed for
* rev1 chips. Rev1a silicon (0110) fixes almost all of them.
*/
/*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 PLX Technology, Inc.
*
* Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
* with 2282 chip
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#undef DEBUG /* messages on error and most fault paths */
#undef VERBOSE /* extra debug messages (success too) */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#define DRIVER_DESC "PLX NET228x USB Peripheral Controller"
#define DRIVER_VERSION "2005 Sept 27"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#define EP_DONTUSE 13 /* nonzero */
#define USE_RDK_LEDS /* GPIO pins control three LEDs */
static const char driver_name [] = "net2280";
static const char driver_desc [] = DRIVER_DESC;
static const char ep0name [] = "ep0";
static const char *const ep_name [] = {
ep0name,
"ep-a", "ep-b", "ep-c", "ep-d",
"ep-e", "ep-f",
};
/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
* use_dma_chaining -- dma descriptor queueing gives even more irq reduction
*
* The net2280 DMA engines are not tightly integrated with their FIFOs;
* not all cases are (yet) handled well in this driver or the silicon.
* Some gadget drivers work better with the dma support here than others.
* These two parameters let you use PIO or more aggressive DMA.
*/
static bool use_dma = 1;
static bool use_dma_chaining = 0;
/* "modprobe net2280 use_dma=n" etc */
module_param (use_dma, bool, S_IRUGO);
module_param (use_dma_chaining, bool, S_IRUGO);
/* mode 0 == ep-{a,b,c,d} 1K fifo each
* mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
* mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
*/
static ushort fifo_mode = 0;
/* "modprobe net2280 fifo_mode=1" etc */
module_param (fifo_mode, ushort, 0644);
/* enable_suspend -- When enabled, the driver will respond to
* USB suspend requests by powering down the NET2280. Otherwise,
* USB suspend requests will be ignored. This is acceptable for
* self-powered devices
*/
static bool enable_suspend = 0;
/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
/* Map an endpoint descriptor's transfer-type bits to a short name for
 * debug output; anything that isn't bulk/iso/interrupt reads "control".
 */
static char *type_string (u8 bmAttributes)
{
	u8 xfertype = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if (xfertype == USB_ENDPOINT_XFER_BULK)
		return "bulk";
	if (xfertype == USB_ENDPOINT_XFER_ISOC)
		return "iso";
	if (xfertype == USB_ENDPOINT_XFER_INT)
		return "intr";
	return "control";
}
#endif
#include "net2280.h"
#define valid_bit cpu_to_le32 (1 << VALID_BIT)
#define dma_done_ie cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.enable: configure one hardware endpoint per the descriptor.
 * Applies several silicon errata workarounds (0105, 0112, 0119) and picks
 * PIO vs DMA per endpoint; interface and ordering are errata-sensitive,
 * so read the numbered notes before changing anything.
 */
static int
net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280		*dev;
	struct net2280_ep	*ep;
	u32			max, tmp;
	unsigned long		flags;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
		return -EDOM;

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp (desc) & 0x1fff;
	if (ep->num > 4 && max > 64)
		return -ERANGE;

	spin_lock_irqsave (&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);

	/* FIFO lines can't go to different packets.  PIO is ok, so
	 * use it instead of troublesome (non-bulk) multi-packet DMA.
	 */
	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
		DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
			ep->ep.name, ep->ep.maxpacket);
		ep->dma = NULL;
	}

	/* set type, direction, address; reset fifo counters */
	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (tmp == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100
				&& dev->gadget.speed == USB_SPEED_HIGH
				&& !(desc->bEndpointAddress & USB_DIR_IN))
			writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH
					&& max != 512)
				|| (dev->gadget.speed == USB_SPEED_FULL
					&& max > 64)) {
			spin_unlock_irqrestore (&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
	tmp <<= ENDPOINT_TYPE;
	tmp |= desc->bEndpointAddress;
	tmp |= (4 << ENDPOINT_BYTE_COUNT);	/* default full fifo lines */
	tmp |= 1 << ENDPOINT_ENABLE;
	wmb ();

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = (tmp & USB_DIR_IN) != 0;
	if (!ep->is_in)
		writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (dev->pdev->device != 0x2280) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel ((1 << CLEAR_NAK_OUT_PACKETS)
			| (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel (tmp, &ep->regs->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
		writel (tmp, &dev->regs->pciirqenb0);

		tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->pdev->device == 0x2280)
			tmp |= readl (&ep->regs->ep_irqenb);
		writel (tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = (1 << (8 + ep->num));	/* completion */
		tmp |= readl (&dev->regs->pciirqenb1);
		writel (tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel (tmp, &ep->regs->ep_irqenb);

			tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
			writel (tmp, &dev->regs->pciirqenb0);
		}
	}

	tmp = desc->bEndpointAddress;
	DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING (tmp),
		type_string (desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore (&dev->lock, flags);
	return 0;
}
/* Poll a register until (value & mask) == done, with a 1 usec delay per
 * attempt, for roughly @usec microseconds.  Returns 0 on match, -ENODEV
 * if all-ones is read (device unplugged), -ETIMEDOUT otherwise.  Like
 * the original, the register is sampled at least once even if usec <= 0.
 */
static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	for (;;) {
		u32 val = readl (ptr);

		if (val == ~(u32)0)		/* "device unplugged" */
			return -ENODEV;
		if ((val & mask) == done)
			return 0;
		udelay (1);
		if (--usec <= 0)
			return -ETIMEDOUT;
	}
}
static const struct usb_ep_ops net2280_ep_ops;
/* Return an endpoint to its power-on-ish defaults: no descriptor bound,
 * empty queue, dma/irqs disabled, response and status registers scrubbed.
 * Called on disable and (presumably) during controller-wide reset paths.
 */
static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
{
	u32		tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD (&ep->queue);

	ep->ep.maxpacket = ~0;
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel (0, &ep->dma->dmactl);
		writel (  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
			| (1 << DMA_TRANSACTION_DONE_INTERRUPT)
			| (1 << DMA_ABORT)
			, &ep->dma->dmastat);

		tmp = readl (&regs->pciirqenb0);
		tmp &= ~(1 << ep->num);
		writel (tmp, &regs->pciirqenb0);
	} else {
		tmp = readl (&regs->pciirqenb1);
		tmp &= ~(1 << (8 + ep->num));		/* completion */
		writel (tmp, &regs->pciirqenb1);
	}
	writel (0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
		tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
		| (1 << SET_NAK_OUT_PACKETS)
		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = (1 << CLEAR_NAK_OUT_PACKETS_MODE)
		| (1 << CLEAR_NAK_OUT_PACKETS)
		| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
			| (1 << CLEAR_ENDPOINT_HALT);
	}
	writel (tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->pdev->device == 0x2280)
		tmp = (1 << FIFO_OVERFLOW)
			| (1 << FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel (tmp | (1 << TIMEOUT)
		| (1 << USB_STALL_SENT)
		| (1 << USB_IN_NAK_SENT)
		| (1 << USB_IN_ACK_RCVD)
		| (1 << USB_OUT_PING_NAK_SENT)
		| (1 << USB_OUT_ACK_SENT)
		| (1 << FIFO_FLUSH)
		| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
		| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_IN_TOKEN_INTERRUPT)
		, &ep->regs->ep_stat);

	/* fifo size is handled separately */
}
static void nuke (struct net2280_ep *);
/* usb_ep_ops.disable: quiesce an endpoint and reset it to defaults.
 * nuke() presumably flushes any queued requests first — it is defined
 * later in this file, outside this view.
 */
static int net2280_disable (struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	unsigned long		flags;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	nuke (ep);
	ep_reset (ep->dev->regs, ep);
	VDEBUG (ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void) readl (&ep->regs->ep_cfg);

	/* re-acquire a dma channel if enable() had dropped it for PIO */
	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma [ep->num - 1];

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.alloc_request: allocate a request, and for dma-capable
 * endpoints also a dma descriptor from the device's pci pool.  The
 * descriptor starts out not-VALID and may later be swapped with the
 * endpoint's dummy descriptor when queued.
 */
static struct usb_request *
net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	if (!_ep)
		return NULL;
	ep = container_of (_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD (&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma	*td;

		td = pci_pool_alloc (ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree (req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}
/* usb_ep_ops.free_request: release a request allocated by
 * net2280_alloc_request(), returning its dma descriptor (if any) to the
 * pci pool.  Warns if the request is still queued.
 *
 * Fixed: validate _ep/_req BEFORE deriving container pointers; the
 * original applied container_of() to a possibly-NULL _ep first, which is
 * pointer arithmetic on a null pointer (undefined behavior).
 */
static void
net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	if (!_ep || !_req)
		return;

	ep = container_of (_ep, struct net2280_ep, ep);
	req = container_of (_req, struct net2280_request, req);
	WARN_ON (!list_empty (&req->queue));
	if (req->td)
		pci_pool_free (ep->dev->requests, req->td, req->td_dma);
	kfree (req);
}
/*-------------------------------------------------------------------------*/
/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 *
 * A NULL @req writes a zero-length packet.
 */
static void
write_fifo (struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf;
	u32			tmp;
	unsigned		count, total;

	/* INVARIANT: fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch (buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;
	VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned ((u32 *)buf);
		cpu_to_le32s (&tmp);
		writel (tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned ((u32 *)buf) : count;
		cpu_to_le32s (&tmp);
		set_fifo_bytecount (ep, count & 0x03);
		writel (tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE: also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush (struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	ASSERT_OUT_NAKING (ep);

	statp = &ep->regs->ep_stat;
	/* ack pending OUT-token/packet status, then flush the fifo */
	writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		, statp);
	writel ((1 << FIFO_FLUSH), statp);
	mb ();
	tmp = readl (statp);
	if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			/* high speed did bulk NYET; fifo isn't filling */
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
				(1 << USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int
read_fifo (struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		if ((tmp & (1 << NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & (1 << FIFO_FULL))) {
			start_out_naking (ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw (buf);
	count = readl (&regs->ep_avail);
	if (unlikely (count == 0)) {
		/* re-sample once; the byte count may lag the status */
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		count = readl (&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ERROR (ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	while (count >= 4) {
		tmp = readl (&regs->ep_data);
		cpu_to_le32s (&tmp);
		put_unaligned (tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl (&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush (ep);
	if (prevent) {
		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl (&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length)
			&& !req->req.zero);
}
/* fill out dma descriptor to match a given request.
 * @valid controls whether the VALID bit is set, i.e. whether the hardware
 * may consume this descriptor as soon as it polls it.
 */
static void
fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= (1 << DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
			|| ep->dev->pdev->device != 0x2280)
		dmacount |= (1 << END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= (1 << VALID_BIT);
	if (likely(!req->req.no_interrupt || !use_dma_chaining))
		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc;
	 * the barrier makes dmaaddr visible before dmacount flips VALID */
	wmb ();
	td->dmacount = cpu_to_le32(dmacount);
}
/* baseline DMA control word used when starting a queue: scatter/gather
 * mode with descriptor valid-bit polling (see erratum 0116 notes below) */
static const u32 dmactl_default =
		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
		| (1 << DMA_CLEAR_COUNT_ENABLE)
		/* erratum 0116 workaround part 1 (use POLLING) */
		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
		| (1 << DMA_VALID_BIT_ENABLE)
		| (1 << DMA_SCATTER_GATHER_ENABLE)
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		| (1 << DMA_ENABLE);
/* busy-wait (up to 50 usec) for the engine's DMA_ENABLE bit to drop */
static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
{
	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
}
/* ask the engine to stop, then wait until it actually has */
static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
{
	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
	spin_stop_dma (dma);
}
/* point the dma engine at descriptor @td_dma and start it with control
 * word @dmactl.  Writing dmastat back to itself presumably write-1-clears
 * stale status bits — confirm against the NET2280 databook.
 */
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = (1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (ep->dev->pdev->device != 0x2280)
		tmp |= (1 << END_OF_CHAIN);

	writel (tmp, &dma->dmacount);
	writel (readl (&dma->dmastat), &dma->dmastat);

	writel (td_dma, &dma->dmadesc);
	writel (dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3: pci arbiter away from net2280 */
	(void) readl (&ep->dev->pci->pcimstctl);

	writel ((1 << DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking (ep);
}
/* start dma for @req on an idle engine; handles the special case where a
 * previous short OUT packet left data sitting in the fifo (drained with a
 * one-shot, non-scatter-gather transfer before normal queueing resumes).
 */
static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
	u32			tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
	writel (0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
				& (1 << NAK_OUT_PACKETS)) != 0) {
		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl (&ep->regs->ep_avail);
		if (tmp) {
			writel (readl (&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel (req->req.dma, &dma->dmaaddr);
			tmp = min (tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
			writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
				| tmp, &dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel ((1 << DMA_ENABLE), &dma->dmactl);
			writel ((1 << DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely ((req->req.length % ep->ep.maxpacket) != 0
				|| req->req.zero)) {
			tmp |= (1 << DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc (ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= cpu_to_le32 (1 << END_OF_CHAIN);

	start_queue (ep, tmp, req->td_dma);
}
/* append @req to an endpoint's descriptor chain by swapping its fresh
 * descriptor with the queue's trailing dummy, then linking and filling it.
 */
static inline void
queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc (ep, req, valid);
}
/* Retire one request: unlink it from the endpoint queue, record its final
 * status, unmap any DMA buffer, and invoke the gadget driver's completion
 * callback.  Called with dev->lock held; the lock is dropped around the
 * callback, and ep->stopped is forced while the callback runs so the
 * callback cannot advance this queue underneath us.
 */
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init (&req->queue);

	/* -EINPROGRESS means "still pending": overwrite with the caller's
	 * status; otherwise a status was already set (e.g. by an irq path)
	 * and we report that one instead.
	 */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock (&dev->lock);
	req->req.complete (&ep->ep, &req->req);
	spin_lock (&dev->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.queue: submit a request on an endpoint.
 *
 * Validates the request, maps it for DMA if the endpoint uses DMA, and
 * either kickstarts an idle queue (DMA start, PIO fifo fill, or ep0
 * status ack) or appends to a running DMA chain.  Returns 0 on success,
 * -EINVAL / -EDOM / -ESHUTDOWN / -EOPNOTSUPP on rejection.
 *
 * NOTE(review): container_of() is applied to _req/_ep before the NULL
 * checks; that is safe only because container_of does not dereference,
 * and the short-circuit `!_req || ...` / `!_ep || ...` tests guard every
 * actual dereference.
 */
static int
net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of (_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty (&req->queue))
		return -EINVAL;
	/* length must fit in the hardware's dma byte counter */
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		int ret;

		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			return ret;
	}

#if 0
	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave (&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty (&ep->queue) && !ep->stopped) {
		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma (ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status (ep);
				done (ep, req, 0);
				VDEBUG (dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it.  */
			if (ep->is_in)
				write_fifo (ep, _req);
			else if (list_empty (&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl (&ep->regs->ep_stat);
				if ((s & (1 << FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo (ep, req)) {
						done (ep, req, 0);
						if (ep->num == 0)
							allow_status (ep);
						/* don't queue it */
						req = NULL;
					} else
						s = readl (&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & (1 << NAK_OUT_PACKETS)))
					writel ((1 << CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely (req->req.zero
				|| (req->req.length % ep->ep.maxpacket) != 0);
			/* mark descriptor invalid on a mode mismatch so the
			 * irq path re-syncs DMA_FIFO_VALIDATE (see restart_dma)
			 */
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma (ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail (&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}
/* Finish 'req' after a DMA transfer: the low bits of 'dmacount' hold the
 * residue (bytes NOT transferred), so the actual byte count is the request
 * length minus that residue.  Delegates completion to done().
 */
static inline void
dma_done (
	struct net2280_ep *ep,
	struct net2280_request *req,
	u32 dmacount,
	int status
)
{
	u32 residue = dmacount & DMA_BYTE_COUNT_MASK;

	req->req.actual = req->req.length - residue;
	done (ep, req, status);
}
static void restart_dma (struct net2280_ep *ep);
/* Retire DMA descriptors that the hardware completed "naturally" at the
 * head of ep->queue, stopping at the first still-valid descriptor.
 *
 * Fix: the original reused one temporary for the descriptor's dmacount AND
 * for the ep_stat/ep_avail register reads in the short-OUT branch, so
 * dma_done() was handed a register value instead of the residue and
 * req->req.actual was computed from garbage.  The descriptor count is now
 * kept in its own variable for the final dma_done() call.
 */
static void scan_dma_completions (struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty (&ep->queue)) {
		struct net2280_request	*req;
		u32			dmacount;

		req = list_entry (ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb ();
		dmacount = le32_to_cpup (&req->td->dmacount);
		if ((dmacount & (1 << VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely (req->td->dmadesc == 0)) {
			/* paranoia */
			u32 tmp = readl (&ep->dma->dmacount);

			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done (ep, req, tmp, 0);
			break;
		} else if (!ep->is_in
				&& (req->req.length % ep->ep.maxpacket) != 0) {
			u32 stat = readl (&ep->regs->ep_stat);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoids errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((stat & (1 << NAK_OUT_PACKETS)) == 0) {
				WARNING (ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else {
				u32 avail = readl (&ep->regs->ep_avail);

				if (avail) {
					/* fifo gets flushed later */
					ep->out_overflow = 1;
					DEBUG (ep->dev,
						"%s dma, discard %d len %d\n",
						ep->ep.name, avail,
						req->req.length);
					req->req.status = -EOVERFLOW;
				}
			}
		}
		/* 'dmacount' still holds the descriptor residue here, so
		 * req->req.actual is derived from the right value.
		 */
		dma_done (ep, req, dmacount, 0);
	}
}
/* Re-arm DMA after the engine stopped (irq completion, abort recovery).
 * Without chaining this is just start_dma() on the queue head.  With
 * chaining, an IN-side "hiccup" (queue head wants a different automagic
 * zlp mode than the engine is in) forces a re-walk of the queue: entries
 * that match the new DMA_FIFO_VALIDATE mode are re-validated, the first
 * mismatch forces the chain to stop there, and everything after it is
 * invalidated so unlinks stay coherent.
 */
static void restart_dma (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			dmactl = dmactl_default;

	if (ep->stopped)
		return;
	/* NOTE(review): assumes the queue is non-empty here — callers must
	 * guarantee that, since list_entry() on an empty list is bogus.
	 */
	req = list_entry (ep->queue.next, struct net2280_request, queue);

	if (!use_dma_chaining) {
		start_dma (ep, req);
		return;
	}

	/* the 2280 will be processing the queue unless queue hiccups after
	 * the previous transfer:
	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
	 *  OUT:  was "usb-short", we must restart.
	 */
	if (ep->is_in && !req->valid) {
		struct net2280_request	*entry, *prev = NULL;
		int			reqmode, done = 0;

		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
		/* decide the new engine mode from the queue head */
		ep->in_fifo_validate = likely (req->req.zero
			|| (req->req.length % ep->ep.maxpacket) != 0);
		if (ep->in_fifo_validate)
			dmactl |= (1 << DMA_FIFO_VALIDATE);
		list_for_each_entry (entry, &ep->queue, queue) {
			__le32		dmacount;

			if (entry == req)
				continue;
			dmacount = entry->td->dmacount;
			if (!done) {
				reqmode = likely (entry->req.zero
					|| (entry->req.length
						% ep->ep.maxpacket) != 0);
				if (reqmode == ep->in_fifo_validate) {
					/* same mode: keep it in the chain */
					entry->valid = 1;
					dmacount |= valid_bit;
					entry->td->dmacount = dmacount;
					prev = entry;
					continue;
				} else {
					/* force a hiccup */
					/* NOTE(review): if the first entry
					 * after 'req' mismatches, prev is
					 * still NULL here — possible NULL
					 * dereference; verify callers.
					 */
					prev->td->dmacount |= dma_done_ie;
					done = 1;
				}
			}

			/* walk the rest of the queue so unlinks behave */
			entry->valid = 0;
			dmacount &= ~valid_bit;
			entry->td->dmacount = dmacount;
			prev = entry;
		}
	}

	writel (0, &ep->dma->dmactl);
	start_queue (ep, dmactl, req->td_dma);
}
/* Stop the endpoint's DMA engine.  If a transfer is in flight it is
 * aborted (and the engine spun until it actually stops); an idle engine
 * is just disabled.  Any descriptors the hardware already retired are
 * then completed via scan_dma_completions().
 */
static void abort_dma (struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely (!list_empty (&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel ((1 << DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma (ep->dma);
	} else
		stop_dma (ep->dma);
	scan_dma_completions (ep);
}
/* dequeue ALL requests */
static void nuke (struct net2280_ep *ep)
{
struct net2280_request *req;
/* called with spinlock held */
ep->stopped = 1;
if (ep->dma)
abort_dma (ep);
while (!list_empty (&ep->queue)) {
req = list_entry (ep->queue.next,
struct net2280_request,
queue);
done (ep, req, -ESHUTDOWN);
}
}
/* dequeue JUST ONE request */
/* usb_ep_ops.dequeue: cancel one queued request.
 *
 * Quiesces DMA while the queue is patched, completes the request with
 * -ECONNRESET, then either resumes the interrupted DMA program or starts
 * the next request.  Returns -EINVAL if the request is not queued here.
 */
static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl (&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma (ep->dma);
		scan_dma_completions (ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* if the loop ran off the end, &req->req != _req */
	if (&req->req != _req) {
		spin_unlock_irqrestore (&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma (ep);
			/* abort_dma may have already completed it; only
			 * finish here if it is still the queue head
			 */
			if (likely (ep->queue.next == &req->queue)) {
				// NOTE: misreports single-transfer mode
				req->td->dmacount = 0;	/* invalidate */
				dma_done (ep, req,
					readl (&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
			done (ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		if (req->queue.prev == ep->queue.next) {
			/* victim is right behind the active head: point the
			 * engine's next-descriptor register past it
			 */
			writel (le32_to_cpu (req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel (readl (&ep->dma->dmacount)
						| le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			/* splice the victim out of the descriptor chain */
			struct net2280_request	*prev;

			prev = list_entry (req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done (ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty (&ep->queue))
			stop_dma (ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel (dmactl, &ep->dma->dmactl);
			else
				start_dma (ep, list_entry (ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
static int net2280_fifo_status (struct usb_ep *_ep);
/* Set or clear an endpoint halt, optionally "wedging" it (halt that only
 * an endpoint re-enable clears, not CLEAR_FEATURE).  Rejects ISO endpoints
 * and refuses to halt while requests are queued or while an IN fifo still
 * holds data.  Returns 0, -EINVAL, -ESHUTDOWN, or -EAGAIN.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	if (!list_empty (&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
		retval = -EAGAIN;
	else {
		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				/* ep0 stalls are handled by the irq path */
				ep->dev->protocol_stall = 1;
			else
				set_halt (ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt (ep);
			ep->wedged = 0;
		}
		/* flush posted pci writes */
		(void) readl (&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore (&ep->dev->lock, flags);

	return retval;
}
/* usb_ep_ops.set_halt: plain (non-wedge) halt set/clear. */
static int
net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
static int
net2280_set_wedge(struct usb_ep *_ep)
{
if (!_ep || _ep->name == ep0name)
return -EINVAL;
return net2280_set_halt_and_wedge(_ep, 1, 1);
}
/* usb_ep_ops.fifo_status: report endpoint fifo occupancy.
 * OUT endpoints return bytes buffered; IN endpoints return the number of
 * bytes that could still be written (free space), per the gadget API.
 */
static int
net2280_fifo_status (struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	u32			bytes;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* low 12 bits of ep_avail give the byte count */
	bytes = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
	if (bytes > ep->fifo_size)
		return -EOVERFLOW;

	if (ep->is_in)
		bytes = ep->fifo_size - bytes;
	return bytes;
}
/* usb_ep_ops.fifo_flush: discard any bytes buffered in the endpoint fifo.
 * The trailing register read flushes the posted pci write.
 */
static void
net2280_fifo_flush (struct usb_ep *_ep)
{
	struct net2280_ep	*ep;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl (&ep->regs->ep_rsp);
}
/* per-endpoint operations exported to the gadget framework */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
/*-------------------------------------------------------------------------*/
static int net2280_get_frame (struct usb_gadget *_gadget)
{
struct net2280 *dev;
unsigned long flags;
u16 retval;
if (!_gadget)
return -ENODEV;
dev = container_of (_gadget, struct net2280, gadget);
spin_lock_irqsave (&dev->lock, flags);
retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
spin_unlock_irqrestore (&dev->lock, flags);
return retval;
}
static int net2280_wakeup (struct usb_gadget *_gadget)
{
struct net2280 *dev;
u32 tmp;
unsigned long flags;
if (!_gadget)
return 0;
dev = container_of (_gadget, struct net2280, gadget);
spin_lock_irqsave (&dev->lock, flags);
tmp = readl (&dev->usb->usbctl);
if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
spin_unlock_irqrestore (&dev->lock, flags);
/* pci writes may still be posted */
return 0;
}
static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
{
struct net2280 *dev;
u32 tmp;
unsigned long flags;
if (!_gadget)
return 0;
dev = container_of (_gadget, struct net2280, gadget);
spin_lock_irqsave (&dev->lock, flags);
tmp = readl (&dev->usb->usbctl);
if (value)
tmp |= (1 << SELF_POWERED_STATUS);
else
tmp &= ~(1 << SELF_POWERED_STATUS);
writel (tmp, &dev->usb->usbctl);
spin_unlock_irqrestore (&dev->lock, flags);
return 0;
}
static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
struct net2280 *dev;
u32 tmp;
unsigned long flags;
if (!_gadget)
return -ENODEV;
dev = container_of (_gadget, struct net2280, gadget);
spin_lock_irqsave (&dev->lock, flags);
tmp = readl (&dev->usb->usbctl);
dev->softconnect = (is_on != 0);
if (is_on)
tmp |= (1 << USB_DETECT_ENABLE);
else
tmp &= ~(1 << USB_DETECT_ENABLE);
writel (tmp, &dev->usb->usbctl);
spin_unlock_irqrestore (&dev->lock, flags);
return 0;
}
/* udc_start/udc_stop are defined after the helpers they need */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);

/* controller-level operations exported to the gadget framework */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
/* FIXME move these into procfs, and use seq_file.
* Sysfs _still_ doesn't behave for arbitrarily sized files,
* and also doesn't help products using this with 2.4 kernels.
*/
/* "function" sysfs attribute */
static ssize_t
show_function (struct device *_dev, struct device_attribute *attr, char *buf)
{
struct net2280 *dev = dev_get_drvdata (_dev);
if (!dev->driver
|| !dev->driver->function
|| strlen (dev->driver->function) > PAGE_SIZE)
return 0;
return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
/* sysfs "registers" attribute: dump controller, USB, per-endpoint, and
 * per-channel DMA register state plus irq counters into one page.
 * Returns the number of bytes written (PAGE_SIZE - remaining).
 */
static ssize_t net2280_show_registers(struct device *_dev,
				struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size, t;
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf (next, size, "%s version " DRIVER_VERSION
			", chiprev %04x, dma %s\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled",
			readl (&dev->regs->devinit),
			readl (&dev->regs->fifoctl),
			s,
			readl (&dev->regs->pciirqenb0),
			readl (&dev->regs->pciirqenb1),
			readl (&dev->regs->irqstat0),
			readl (&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl (&dev->usb->usbctl);
	t2 = readl (&dev->usb->usbstat);
	if (t1 & (1 << VBUS_PIN)) {
		if (t2 & (1 << HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf (next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl (&dev->usb->stdrsp), t1, t2,
			readl (&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		/* only dump configured endpoints (ep0 always shown) */
		if (i && !ep->desc)
			continue;

		t1 = readl (&ep->regs->ep_cfg);
		t2 = readl (&ep->regs->ep_rsp) & 0xff;
		t = scnprintf (next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & (1 << CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & (1 << CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl (&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf (next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl (&ep->regs->ep_stat),
				readl (&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING (t1),
				type_string (t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf (next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl (&ep->dma->dmactl),
				readl (&ep->dma->dmastat),
				readl (&ep->dma->dmacount),
				readl (&ep->dma->dmaaddr),
				readl (&ep->dma->dmadesc));
		size -= t;
		next += t;
	}

	/* Indexed Registers */
		// none yet

	/* Statistics */
	t = scnprintf (next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf (next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore (&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL);
/* sysfs "queues" attribute: for each configured endpoint, print its
 * descriptor summary and every queued request (with the live descriptor
 * contents for DMA endpoints).  Output is truncated at one page.
 */
static ssize_t
show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size;
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	for (i = 0; i < 7; i++) {
		struct net2280_ep		*ep = &dev->ep [i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf (next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				({ char *val;
				 switch (d->bmAttributes & 0x03) {
				 case USB_ENDPOINT_XFER_BULK:
					val = "bulk"; break;
				 case USB_ENDPOINT_XFER_INT:
					val = "intr"; break;
				 default:
					val = "iso"; break;
				 }; val; }),
				usb_endpoint_maxp (d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		/* bail out once the page is full */
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty (&ep->queue)) {
			t = scnprintf (next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry (req, &ep->queue, queue) {
			/* flag the request the DMA engine is working on */
			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
				t = scnprintf (next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl (&ep->dma->dmacount));
			else
				t = scnprintf (next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				td = req->td;
				t = scnprintf (next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu (td->dmacount),
					le32_to_cpu (td->dmaaddr),
					le32_to_cpu (td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore (&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
#else
#define device_create_file(a,b) (0)
#define device_remove_file(a,b) do { } while (0)
#endif
/*-------------------------------------------------------------------------*/
/* another driver-specific mode might be a request type doing dma
* to/from another device fifo instead of to/from memory.
*/
/* Program the fifo partitioning mode and rebuild gadget.ep_list to match:
 * mode 0 = four 1K fifos (ep-a..ep-d), mode 1 = two 2K fifos (ep-a, ep-b),
 * mode 2 = one 2K + two 1K (ep-a..ep-c).  ep-e/ep-f always exist.
 */
static void set_fifo_mode (struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
		break;
	case 1:
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = 2048;
		dev->ep [2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
}
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*
* most of the work to support multiple net2280 controllers would
* be to associate this gadget driver (yes?) with all of them, or
* perhaps to bind specific drivers to specific devices.
*/
/* Hard-reset the controller: mask all irq sources, abort any DMA, clear
 * pending irq status, then pulse the fifo/usb/8051 soft resets and restore
 * the fifo partitioning.  Leaves the device disconnected from USB state
 * (speed unknown) until the next enumeration.
 */
static void usb_reset (struct net2280 *dev)
{
	u32	tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void) readl (&dev->usb->usbctl);

	net2280_led_init (dev);

	/* disable automatic responses, and irqs */
	writel (0, &dev->usb->stdrsp);
	writel (0, &dev->regs->pciirqenb0);
	writel (0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep	*ep = &dev->ep [tmp + 1];

		if (ep->dma)
			abort_dma (ep);
	}
	/* NOTE(review): the trailing commas below chain these statements
	 * with the comma operator into the 'tmp = ...' expression; the
	 * evaluation order is left-to-right so behavior is as intended,
	 * but semicolons were probably meant.
	 */
	writel (~0, &dev->regs->irqstat0),
	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),

	/* reset, and enable pci */
	tmp = readl (&dev->regs->devinit)
		| (1 << PCI_ENABLE)
		| (1 << FIFO_SOFT_RESET)
		| (1 << USB_SOFT_RESET)
		| (1 << M8051_RESET);
	writel (tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
}
/* Re-initialize software endpoint state after a reset: names, fifo sizes,
 * DMA channel assignment (ep-a..ep-d only), register pointers, and ep0
 * bookkeeping.  Also parks the dedicated endpoints so the host cannot use
 * them for lowlevel access.
 */
static void usb_reinit (struct net2280 *dev)
{
	u32	tmp;
	int	init_dma;

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep	*ep = &dev->ep [tmp];

		ep->ep.name = ep_name [tmp];
		ep->dev = dev;
		ep->num = tmp;

		/* ep-a..ep-d get 1K fifos and a DMA channel each */
		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			if (init_dma)
				ep->dma = &dev->dma [tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs [tmp];
		ep_reset (dev->regs, ep);
	}
	dev->ep [0].ep.maxpacket = 64;
	dev->ep [5].ep.maxpacket = 64;
	dev->ep [6].ep.maxpacket = 64;

	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->ep [0].stopped = 0;
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
}
/* Arm ep0 and global irqs so the device can enumerate: clear stale ep0
 * response state, enable the hardware's automatic handling of selected
 * standard requests, program usbctl (including the saved softconnect
 * state), and unmask the setup/ep0 and controller-level interrupts.
 */
static void ep0_start (struct net2280 *dev)
{
	writel (  (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_NAK_OUT_PACKETS)
		| (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
		, &dev->epregs [0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel (  (1 << SET_TEST_MODE)
		| (1 << SET_ADDRESS)
		| (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
		| (1 << GET_DEVICE_STATUS)
		| (1 << GET_INTERFACE_STATUS)
		, &dev->usb->stdrsp);
	writel (  (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
		| (1 << SELF_POWERED_USB_DEVICE)
		| (1 << REMOTE_WAKEUP_SUPPORT)
		| (dev->softconnect << USB_DETECT_ENABLE)
		| (1 << SELF_POWERED_STATUS)
		, &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel (  (1 << SETUP_PACKET_INTERRUPT_ENABLE)
		| (1 << ENDPOINT_0_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb0);
	writel (  (1 << PCI_INTERRUPT_ENABLE)
		| (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
		| (1 << VBUS_INTERRUPT_ENABLE)
		| (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
		| (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl (&dev->usb->usbctl);
}
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
/* usb_gadget_ops.udc_start: bind a gadget function driver.
 *
 * Insists on a high-speed-capable driver with a setup() handler, resets
 * the per-endpoint irq counters, creates the sysfs attributes, and arms
 * ep0 / host detection.  On sysfs failure the driver is unbound again.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280		*dev;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;

	dev = container_of (_gadget, struct net2280, gadget);

	for (i = 0; i < 7; i++)
		dev->ep [i].irqs = 0;

	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
	if (retval) goto err_unbind;
	retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
	if (retval) goto err_func;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active (dev, 1);
	ep0_start (dev);

	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
			driver->driver.name,
			readl (&dev->usb->usbctl),
			readl (&dev->usb->stdrsp));

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
err_unbind:
	/* NOTE(review): unbind() here reverses the bind the UDC core did
	 * before calling udc_start — confirm against the core's error
	 * handling to avoid a double unbind.
	 */
	driver->unbind (&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;
	return retval;
}
/* Quiesce the controller on disconnect/unbind: reset the hardware (which
 * also blocks new submissions), flush every endpoint's queue with
 * -ESHUTDOWN, then rebuild the software endpoint state.
 */
static void
stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int	ep_index;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset (dev);
	for (ep_index = 0; ep_index < 7; ep_index++)
		nuke (&dev->ep [ep_index]);

	usb_reinit (dev);
}
/* usb_gadget_ops.udc_stop: unbind the gadget function driver.
 * Stops all activity under the lock, clears the driver pointers, and
 * removes the sysfs attributes.
 */
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280	*dev;
	unsigned long	flags;

	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	stop_activity (dev, driver);
	spin_unlock_irqrestore (&dev->lock, flags);

	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	net2280_led_active (dev, 0);
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
	device_remove_file (&dev->pdev->dev, &dev_attr_queues);

	/* NOTE(review): dereferences 'driver' without a NULL check — the
	 * UDC core is presumed to always pass a valid driver here; verify.
	 */
	DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
	return 0;
}
/*-------------------------------------------------------------------------*/
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
* also works for dma-capable endpoints, in pio mode or just
* to manually advance the queue after short OUT transfers.
*/
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 *
 * Per-endpoint irq worker: acks the endpoint status, runs the ep0 token
 * state machine (data-stage length errors, deferred protocol stalls,
 * status-phase sync), then advances the queue — either the manual
 * short-OUT DMA recovery path or plain PIO rx/tx completion.
 */
static void handle_ep_small (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	if (!list_empty (&ep->queue))
		req = list_entry (ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl (&ep->regs->ep_stat);
	ep->irqs++;
#if 0
	VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : 0);
#endif
	/* 2280 must keep NAK_OUT_PACKETS set while NAKing; 2282 acks all */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280)
		writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel (t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely (ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				if (!req)
					allow_status (ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo (ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
					&& req
					&& req->req.actual == req->req.length)
					|| (ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt (ep);
				ep->stopped = 1;
				if (req)
					done (ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely (!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely (ep->dma != 0)) {
		if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions (ep);
				if (unlikely (list_empty (&ep->queue)
						|| ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely (t & (1 << FIFO_EMPTY))) {
					count = readl (&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					/* engine moved on: not our request */
					if (readl (&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel ((1 << DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma (ep->dma);

			if (likely (req)) {
				req->td->dmacount = 0;
				t = readl (&ep->regs->ep_avail);
				dma_done (ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely (ep->out_overflow
					|| (ep->dev->chiprev == 0x0100
						&& ep->dev->gadget.speed
							== USB_SPEED_FULL))) {
				out_flush (ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty (&ep->queue))
				restart_dma (ep);
		} else
			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo (ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		if (req->req.actual == req->req.length) {
			if (ep->num == 0) {
				/* send zlps until the status stage */
			} else if (!req->req.zero || len != ep->ep.maxpacket)
				mode = 2;
		}

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done (ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status (ep);
			req = NULL;
		} else {
			if (!list_empty (&ep->queue) && !ep->stopped)
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking (ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo (ep, &req->req);
	}
}
/*
 * Translate the endpoint address carried in a control request's wIndex
 * field into the matching net2280_ep.  Endpoint zero answers for either
 * direction; every other endpoint must match both the endpoint number
 * and the direction bit of a configured (desc != NULL) endpoint.
 * Returns NULL when nothing matches.
 */
static struct net2280_ep *
get_ep_by_addr (struct net2280 *dev, u16 wIndex)
{
	struct net2280_ep *ep;

	/* ep0 is bidirectional */
	if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
		return &dev->ep [0];

	list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 addr;

		if (!ep->desc)
			continue;
		addr = ep->desc->bEndpointAddress;
		/* direction bit must agree ... */
		if ((wIndex ^ addr) & USB_DIR_IN)
			continue;
		/* ... as must the endpoint number */
		if ((wIndex & 0x0f) == (addr & 0x0f))
			return ep;
	}
	return NULL;
}
/*
 * Service irqstat0 events: SETUP packets arriving on ep0, plus the
 * per-endpoint FIFO/PIO interrupts (low seven bits of stat).  Runs with
 * dev->lock held, called from net2280_irq().
 *
 * Standard endpoint requests (GET_STATUS, CLEAR_FEATURE, SET_FEATURE on
 * endpoints) are answered directly here without a request object; every
 * other setup packet is delegated to the bound gadget driver's setup()
 * callback, with the lock temporarily dropped for that call.
 */
static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~(1 << INTA_ASSERTED);
	if (!stat)
		return;
	// DEBUG (dev, "irqstat0 %04x\n", stat);

	/* starting a control request? */
	if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw [2];
			struct usb_ctrlrequest	r;
		} u;
		int				tmp;
		struct net2280_request		*req;

		/* first setup after enumeration: latch connection speed
		 * from usbstat and reflect it on the LEDs
		 */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			net2280_led_speed (dev, dev->gadget.speed);
			DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep [0];
		ep->irqs++;

		/* make sure any leftover request state is cleared:
		 * complete (with -EPROTO on short transfers) anything
		 * still queued on ep0 from the previous control transfer
		 */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		while (!list_empty (&ep->queue)) {
			req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			done (ep, req, (req->req.actual == req->req.length)
					? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;

		/* only the original 2280 part has these fifo status bits */
		if (ep->dev->pdev->device == 0x2280)
			tmp = (1 << FIFO_OVERFLOW)
				| (1 << FIFO_UNDERFLOW);
		else
			tmp = 0;

		/* ack all stale ep0 status bits before handling this setup */
		writel (tmp | (1 << TIMEOUT)
			| (1 << USB_STALL_SENT)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_OUT_PING_NAK_SENT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			| (1 << DATA_IN_TOKEN_INTERRUPT)
			, &ep->regs->ep_stat);

		/* read the 8-byte setup packet out of the chip registers */
		u.raw [0] = readl (&dev->usb->setup0123);
		u.raw [1] = readl (&dev->usb->setup4567);
		cpu_to_le32s (&u.raw [0]);
		cpu_to_le32s (&u.raw [1]);

		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
				| (1 << DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking (ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
				| (1 << DATA_IN_TOKEN_INTERRUPT);
		writel (scratch, &dev->epregs [0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			if ((e = get_ep_by_addr (dev, w_index)) == 0
					|| w_length > 2)
				goto do_stall;

			/* report halt state straight from the ep_rsp register */
			if (readl (&e->regs->ep_rsp)
					& (1 << SET_ENDPOINT_HALT))
				status = cpu_to_le32 (1);
			else
				status = cpu_to_le32 (0);

			/* don't bother with a request object! */
			writel (0, &dev->epregs [0].ep_irqenb);
			set_fifo_bytecount (ep, w_length);
			writel ((__force u32)status, &dev->epregs [0].ep_data);
			allow_status (ep);
			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == 0)
				goto do_stall;
			/* a wedged endpoint stays halted until disable */
			if (e->wedged) {
				VDEBUG(dev, "%s wedged, halt not cleared\n",
						ep->ep.name);
			} else {
				VDEBUG(dev, "%s clear halt\n", ep->ep.name);
				clear_halt(e);
			}
			allow_status (ep);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == 0)
				goto do_stall;
			/* ep0 itself may not be halted this way */
			if (e->ep.name == ep0name)
				goto do_stall;
			set_halt (e);
			allow_status (ep);
			VDEBUG (dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl (&ep->regs->ep_cfg));
			/* the gadget driver's setup() runs unlocked */
			ep->responded = 0;
			spin_unlock (&dev->lock);
			tmp = dev->driver->setup (&dev->gadget, &u.r);
			spin_lock (&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

next_endpoints:
	/* endpoint data irq ?  bits 0..6 map to dev->ep[0..6] */
	scratch = stat & 0x7f;
	stat &= ~0x7f;
	for (num = 0; scratch; num++) {
		u32		t;

		/* do this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep [num];
		handle_ep_small (ep);
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
}
/* per-DMA-channel interrupt bits in irqstat1 (channels a..d) */
#define DMA_INTERRUPTS ( \
		(1 << DMA_D_INTERRUPT) \
	|	(1 << DMA_C_INTERRUPT) \
	|	(1 << DMA_B_INTERRUPT) \
	|	(1 << DMA_A_INTERRUPT))
/* PCI bus error indications, also reported through irqstat1 */
#define	PCI_ERROR_INTERRUPTS ( \
		(1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
	|	(1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
	|	(1 << PCI_RETRY_ABORT_INTERRUPT))
/*
 * Service irqstat1 events: VBUS/root-port-reset disconnect detection,
 * suspend/resume notification to the gadget driver, per-channel DMA
 * completion handling for ep-a..ep-d, and PCI bus errors.  Runs with
 * dev->lock held, called from net2280_irq() before the stat0 handler.
 */
static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT))
				&& ((readl (&dev->usb->usbstat) & mask)
					== 0))
				|| ((readl (&dev->usb->usbctl)
					& (1 << VBUS_PIN)) == 0)
			) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			DEBUG (dev, "disconnect %s\n",
					dev->driver->driver.name);
			stop_activity (dev, dev->driver);
			ep0_start (dev);
			return;
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		/* the CHANGE bit fires for both edges; SUSPEND_REQUEST
		 * tells us which way it went
		 */
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend (&dev->gadget);
			if (!enable_suspend)
				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume (&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel (stat, &dev->regs->irqstat1);

	/* some status we can just ignore (set differs per chip variant) */
	if (dev->pdev->device == 0x2280)
		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			  | (1 << SUSPEND_REQUEST_INTERRUPT)
			  | (1 << RESUME_INTERRUPT)
			  | (1 << SOF_INTERRUPT));
	else
		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			  | (1 << RESUME_INTERRUPT)
			  | (1 << SOF_DOWN_INTERRUPT)
			  | (1 << SOF_INTERRUPT));

	if (!stat)
		return;
	// DEBUG (dev, "irqstat1 %08x\n", stat);

	/* DMA status, for ep-{a,b,c,d}; the channel bits start at bit 9 */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = 1 << num;
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* dma channel N serves dev->ep[N+1]; ep0 has no dma */
		ep = &dev->ep [num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl (&dma->dmastat);
		writel (tmp, &dma->dmastat);

		/* chaining should stop on abort, short OUT from fifo,
		 * or (stat0 codepath) short OUT transfer.
		 */
		if (!use_dma_chaining) {
			if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
					== 0) {
				DEBUG (ep->dev, "%s no xact done? %08x\n",
					ep->ep.name, tmp);
				continue;
			}
			stop_dma (ep->dma);
		}

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions (ep);

		/* disable dma on inactive queues; else maybe restart */
		if (list_empty (&ep->queue)) {
			if (use_dma_chaining)
				stop_dma (ep->dma);
		} else {
			tmp = readl (&dma->dmactl);
			if (!use_dma_chaining
					|| (tmp & (1 << DMA_ENABLE)) == 0)
				restart_dma (ep);
			else if (ep->is_in && use_dma_chaining) {
				struct net2280_request	*req;
				__le32			dmacount;

				/* the descriptor at the head of the chain
				 * may still have VALID_BIT clear; that's
				 * used to trigger changing DMA_FIFO_VALIDATE
				 * (affects automagic zlp writes).
				 */
				req = list_entry (ep->queue.next,
						struct net2280_request, queue);
				dmacount = req->td->dmacount;
				dmacount &= cpu_to_le32 (
						(1 << VALID_BIT)
						| DMA_BYTE_COUNT_MASK);
				if (dmacount && (dmacount & valid_bit) == 0)
					restart_dma (ep);
			}
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ERROR (dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity (dev, dev->driver);
		ep0_start (dev);
		stat = 0;
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
}
static irqreturn_t net2280_irq (int irq, void *_dev)
{
struct net2280 *dev = _dev;
/* shared interrupt, not ours */
if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))
return IRQ_NONE;
spin_lock (&dev->lock);
/* handle disconnect, dma, and more */
handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
/* control requests and PIO */
handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
spin_unlock (&dev->lock);
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/*
 * Release callback for the gadget's struct device: the net2280 that
 * embeds it can be freed once the last reference is dropped.
 */
static void gadget_release (struct device *_dev)
{
	struct net2280 *dev;

	dev = dev_get_drvdata (_dev);
	kfree (dev);
}
/* tear down the binding between this driver and the pci device */
/* tear down the binding between this driver and the pci device.
 *
 * Also reused as the error-unwind path of net2280_probe(): each of the
 * flags tested below (requests, got_irq, regs, region, enabled) records
 * how far probe() got, so partially initialized devices unwind cleanly.
 */
static void net2280_remove (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* a gadget driver must have unbound before we get here */
	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe() */
	net2280_led_shutdown (dev);
	if (dev->requests) {
		int		i;
		/* dummy descriptors exist only for the dma-capable eps 1..4 */
		for (i = 1; i < 5; i++) {
			if (!dev->ep [i].dummy)
				continue;
			pci_pool_free (dev->requests, dev->ep [i].dummy,
					dev->ep [i].td_dma);
		}
		pci_pool_destroy (dev->requests);
	}
	if (dev->got_irq)
		free_irq (pdev->irq, dev);
	if (dev->regs)
		iounmap (dev->regs);
	if (dev->region)
		release_mem_region (pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device (pdev);
	/* dropping the last device reference triggers gadget_release() */
	device_unregister (&dev->gadget.dev);
	device_remove_file (&pdev->dev, &dev_attr_registers);
	pci_set_drvdata (pdev, NULL);

	INFO (dev, "unbind\n");
}
/* wrap this driver around the specified device, but
* don't respond over USB until a gadget driver binds to us.
*/
/*
 * Bind this driver to a NET2280/NET2282 PCI function: allocate the
 * device state, enable the PCI device, map BAR 0 (all chip registers),
 * reset the chip, claim the (shared) irq, create the dma descriptor
 * pool, and finally register the gadget with the UDC core.
 *
 * On any failure the partially initialized device is unwound by
 * jumping to "done", which calls net2280_remove() — see the flags that
 * function tests.  Returns 0 on success or a negative errno.
 */
static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280		*dev;
	unsigned long		resource, len;
	void			__iomem *base = NULL;
	int			retval, i;

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata (pdev, dev);
	spin_lock_init (&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device (pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start (pdev, 0);
	len = pci_resource_len (pdev, 0);
	if (!request_mem_region (resource, len, driver_name)) {
		DEBUG (dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;

	/* FIXME provide firmware download interface to put
	 * 8051 code into the chip, e.g. to turn on PCI PM.
	 */
	base = ioremap_nocache (resource, len);
	if (base == NULL) {
		DEBUG (dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	/* carve BAR 0 into the chip's fixed register banks */
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	/* put into initial config, link up all endpoints */
	writel (0, &dev->usb->usbctl);
	usb_reset (dev);
	usb_reinit (dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ERROR (dev, "No IRQ. Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
			!= 0) {
		ERROR (dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create ("requests", pdev,
		sizeof (struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		DEBUG (dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	/* one permanently-invalid "dummy" descriptor per dma-capable ep,
	 * used to terminate descriptor chains
	 */
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
				&dev->ep [i].td_dma);
		if (!td) {
			DEBUG (dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
		td->dmadesc = td->dmaaddr;
		dev->ep [i].dummy = td;
	}

	/* enable lower-overhead pci memory bursts during DMA */
	writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
			// 256 write retries may not be enough...
			// | (1 << PCI_RETRY_ABORT_ENABLE)
			| (1 << DMA_READ_MULTIPLE_ENABLE)
			| (1 << DMA_READ_LINE_ENABLE)
			, &dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master (pdev);
	pci_try_set_mwi (pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	INFO (dev, "%s\n", driver_desc);
	INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled");
	retval = device_register (&dev->gadget.dev);
	if (retval) goto done;
	retval = device_create_file (&pdev->dev, &dev_attr_registers);
	if (retval) goto done;

	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto done;
	return 0;

done:
	/* unwind whatever was set up; remove() checks each flag */
	if (dev)
		net2280_remove (pdev);
	return retval;
}
/* make sure the board is quiescent; otherwise it will continue
* generating IRQs across the upcoming reboot.
*/
/*
 * PCI shutdown hook: quiesce the chip so it stops raising interrupts
 * across the upcoming reboot, and drop the pullup so the host notices
 * a disconnect.
 */
static void net2280_shutdown (struct pci_dev *pdev)
{
	struct net2280 *the_dev = pci_get_drvdata (pdev);

	/* mask both interrupt enable registers */
	writel (0, &the_dev->regs->pciirqenb0);
	writel (0, &the_dev->regs->pciirqenb1);

	/* disable the pullup so the host will think we're gone */
	writel (0, &the_dev->usb->usbctl);
}
/*-------------------------------------------------------------------------*/
/* PCI IDs this driver binds to: PLX (vendor 0x17cc) NET2280 and NET2282
 * USB peripheral controllers.  The class match (serial-bus/USB with
 * programming interface 0xfe) restricts binding to the device-side
 * function.
 */
static const struct pci_device_id pci_ids [] = { {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x17cc,
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
}, {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x17cc,
	.device =	0x2282,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
}, { /* end: all zeroes */ }
};
/* export the table so hotplug tools can match modules to hardware */
MODULE_DEVICE_TABLE (pci, pci_ids);
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	.shutdown =	net2280_shutdown,

	/* FIXME add power management support */
	/* no .suspend/.resume hooks yet, so the device is torn down and
	 * re-probed across system sleep rather than suspended in place
	 */
};
MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
/* module load: sanitize module parameters, then register with PCI core */
static int __init init (void)
{
	/* dma chaining is meaningless unless dma itself is enabled */
	if (use_dma == 0)
		use_dma_chaining = 0;

	return pci_register_driver (&net2280_pci_driver);
}
/* module unload: undo init() by dropping the PCI driver registration */
static void __exit cleanup (void)
{
	pci_unregister_driver (&net2280_pci_driver);
}
| gpl-2.0 |
cdesjardins/DTS-Eagle-Integration_CAF-Android-kernel | drivers/media/rc/keymaps/rc-lme2510.c | 8819 | 2756 | /* LME2510 remote control
*
*
* Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Scancode -> keycode table for the three remote variants shipped with
 * LME2510-based boxes.  All entries are decoded with the NEC protocol
 * (see lme2510_map below); the variants differ in scancode width:
 * 24-bit (0x10edXX), 16-bit (0xbfXX) and 8-bit (0xXX).
 */
static struct rc_map_table lme2510_rc[] = {
	/* Type 1 - 26 buttons */
	{ 0x10ed45, KEY_0 },
	{ 0x10ed5f, KEY_1 },
	{ 0x10ed50, KEY_2 },
	{ 0x10ed5d, KEY_3 },
	{ 0x10ed41, KEY_4 },
	{ 0x10ed0a, KEY_5 },
	{ 0x10ed42, KEY_6 },
	{ 0x10ed47, KEY_7 },
	{ 0x10ed49, KEY_8 },
	{ 0x10ed05, KEY_9 },
	{ 0x10ed43, KEY_POWER },
	{ 0x10ed46, KEY_SUBTITLE },
	{ 0x10ed06, KEY_PAUSE },
	{ 0x10ed03, KEY_MEDIA_REPEAT},
	{ 0x10ed02, KEY_PAUSE },
	{ 0x10ed5e, KEY_VOLUMEUP },
	{ 0x10ed5c, KEY_VOLUMEDOWN },
	{ 0x10ed09, KEY_CHANNELUP },
	{ 0x10ed1a, KEY_CHANNELDOWN },
	{ 0x10ed1e, KEY_PLAY },
	{ 0x10ed1b, KEY_ZOOM },
	{ 0x10ed59, KEY_MUTE },
	{ 0x10ed5a, KEY_TV },
	{ 0x10ed18, KEY_RECORD },
	{ 0x10ed07, KEY_EPG },
	{ 0x10ed01, KEY_STOP },
	/* Type 2 - 20 buttons */
	{ 0xbf15, KEY_0 },
	{ 0xbf08, KEY_1 },
	{ 0xbf09, KEY_2 },
	{ 0xbf0a, KEY_3 },
	{ 0xbf0c, KEY_4 },
	{ 0xbf0d, KEY_5 },
	{ 0xbf0e, KEY_6 },
	{ 0xbf10, KEY_7 },
	{ 0xbf11, KEY_8 },
	{ 0xbf12, KEY_9 },
	{ 0xbf00, KEY_POWER },
	{ 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0xbf1a, KEY_PAUSE }, /* Timeshift */
	{ 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0xbf01, KEY_CHANNELUP },
	{ 0xbf05, KEY_CHANNELDOWN },
	{ 0xbf14, KEY_ZOOM },
	{ 0xbf18, KEY_RECORD },
	{ 0xbf16, KEY_STOP },
	/* Type 3 - 20 buttons */
	{ 0x1c, KEY_0 },
	{ 0x07, KEY_1 },
	{ 0x15, KEY_2 },
	{ 0x09, KEY_3 },
	{ 0x16, KEY_4 },
	{ 0x19, KEY_5 },
	{ 0x0d, KEY_6 },
	{ 0x0c, KEY_7 },
	{ 0x18, KEY_8 },
	{ 0x5e, KEY_9 },
	{ 0x45, KEY_POWER },
	{ 0x44, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0x4a, KEY_PAUSE }, /* Timeshift */
	{ 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0x46, KEY_CHANNELUP },
	{ 0x40, KEY_CHANNELDOWN },
	{ 0x08, KEY_ZOOM },
	{ 0x42, KEY_RECORD },
	{ 0x5a, KEY_STOP },
};
/* Wrap the table above for registration with the rc-core keymap list;
 * the decode protocol for every variant is NEC.
 */
static struct rc_map_list lme2510_map = {
	.map = {
		.scan    = lme2510_rc,
		.size    = ARRAY_SIZE(lme2510_rc),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_LME2510,
	}
};
/* module load: publish the LME2510 keymap to the rc core */
static int __init init_rc_lme2510_map(void)
{
	int rc;

	rc = rc_map_register(&lme2510_map);
	return rc;
}
/* module unload: withdraw the LME2510 keymap from the rc core */
static void __exit exit_rc_lme2510_map(void)
{
	rc_map_unregister(&lme2510_map);
}
module_init(init_rc_lme2510_map)
module_exit(exit_rc_lme2510_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
| gpl-2.0 |
jpoirier/linux | drivers/media/rc/keymaps/rc-lme2510.c | 8819 | 2756 | /* LME2510 remote control
*
*
* Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Scancode -> keycode table for the three remote variants shipped with
 * LME2510-based boxes.  All entries are decoded with the NEC protocol
 * (see lme2510_map below); the variants differ in scancode width:
 * 24-bit (0x10edXX), 16-bit (0xbfXX) and 8-bit (0xXX).
 */
static struct rc_map_table lme2510_rc[] = {
	/* Type 1 - 26 buttons */
	{ 0x10ed45, KEY_0 },
	{ 0x10ed5f, KEY_1 },
	{ 0x10ed50, KEY_2 },
	{ 0x10ed5d, KEY_3 },
	{ 0x10ed41, KEY_4 },
	{ 0x10ed0a, KEY_5 },
	{ 0x10ed42, KEY_6 },
	{ 0x10ed47, KEY_7 },
	{ 0x10ed49, KEY_8 },
	{ 0x10ed05, KEY_9 },
	{ 0x10ed43, KEY_POWER },
	{ 0x10ed46, KEY_SUBTITLE },
	{ 0x10ed06, KEY_PAUSE },
	{ 0x10ed03, KEY_MEDIA_REPEAT},
	{ 0x10ed02, KEY_PAUSE },
	{ 0x10ed5e, KEY_VOLUMEUP },
	{ 0x10ed5c, KEY_VOLUMEDOWN },
	{ 0x10ed09, KEY_CHANNELUP },
	{ 0x10ed1a, KEY_CHANNELDOWN },
	{ 0x10ed1e, KEY_PLAY },
	{ 0x10ed1b, KEY_ZOOM },
	{ 0x10ed59, KEY_MUTE },
	{ 0x10ed5a, KEY_TV },
	{ 0x10ed18, KEY_RECORD },
	{ 0x10ed07, KEY_EPG },
	{ 0x10ed01, KEY_STOP },
	/* Type 2 - 20 buttons */
	{ 0xbf15, KEY_0 },
	{ 0xbf08, KEY_1 },
	{ 0xbf09, KEY_2 },
	{ 0xbf0a, KEY_3 },
	{ 0xbf0c, KEY_4 },
	{ 0xbf0d, KEY_5 },
	{ 0xbf0e, KEY_6 },
	{ 0xbf10, KEY_7 },
	{ 0xbf11, KEY_8 },
	{ 0xbf12, KEY_9 },
	{ 0xbf00, KEY_POWER },
	{ 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0xbf1a, KEY_PAUSE }, /* Timeshift */
	{ 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0xbf01, KEY_CHANNELUP },
	{ 0xbf05, KEY_CHANNELDOWN },
	{ 0xbf14, KEY_ZOOM },
	{ 0xbf18, KEY_RECORD },
	{ 0xbf16, KEY_STOP },
	/* Type 3 - 20 buttons */
	{ 0x1c, KEY_0 },
	{ 0x07, KEY_1 },
	{ 0x15, KEY_2 },
	{ 0x09, KEY_3 },
	{ 0x16, KEY_4 },
	{ 0x19, KEY_5 },
	{ 0x0d, KEY_6 },
	{ 0x0c, KEY_7 },
	{ 0x18, KEY_8 },
	{ 0x5e, KEY_9 },
	{ 0x45, KEY_POWER },
	{ 0x44, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0x4a, KEY_PAUSE }, /* Timeshift */
	{ 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0x46, KEY_CHANNELUP },
	{ 0x40, KEY_CHANNELDOWN },
	{ 0x08, KEY_ZOOM },
	{ 0x42, KEY_RECORD },
	{ 0x5a, KEY_STOP },
};
/* Wrap the table above for registration with the rc-core keymap list;
 * the decode protocol for every variant is NEC.
 */
static struct rc_map_list lme2510_map = {
	.map = {
		.scan    = lme2510_rc,
		.size    = ARRAY_SIZE(lme2510_rc),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_LME2510,
	}
};
/* module load: publish the LME2510 keymap to the rc core */
static int __init init_rc_lme2510_map(void)
{
	int rc;

	rc = rc_map_register(&lme2510_map);
	return rc;
}
/* module unload: withdraw the LME2510 keymap from the rc core */
static void __exit exit_rc_lme2510_map(void)
{
	rc_map_unregister(&lme2510_map);
}
module_init(init_rc_lme2510_map)
module_exit(exit_rc_lme2510_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
| gpl-2.0 |
zlatinski/p-android-omap-3.4-new-ion-topic-sync-dma-buf-fence2 | drivers/s390/cio/isc.c | 14707 | 1707 | /*
* Functions for registration of I/O interruption subclasses on s390.
*
* Copyright IBM Corp. 2008
* Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/isc.h>
/* per-subclass reference counts; guarded by isc_ref_lock */
static unsigned int isc_refs[MAX_ISC + 1];
static DEFINE_SPINLOCK(isc_ref_lock);
/**
* isc_register - register an I/O interruption subclass.
* @isc: I/O interruption subclass to register
*
* The number of users for @isc is increased. If this is the first user to
* register @isc, the corresponding I/O interruption subclass mask is enabled.
*
* Context:
* This function must not be called in interrupt context.
*/
/**
 * isc_register - register an I/O interruption subclass.
 * @isc: I/O interruption subclass to register
 *
 * The number of users for @isc is increased. If this is the first user to
 * register @isc, the corresponding I/O interruption subclass mask is enabled.
 *
 * Context:
 * This function must not be called in interrupt context.
 */
void isc_register(unsigned int isc)
{
	if (isc > MAX_ISC) {
		WARN_ON(1);
		return;
	}

	spin_lock(&isc_ref_lock);
	/* first user: enable the subclass mask bit in control register 6 */
	if (isc_refs[isc]++ == 0)
		ctl_set_bit(6, 31 - isc);
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_register);
/**
* isc_unregister - unregister an I/O interruption subclass.
* @isc: I/O interruption subclass to unregister
*
* The number of users for @isc is decreased. If this is the last user to
* unregister @isc, the corresponding I/O interruption subclass mask is
* disabled.
* Note: This function must not be called if isc_register() hasn't been called
* before by the driver for @isc.
*
* Context:
* This function must not be called in interrupt context.
*/
/**
 * isc_unregister - unregister an I/O interruption subclass.
 * @isc: I/O interruption subclass to unregister
 *
 * The number of users for @isc is decreased. If this is the last user to
 * unregister @isc, the corresponding I/O interruption subclass mask is
 * disabled.
 * Note: This function must not be called if isc_register() hasn't been called
 * before by the driver for @isc.
 *
 * Context:
 * This function must not be called in interrupt context.
 */
void isc_unregister(unsigned int isc)
{
	spin_lock(&isc_ref_lock);
	/* catch out-of-range or unbalanced calls */
	if (isc > MAX_ISC || isc_refs[isc] == 0) {
		WARN_ON(1);
	} else {
		/* last user: disable the subclass mask bit in CR6 */
		if (--isc_refs[isc] == 0)
			ctl_clear_bit(6, 31 - isc);
	}
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_unregister);
| gpl-2.0 |
idanfima/Jetstreamkernel | drivers/s390/cio/isc.c | 14707 | 1707 | /*
* Functions for registration of I/O interruption subclasses on s390.
*
* Copyright IBM Corp. 2008
* Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/isc.h>
/* per-subclass reference counts; guarded by isc_ref_lock */
static unsigned int isc_refs[MAX_ISC + 1];
static DEFINE_SPINLOCK(isc_ref_lock);
/**
* isc_register - register an I/O interruption subclass.
* @isc: I/O interruption subclass to register
*
* The number of users for @isc is increased. If this is the first user to
* register @isc, the corresponding I/O interruption subclass mask is enabled.
*
* Context:
* This function must not be called in interrupt context.
*/
/**
 * isc_register - register an I/O interruption subclass.
 * @isc: I/O interruption subclass to register
 *
 * The number of users for @isc is increased. If this is the first user to
 * register @isc, the corresponding I/O interruption subclass mask is enabled.
 *
 * Context:
 * This function must not be called in interrupt context.
 */
void isc_register(unsigned int isc)
{
	if (isc > MAX_ISC) {
		WARN_ON(1);
		return;
	}

	spin_lock(&isc_ref_lock);
	/* first user: enable the subclass mask bit in control register 6 */
	if (isc_refs[isc]++ == 0)
		ctl_set_bit(6, 31 - isc);
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_register);
/**
* isc_unregister - unregister an I/O interruption subclass.
* @isc: I/O interruption subclass to unregister
*
* The number of users for @isc is decreased. If this is the last user to
* unregister @isc, the corresponding I/O interruption subclass mask is
* disabled.
* Note: This function must not be called if isc_register() hasn't been called
* before by the driver for @isc.
*
* Context:
* This function must not be called in interrupt context.
*/
/**
 * isc_unregister - unregister an I/O interruption subclass.
 * @isc: I/O interruption subclass to unregister
 *
 * The number of users for @isc is decreased. If this is the last user to
 * unregister @isc, the corresponding I/O interruption subclass mask is
 * disabled.
 * Note: This function must not be called if isc_register() hasn't been called
 * before by the driver for @isc.
 *
 * Context:
 * This function must not be called in interrupt context.
 */
void isc_unregister(unsigned int isc)
{
	spin_lock(&isc_ref_lock);
	/* catch out-of-range or unbalanced calls */
	if (isc > MAX_ISC || isc_refs[isc] == 0) {
		WARN_ON(1);
	} else {
		/* last user: disable the subclass mask bit in CR6 */
		if (--isc_refs[isc] == 0)
			ctl_clear_bit(6, 31 - isc);
	}
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_unregister);
| gpl-2.0 |
openrisc/or1k-src | opcodes/alpha-dis.c | 116 | 6426 | /* alpha-dis.c -- Disassemble Alpha AXP instructions
Copyright 1996, 1998, 1999, 2000, 2001, 2002, 2005, 2007, 2012
Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@tamu.edu>,
patterned after the PPC opcode handling written by Ian Lance Taylor.
This file is part of libopcodes.
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
It is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA. */
#include "sysdep.h"
#include <stdio.h>
#include "dis-asm.h"
#include "opcode/alpha.h"
/* OSF register names: 32 integer registers followed by 32 FP registers,
   indexed by register number (FP index = regno + 32).  */
static const char * const osf_regnames[64] = {
  "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
  "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
  "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
  "t10", "t11", "ra", "t12", "at", "gp", "sp", "zero",
  "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
  "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
  "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
  "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"
};
/* VMS register names, same layout as osf_regnames: integer registers
   first, FP registers at index regno + 32.  */
static const char * const vms_regnames[64] = {
  "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
  "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
  "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
  "R24", "AI", "RA", "PV", "AT", "FP", "SP", "RZ",
  "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
  "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
  "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23",
  "F24", "F25", "F26", "F27", "F28", "F29", "F30", "FZ"
};
/* Disassemble Alpha instructions. */
int
print_insn_alpha (memaddr, info)
bfd_vma memaddr;
struct disassemble_info *info;
{
static const struct alpha_opcode *opcode_index[AXP_NOPS+1];
const char * const * regnames;
const struct alpha_opcode *opcode, *opcode_end;
const unsigned char *opindex;
unsigned insn, op, isa_mask;
int need_comma;
/* Initialize the majorop table the first time through */
if (!opcode_index[0])
{
opcode = alpha_opcodes;
opcode_end = opcode + alpha_num_opcodes;
for (op = 0; op < AXP_NOPS; ++op)
{
opcode_index[op] = opcode;
while (opcode < opcode_end && op == AXP_OP (opcode->opcode))
++opcode;
}
opcode_index[op] = opcode;
}
if (info->flavour == bfd_target_evax_flavour)
regnames = vms_regnames;
else
regnames = osf_regnames;
isa_mask = AXP_OPCODE_NOPAL;
switch (info->mach)
{
case bfd_mach_alpha_ev4:
isa_mask |= AXP_OPCODE_EV4;
break;
case bfd_mach_alpha_ev5:
isa_mask |= AXP_OPCODE_EV5;
break;
case bfd_mach_alpha_ev6:
isa_mask |= AXP_OPCODE_EV6;
break;
}
/* Read the insn into a host word */
{
bfd_byte buffer[4];
int status = (*info->read_memory_func) (memaddr, buffer, 4, info);
if (status != 0)
{
(*info->memory_error_func) (status, memaddr, info);
return -1;
}
insn = bfd_getl32 (buffer);
}
/* Get the major opcode of the instruction. */
op = AXP_OP (insn);
/* Find the first match in the opcode table. */
opcode_end = opcode_index[op + 1];
for (opcode = opcode_index[op]; opcode < opcode_end; ++opcode)
{
if ((insn ^ opcode->opcode) & opcode->mask)
continue;
if (!(opcode->flags & isa_mask))
continue;
/* Make two passes over the operands. First see if any of them
have extraction functions, and, if they do, make sure the
instruction is valid. */
{
int invalid = 0;
for (opindex = opcode->operands; *opindex != 0; opindex++)
{
const struct alpha_operand *operand = alpha_operands + *opindex;
if (operand->extract)
(*operand->extract) (insn, &invalid);
}
if (invalid)
continue;
}
/* The instruction is valid. */
goto found;
}
/* No instruction found */
(*info->fprintf_func) (info->stream, ".long %#08x", insn);
return 4;
found:
(*info->fprintf_func) (info->stream, "%s", opcode->name);
if (opcode->operands[0] != 0)
(*info->fprintf_func) (info->stream, "\t");
/* Now extract and print the operands. */
need_comma = 0;
for (opindex = opcode->operands; *opindex != 0; opindex++)
{
const struct alpha_operand *operand = alpha_operands + *opindex;
int value;
/* Operands that are marked FAKE are simply ignored. We
already made sure that the extract function considered
the instruction to be valid. */
if ((operand->flags & AXP_OPERAND_FAKE) != 0)
continue;
/* Extract the value from the instruction. */
if (operand->extract)
value = (*operand->extract) (insn, (int *) NULL);
else
{
value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
if (operand->flags & AXP_OPERAND_SIGNED)
{
int signbit = 1 << (operand->bits - 1);
value = (value ^ signbit) - signbit;
}
}
if (need_comma &&
((operand->flags & (AXP_OPERAND_PARENS | AXP_OPERAND_COMMA))
!= AXP_OPERAND_PARENS))
{
(*info->fprintf_func) (info->stream, ",");
}
if (operand->flags & AXP_OPERAND_PARENS)
(*info->fprintf_func) (info->stream, "(");
/* Print the operand as directed by the flags. */
if (operand->flags & AXP_OPERAND_IR)
(*info->fprintf_func) (info->stream, "%s", regnames[value]);
else if (operand->flags & AXP_OPERAND_FPR)
(*info->fprintf_func) (info->stream, "%s", regnames[value + 32]);
else if (operand->flags & AXP_OPERAND_RELATIVE)
(*info->print_address_func) (memaddr + 4 + value, info);
else if (operand->flags & AXP_OPERAND_SIGNED)
(*info->fprintf_func) (info->stream, "%d", value);
else
(*info->fprintf_func) (info->stream, "%#x", value);
if (operand->flags & AXP_OPERAND_PARENS)
(*info->fprintf_func) (info->stream, ")");
need_comma = 1;
}
return 4;
}
| gpl-2.0 |
fledermaus/steamos_kernel | arch/arm/kernel/traps.c | 116 | 21154 | /*
* linux/arch/arm/kernel/traps.c
*
* Copyright (C) 1995-2009 Russell King
* Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 'traps.c' handles hardware exceptions after we have saved some state in
* 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
* kill the offending process.
*/
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
/*
 * Human-readable names for the exception classes.  Indexed by the
 * "reason" value the low-level vector stubs pass to bad_mode().
 */
static const char *handler[]= {
	"prefetch abort",
	"data abort",
	"address exception",
	"interrupt",
	"undefined instruction",
};
void *vectors_page;
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;
static int __init user_debug_setup(char *str)
{
get_option(&str, &user_debug);
return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
/*
 * Print one backtrace frame: the callee address and its caller, with
 * symbolic names when CONFIG_KALLSYMS is enabled.  If the frame lies
 * in exception entry text, also dump the saved pt_regs sitting just
 * above the frame pointer.
 */
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

	if (in_exception_text(where))
		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
#ifndef CONFIG_ARM_UNWIND
/*
* Stack pointers should always be within the kernels view of
* physical memory. If it is not there, then we can't dump
* out any information relating to the stack.
*/
/*
 * Sanity-check a stack/frame pointer before walking it: it must lie
 * within the kernel's view of physical memory (at or above PAGE_OFFSET
 * and, once high_memory is known, not beyond it).  Returns 0 when the
 * pointer looks usable, -EFAULT otherwise.
 */
static int verify_stack(unsigned long sp)
{
	int bad;

	bad = sp < PAGE_OFFSET;
	if (high_memory != NULL && sp > (unsigned long)high_memory)
		bad = 1;

	return bad ? -EFAULT : 0;
}
#endif
/*
 * Dump out the contents of some memory nicely...
 *
 * @lvl:    printk level prefix (e.g. KERN_EMERG) applied to each line
 * @str:    caption printed before the hex dump
 * @bottom: first address to dump (inclusive)
 * @top:    end of the range (exclusive)
 *
 * Prints eight 32-bit words per 32-byte-aligned line; words outside
 * [bottom, top) stay blank and unreadable words print as "????????".
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/*
		 * Renamed from "str": the old name shadowed the caption
		 * parameter above (-Wshadow).
		 */
		char line[sizeof(" 12345678") * 8 + 1];

		memset(line, ' ', sizeof(line));
		line[sizeof(line) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;

				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(line + i * 9, " %08lx", val);
				else
					sprintf(line + i * 9, " ????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, line);
	}

	set_fs(fs);
}
/*
 * Dump the instruction stream around the faulting PC: the four
 * instructions before it, the faulting one (in parentheses), and one
 * after (two halfwords after for Thumb).  Field width is 4 hex digits
 * for Thumb halfwords, 8 for ARM words.
 */
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	for (i = -4; i < 1 + !!thumb; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = __get_user(val, &((u16 *)addr)[i]);
		else
			bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
					width, val);
		else {
			/* stop at the first unreadable word */
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);

	set_fs(fs);
}
#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
unwind_backtrace(regs, tsk);
}
#else
/*
 * Frame-pointer based backtrace (used when CONFIG_ARM_UNWIND is off).
 * Picks the starting frame pointer from, in order: the supplied regs,
 * the saved context of a non-current task, or the live fp register,
 * validates it, and hands off to the assembly walker c_backtrace().
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp, mode;
	int ok = 1;

	printk("Backtrace: ");

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = regs->ARM_fp;
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;	/* 0x10 is the USR_MODE PSR encoding */
	} else {
		/* current task, no regs: read the live frame pointer */
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		/* suspicious but still walkable: warn and continue */
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, mode);
}
#endif
/*
 * Arch hook to dump a task's kernel backtrace.  @sp is unused on ARM.
 * barrier() stops the compiler from turning the call into a tail call,
 * keeping this frame on the stack while the backtrace runs.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif
/*
 * Core oops printer: banner, die-chain notification, modules,
 * registers, and — for kernel-mode or in-interrupt faults — the stack,
 * backtrace and code bytes.  Returns 1 if a die notifier claimed the
 * event (caller then skips the fatal signal), 0 otherwise.
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
	       S_ISA "\n", str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return 1;

	print_modules();
	__show_regs(regs);
	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return 0;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
/*
 * Take the global die lock and prepare the console for an oops report.
 * A nested oops on the same CPU skips the lock (die_owner check) so we
 * do not deadlock against ourselves; die_nest_count tracks the depth.
 * Returns the saved IRQ flags for the matching oops_end().
 */
static unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
/*
 * Counterpart of oops_begin(): drop the die lock once the outermost
 * nested oops finishes, then decide how to leave — crash-kexec if a
 * crash kernel is loaded, panic when in interrupt context or when
 * panic_on_oops is set, otherwise kill the current task with @signr
 * (0 means the caller survives).
 */
static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (signr)
		do_exit(signr);
}
/*
 * This function is protected against re-entrancy.
 *
 * Report a fatal kernel fault described by @str/@err and kill the
 * offending context.  Kernel-mode faults are first checked against
 * the BUG() table; a recognised BUG rewrites the banner.  If a die
 * notifier claims the event (__die returns 1) no signal is raised.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode(regs))
		bug_type = report_bug(regs->ARM_pc, regs);
	if (bug_type != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";
	if (__die(str, err, regs))
		sig = 0;

	oops_end(flags, regs, sig);
}
/*
 * Deliver a fault either as a signal to the offending user task (when
 * the trapped context was user mode) or as a fatal die() for kernel
 * faults.  @err and @trap are recorded in the task's thread_struct for
 * later inspection (e.g. by ptrace).
 */
void arm_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else {
		die(str, regs, err);
	}
}
#ifdef CONFIG_GENERIC_BUG
/*
 * CONFIG_GENERIC_BUG support: return non-zero if @pc points at the
 * BUG() trap instruction.  probe_kernel_address() is used so a bogus
 * PC simply reports "not a BUG" instead of faulting again.  The probe
 * width matches the trap encoding: 16-bit on Thumb-2 kernels, 32-bit
 * on ARM kernels.
 */
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
	unsigned short bkpt;
#else
	unsigned long bkpt;
#endif

	if (probe_kernel_address((unsigned *)pc, bkpt))
		return 0;

	return bkpt == BUG_INSTR_VALUE;
}
#endif
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
/*
 * Add an undefined-instruction hook to the global list; hooks are
 * consulted by call_undef_hook() when an undef exception is taken.
 * Safe against concurrent exception handlers via undef_lock.
 */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
/*
 * Remove a previously registered undefined-instruction hook.
 * Takes undef_lock so removal cannot race with a hook lookup.
 */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
/*
 * Find a handler for an undefined instruction.  Scans the whole hook
 * list and remembers the LAST hook whose instruction and CPSR masks
 * match, then calls it outside the lock (the hook may fault or sleep).
 * Returns the hook's result, or 1 ("not handled") if nothing matched.
 */
static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
	struct undef_hook *hook;
	unsigned long flags;
	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
			fn = hook->fn;
	raw_spin_unlock_irqrestore(&undef_lock, flags);

	return fn ? fn(regs, instr) : 1;
}
/*
 * Entry point from the undefined-instruction vector.  Reads the
 * faulting instruction (reassembling Thumb-2 wide encodings from two
 * halfwords), offers it to the registered undef hooks for emulation,
 * and delivers SIGILL (or dies, for kernel faults) if nobody handles
 * it.  Kernel SVC-mode reads access memory directly; user-mode reads
 * go through get_user() and bail to die_sig on fault.
 */
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	unsigned int instr;
	siginfo_t info;
	void __user *pc;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
		if (thumb_mode(regs)) {
			instr = ((u16 *)pc)[0];
			if (is_wide_instruction(instr)) {
				/* 32-bit Thumb-2: combine both halfwords */
				instr <<= 16;
				instr |= ((u16 *)pc)[1];
			}
		} else
#endif
			instr = *(u32 *) pc;
	} else if (thumb_mode(regs)) {
		if (get_user(instr, (u16 __user *)pc))
			goto die_sig;
		if (is_wide_instruction(instr)) {
			unsigned int instr2;
			if (get_user(instr2, (u16 __user *)pc+1))
				goto die_sig;
			instr <<= 16;
			instr |= instr2;
		}
	} else if (get_user(instr, (u32 __user *)pc)) {
		goto die_sig;
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

die_sig:
#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		dump_instr(KERN_INFO, regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = pc;

	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
/*
 * FIQ handler of last resort: nothing claimed the FIQ, so just log it
 * and carry on rather than killing the system.
 */
asmlinkage void do_unexp_fiq (struct pt_regs *regs)
{
	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
	printk("You may have a hardware problem...\n");
}
/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 *
 * @reason indexes the handler[] name table above.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	/* die() should not return; make sure we never do */
	local_irq_disable();
	panic("bad mode");
}
/*
 * Handle a syscall number that is neither a regular Linux syscall nor
 * in the ARM-private 0x9f0000 range.  A non-Linux personality with an
 * exec-domain handler gets to translate it; otherwise SIGILL is raised
 * at the SWI instruction.  Returns r0 so the syscall return path is
 * left undisturbed.
 */
static int bad_syscall(int n, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if ((current->personality & PER_MASK) != PER_LINUX &&
	    thread->exec_domain->handler) {
		thread->exec_domain->handler(n, regs);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
			task_pid_nr(current), current->comm, n);
		dump_instr(KERN_ERR, regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	/* point at the SWI itself, not the return address */
	info.si_addr = (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

	return regs->ARM_r0;
}
/*
 * Flush the cache for a user address range on behalf of the
 * cacheflush ARM syscall.  @flags must be zero.  The range is clamped
 * to the first VMA that overlaps it; NOTE(review): a range spanning
 * multiple VMAs only gets its first VMA flushed — confirm callers do
 * not rely on multi-VMA flushes.  Returns 0 on success or -EINVAL.
 */
static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;

	if (end < start || flags)
		return -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			start = vma->vm_start;
		if (end > vma->vm_end)
			end = vma->vm_end;

		/* drop the lock before the (possibly slow) flush */
		up_read(&mm->mmap_sem);
		return flush_cache_user_range(start, end);
	}
	up_read(&mm->mmap_sem);
	return -EINVAL;
}
/*
* Handle all unrecognised system calls.
* 0x9f0000 - 0x9fffff are some more esoteric system calls
*/
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
/*
 * Dispatcher for the ARM-private syscalls in the 0x9f0000 range
 * (__ARM_NR_BASE).  Anything outside that range falls through to
 * bad_syscall(); unimplemented numbers up to 0x9f07ff return -ENOSYS
 * so userspace can probe for features; everything else raises SIGILL.
 */
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	if ((no >> 16) != (__ARM_NR_BASE>> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = NULL;

		arm_notify_die("branch through zero", regs, &info, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		/* rewind the PC onto the SWI so ptrace sees it */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(current, regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		thread->tp_value = regs->ARM_r0;
		if (tls_emu)
			return 0;
		if (has_tls_reg) {
			/* write the hardware TLS register (TPIDRURO) */
			asm ("mcr p15, 0, %0, c13, c0, 3"
				: : "r" (regs->ARM_r0));
		} else {
			/*
			 * User space must never try to access this directly.
			 * Expect your app to break eventually if you do so.
			 * The user helper at 0xffff0fe0 must be used instead.
			 * (see entry-armv.S for details)
			 */
			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
		}
		return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
	/*
	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
	 * happened.  Also set the user C flag accordingly.
	 * If access permissions have to be fixed up then non-zero is
	 * returned and the operation has to be re-attempted.
	 *
	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
	 * existence.  Don't ever use this from user code.
	 */
	case NR(cmpxchg):
	for (;;) {
		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
					 struct pt_regs *regs);
		unsigned long val;
		unsigned long addr = regs->ARM_r2;
		struct mm_struct *mm = current->mm;
		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
		spinlock_t *ptl;

		regs->ARM_cpsr &= ~PSR_C_BIT;
		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, addr);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}
		val = *(unsigned long *)addr;
		val -= regs->ARM_r0;
		if (val == 0) {
			*(unsigned long *)addr = regs->ARM_r1;
			regs->ARM_cpsr |= PSR_C_BIT;
		}
		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return val;

		bad_access:
		up_read(&mm->mmap_sem);
		/* simulate a write access fault */
		do_DataAbort(addr, 15 + (1 << 11), regs);
	}
#endif

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL. This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		printk("[%d] %s: arm syscall %d\n",
		       task_pid_nr(current), current->comm, no);
		dump_instr("", regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(regs->ARM_fp, processor_mode(regs));
		}
	}
#endif
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	/* report the address of the SWI instruction itself */
	info.si_addr = (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4);

	arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
	return 0;
}
#ifdef CONFIG_TLS_REG_EMUL
/*
* We might be running on an ARMv6+ processor which should have the TLS
* register but for some reason we can't use it, or maybe an SMP system
* using a pre-ARMv6 processor (there are apparently a few prototypes like
* that in existence) and therefore access to that register must be
* emulated.
*/
/*
 * Undef hook body for TLS register emulation (matched against the
 * "mrc p15, 0, rd, c13, c0, 3" encoding in arm_mrc_hook below).
 * Writes the thread's tp_value into the destination register and
 * skips over the trapped instruction.  Returns non-zero (unhandled)
 * when the destination is the PC.
 */
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;	/* Rd field of the mrc encoding */
	if (reg == 15)
		return 1;
	regs->uregs[reg] = current_thread_info()->tp_value;
	regs->ARM_pc += 4;
	return 0;
}
static struct undef_hook arm_mrc_hook = {
.instr_mask = 0x0fff0fff,
.instr_val = 0x0e1d0f70,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = get_tp_trap,
};
static int __init arm_mrc_hook_init(void)
{
register_undef_hook(&arm_mrc_hook);
return 0;
}
late_initcall(arm_mrc_hook_init);
#endif
/*
 * Link/run-time trap for xchg() called with an unsupported operand
 * size; reports the caller and dies.
 */
void __bad_xchg(volatile void *ptr, int size)
{
	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
		__builtin_return_address(0), ptr, size);
	BUG();
}
EXPORT_SYMBOL(__bad_xchg);
/*
* A data abort trap was taken, but we did not handle the instruction.
* Try to abort the user program, or panic if it was the kernel.
*/
/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Raise SIGILL at the faulting PC for user tasks, or die() for kernel
 * faults (via arm_notify_die).
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
			task_pid_nr(current), current->comm, code, instr);
		dump_instr(KERN_ERR, regs);
		show_pte(current->mm, addr);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)addr;

	arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}
void __readwrite_bug(const char *fn)
{
printk("%s called, but not implemented\n", fn);
BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
void __pte_error(const char *file, int line, pte_t pte)
{
printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}
void __pmd_error(const char *file, int line, pmd_t pmd)
{
printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}
void __pgd_error(const char *file, int line, pgd_t pgd)
{
printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}
asmlinkage void __div0(void)
{
printk("Division by zero in kernel.\n");
dump_stack();
}
EXPORT_SYMBOL(__div0);
/*
 * Kernel abort(): BUG out, and panic as a fallback if BUG() somehow
 * returns.  Never returns.
 */
void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
/*
 * Generic trap-setup hook called from init/main.c.  All real vector
 * installation on ARM happens earlier in early_trap_init(), so there
 * is nothing left to do here.  (The redundant bare "return;" in a
 * void function has been dropped.)
 */
void __init trap_init(void)
{
}
#ifdef CONFIG_KUSER_HELPERS
/*
 * Copy the kuser helper snippets (entry-armv.S) to the top of the
 * vector page so they appear at their fixed user addresses just below
 * 0xffff1000.  When a hardware TLS register exists (or TLS is fully
 * emulated), overwrite the software __kuser_get_tls entry with the
 * hardware-register version.
 */
static void __init kuser_init(void *vectors)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * vectors + 0xfe0 = __kuser_get_tls
	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
	 */
	if (tls_emu || has_tls_reg)
		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static void __init kuser_init(void *vectors)
{
}
#endif
/*
 * Populate the exception vector page (@vectors_base, mapped at
 * 0xffff0000): poison it, copy in the vectors, stubs and kuser
 * helpers, flush the icache over both pages, and restrict user-domain
 * access.
 */
void __init early_trap_init(void *vectors_base)
{
	unsigned long vectors = (unsigned long)vectors_base;
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	unsigned i;

	vectors_page = vectors_base;

	/*
	 * Poison the vectors page with an undefined instruction.  This
	 * instruction is chosen to be undefined for both ARM and Thumb
	 * ISAs.  The Thumb version is an undefined instruction with a
	 * branch back to the undefined instruction.
	 */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		((u32 *)vectors_base)[i] = 0xe7fddef1;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

	kuser_init(vectors_base);

	/* stubs live in the second page, hence PAGE_SIZE * 2 */
	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
| gpl-2.0 |
yseras/SGS4-3.13 | arch/arm/mach-tegra/pmc.c | 372 | 9647 | /*
* Copyright (C) 2012,2013 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/tegra-powergate.h>
#include "flowctrl.h"
#include "fuse.h"
#include "pm.h"
#include "pmc.h"
#include "sleep.h"
#define TEGRA_POWER_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
#define TEGRA_POWER_SYSCLK_OE (1 << 11) /* system clock enable */
#define TEGRA_POWER_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */
#define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
#define TEGRA_POWER_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
#define PMC_CTRL 0x0
#define PMC_CTRL_INTR_LOW (1 << 17)
#define PMC_PWRGATE_TOGGLE 0x30
#define PMC_PWRGATE_TOGGLE_START (1 << 8)
#define PMC_REMOVE_CLAMPING 0x34
#define PMC_PWRGATE_STATUS 0x38
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
static u8 tegra_cpu_domains[] = {
0xFF, /* not available for CPU0 */
TEGRA_POWERGATE_CPU1,
TEGRA_POWERGATE_CPU2,
TEGRA_POWERGATE_CPU3,
};
static DEFINE_SPINLOCK(tegra_powergate_lock);
static void __iomem *tegra_pmc_base;
static bool tegra_pmc_invert_interrupt;
static struct clk *tegra_pclk;
struct pmc_pm_data {
u32 cpu_good_time; /* CPU power good time in uS */
u32 cpu_off_time; /* CPU power off time in uS */
u32 core_osc_time; /* Core power good osc time in uS */
u32 core_pmu_time; /* Core power good pmu time in uS */
u32 core_off_time; /* Core power off time in uS */
bool corereq_high; /* Core power request active-high */
bool sysclkreq_high; /* System clock request active-high */
bool combined_req; /* Combined pwr req for CPU & Core */
bool cpu_pwr_good_en; /* CPU power good signal is enabled */
u32 lp0_vec_phy_addr; /* The phy addr of LP0 warm boot code */
u32 lp0_vec_size; /* The size of LP0 warm boot code */
enum tegra_suspend_mode suspend_mode;
};
static struct pmc_pm_data pmc_pm_data;
static inline u32 tegra_pmc_readl(u32 reg)
{
return readl(tegra_pmc_base + reg);
}
static inline void tegra_pmc_writel(u32 val, u32 reg)
{
writel(val, tegra_pmc_base + reg);
}
static int tegra_pmc_get_cpu_powerdomain_id(int cpuid)
{
if (cpuid <= 0 || cpuid >= num_possible_cpus())
return -EINVAL;
return tegra_cpu_domains[cpuid];
}
static bool tegra_pmc_powergate_is_powered(int id)
{
return (tegra_pmc_readl(PMC_PWRGATE_STATUS) >> id) & 1;
}
/*
 * Request a power-gate state change for partition @id.  The PMC only
 * exposes a "toggle" command, so @new_state serves as a sanity check:
 * if the partition is already in the requested state we WARN — but
 * note the toggle is still issued afterwards, which would then move
 * the partition *away* from @new_state.  Always returns 0.
 */
static int tegra_pmc_powergate_set(int id, bool new_state)
{
	bool old_state;
	unsigned long flags;

	spin_lock_irqsave(&tegra_powergate_lock, flags);

	old_state = tegra_pmc_powergate_is_powered(id);
	WARN_ON(old_state == new_state);

	tegra_pmc_writel(PMC_PWRGATE_TOGGLE_START | id, PMC_PWRGATE_TOGGLE);

	spin_unlock_irqrestore(&tegra_powergate_lock, flags);

	return 0;
}
/*
 * Remove the I/O clamps from power-gate partition @id after it has
 * been powered up.  Always returns 0.
 */
static int tegra_pmc_powergate_remove_clamping(int id)
{
	u32 mask;

	/*
	 * Tegra has a bug where PCIE and VDE clamping masks are
	 * swapped relatively to the partition ids.
	 */
	if (id ==  TEGRA_POWERGATE_VDEC)
		mask = (1 << TEGRA_POWERGATE_PCIE);
	else if	(id == TEGRA_POWERGATE_PCIE)
		mask = (1 << TEGRA_POWERGATE_VDEC);
	else
		mask = (1 << id);

	tegra_pmc_writel(mask, PMC_REMOVE_CLAMPING);

	return 0;
}
/*
 * Report whether secondary CPU @cpuid's power-gate partition is
 * powered.  CPU0 (no partition) and invalid ids report false.
 */
bool tegra_pmc_cpu_is_powered(int cpuid)
{
	int domain = tegra_pmc_get_cpu_powerdomain_id(cpuid);

	return domain < 0 ? false : tegra_pmc_powergate_is_powered(domain);
}
int tegra_pmc_cpu_power_on(int cpuid)
{
int id;
id = tegra_pmc_get_cpu_powerdomain_id(cpuid);
if (id < 0)
return id;
return tegra_pmc_powergate_set(id, true);
}
int tegra_pmc_cpu_remove_clamping(int cpuid)
{
int id;
id = tegra_pmc_get_cpu_powerdomain_id(cpuid);
if (id < 0)
return id;
return tegra_pmc_powergate_remove_clamping(id);
}
/*
 * Machine restart hook: set bit 4 (0x10) of the PMC register at
 * offset 0 (PMC_CTRL) to trigger a SoC reset.  NOTE(review):
 * presumably bit 4 is the main-reset request bit — confirm against
 * the Tegra TRM.  @mode and @cmd are ignored.
 */
void tegra_pmc_restart(enum reboot_mode mode, const char *cmd)
{
	u32 val;

	val = tegra_pmc_readl(0);
	val |= 0x10;
	tegra_pmc_writel(val, 0);
}
#ifdef CONFIG_PM_SLEEP
/*
 * set_power_timers() - program the CPU power-good and power-off timers.
 * @us_on:  CPU power good time, in microseconds
 * @us_off: CPU power off time, in microseconds
 * @rate:   pclk rate in Hz used to convert microseconds to ticks
 *
 * Converts both intervals to pclk ticks (rounding up) and writes them
 * to the PMC.  A zero @rate falls back to 100 MHz with a one-shot
 * warning.  The effective clock is cached so the registers are only
 * rewritten when it actually changes.
 */
static void set_power_timers(u32 us_on, u32 us_off, unsigned long rate)
{
	unsigned long long ticks;
	unsigned long long pclk;
	static unsigned long tegra_last_pclk;

	if (WARN_ON_ONCE(rate <= 0))
		pclk = 100000000;
	else
		pclk = rate;

	/*
	 * Compare against the cached *effective* clock (pclk), not the
	 * raw @rate: the previous check (rate != tegra_last_pclk) kept
	 * reprogramming the timers on every call once the 100 MHz
	 * fallback had been cached.
	 */
	if (pclk != tegra_last_pclk) {
		ticks = (us_on * pclk) + 999999ull;
		do_div(ticks, 1000000);
		tegra_pmc_writel((unsigned long)ticks, PMC_CPUPWRGOOD_TIMER);

		ticks = (us_off * pclk) + 999999ull;
		do_div(ticks, 1000000);
		tegra_pmc_writel((unsigned long)ticks, PMC_CPUPWROFF_TIMER);
		wmb();
	}
	tegra_last_pclk = pclk;
}
enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
{
return pmc_pm_data.suspend_mode;
}
void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
{
if (mode < TEGRA_SUSPEND_NONE || mode >= TEGRA_MAX_SUSPEND_MODE)
return;
pmc_pm_data.suspend_mode = mode;
}
void tegra_pmc_suspend(void)
{
tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
}
void tegra_pmc_resume(void)
{
tegra_pmc_writel(0x0, PMC_SCRATCH41);
}
void tegra_pmc_pm_set(enum tegra_suspend_mode mode)
{
u32 reg, csr_reg;
unsigned long rate = 0;
reg = tegra_pmc_readl(PMC_CTRL);
reg |= TEGRA_POWER_CPU_PWRREQ_OE;
reg &= ~TEGRA_POWER_EFFECT_LP0;
switch (tegra_chip_id) {
case TEGRA20:
case TEGRA30:
break;
default:
/* Turn off CRAIL */
csr_reg = flowctrl_read_cpu_csr(0);
csr_reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
csr_reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
flowctrl_write_cpu_csr(0, csr_reg);
break;
}
switch (mode) {
case TEGRA_SUSPEND_LP1:
rate = 32768;
break;
case TEGRA_SUSPEND_LP2:
rate = clk_get_rate(tegra_pclk);
break;
default:
break;
}
set_power_timers(pmc_pm_data.cpu_good_time, pmc_pm_data.cpu_off_time,
rate);
tegra_pmc_writel(reg, PMC_CTRL);
}
/*
 * One-time suspend setup: enable the CPU power request output, then
 * configure the system-clock request polarity (from DT via
 * pmc_pm_data.sysclkreq_high) while its driver is still tristated,
 * and finally enable the request output itself.
 */
void tegra_pmc_suspend_init(void)
{
	u32 reg;

	/* Always enable CPU power request */
	reg = tegra_pmc_readl(PMC_CTRL);
	reg |= TEGRA_POWER_CPU_PWRREQ_OE;
	tegra_pmc_writel(reg, PMC_CTRL);

	reg = tegra_pmc_readl(PMC_CTRL);

	if (!pmc_pm_data.sysclkreq_high)
		reg |= TEGRA_POWER_SYSCLK_POLARITY;
	else
		reg &= ~TEGRA_POWER_SYSCLK_POLARITY;

	/* configure the output polarity while the request is tristated */
	tegra_pmc_writel(reg, PMC_CTRL);

	/* now enable the request */
	reg |= TEGRA_POWER_SYSCLK_OE;
	tegra_pmc_writel(reg, PMC_CTRL);
}
#endif
static const struct of_device_id matches[] __initconst = {
{ .compatible = "nvidia,tegra124-pmc" },
{ .compatible = "nvidia,tegra114-pmc" },
{ .compatible = "nvidia,tegra30-pmc" },
{ .compatible = "nvidia,tegra20-pmc" },
{ }
};
/*
 * Early PMC setup, called before interrupts are enabled: locate the
 * PMC node in the device tree, map its registers, and program the
 * PMC interrupt polarity from the "nvidia,invert-interrupt" property.
 * BUGs out if no compatible PMC node exists.
 */
void __init tegra_pmc_init_irq(void)
{
	struct device_node *np;
	u32 val;

	np = of_find_matching_node(NULL, matches);
	BUG_ON(!np);

	tegra_pmc_base = of_iomap(np, 0);

	tegra_pmc_invert_interrupt = of_property_read_bool(np,
				     "nvidia,invert-interrupt");

	val = tegra_pmc_readl(PMC_CTRL);
	if (tegra_pmc_invert_interrupt)
		val |= PMC_CTRL_INTR_LOW;
	else
		val &= ~PMC_CTRL_INTR_LOW;
	tegra_pmc_writel(val, PMC_CTRL);
}
void __init tegra_pmc_init(void)
{
struct device_node *np;
u32 prop;
enum tegra_suspend_mode suspend_mode;
u32 core_good_time[2] = {0, 0};
u32 lp0_vec[2] = {0, 0};
np = of_find_matching_node(NULL, matches);
BUG_ON(!np);
tegra_pclk = of_clk_get_by_name(np, "pclk");
WARN_ON(IS_ERR(tegra_pclk));
/* Grabbing the power management configurations */
if (of_property_read_u32(np, "nvidia,suspend-mode", &prop)) {
suspend_mode = TEGRA_SUSPEND_NONE;
} else {
switch (prop) {
case 0:
suspend_mode = TEGRA_SUSPEND_LP0;
break;
case 1:
suspend_mode = TEGRA_SUSPEND_LP1;
break;
case 2:
suspend_mode = TEGRA_SUSPEND_LP2;
break;
default:
suspend_mode = TEGRA_SUSPEND_NONE;
break;
}
}
suspend_mode = tegra_pm_validate_suspend_mode(suspend_mode);
if (of_property_read_u32(np, "nvidia,cpu-pwr-good-time", &prop))
suspend_mode = TEGRA_SUSPEND_NONE;
pmc_pm_data.cpu_good_time = prop;
if (of_property_read_u32(np, "nvidia,cpu-pwr-off-time", &prop))
suspend_mode = TEGRA_SUSPEND_NONE;
pmc_pm_data.cpu_off_time = prop;
if (of_property_read_u32_array(np, "nvidia,core-pwr-good-time",
core_good_time, ARRAY_SIZE(core_good_time)))
suspend_mode = TEGRA_SUSPEND_NONE;
pmc_pm_data.core_osc_time = core_good_time[0];
pmc_pm_data.core_pmu_time = core_good_time[1];
if (of_property_read_u32(np, "nvidia,core-pwr-off-time",
&prop))
suspend_mode = TEGRA_SUSPEND_NONE;
pmc_pm_data.core_off_time = prop;
pmc_pm_data.corereq_high = of_property_read_bool(np,
"nvidia,core-power-req-active-high");
pmc_pm_data.sysclkreq_high = of_property_read_bool(np,
"nvidia,sys-clock-req-active-high");
pmc_pm_data.combined_req = of_property_read_bool(np,
"nvidia,combined-power-req");
pmc_pm_data.cpu_pwr_good_en = of_property_read_bool(np,
"nvidia,cpu-pwr-good-en");
if (of_property_read_u32_array(np, "nvidia,lp0-vec", lp0_vec,
ARRAY_SIZE(lp0_vec)))
if (suspend_mode == TEGRA_SUSPEND_LP0)
suspend_mode = TEGRA_SUSPEND_LP1;
pmc_pm_data.lp0_vec_phy_addr = lp0_vec[0];
pmc_pm_data.lp0_vec_size = lp0_vec[1];
pmc_pm_data.suspend_mode = suspend_mode;
}
| gpl-2.0 |
javelinanddart/bricked-flo | arch/arm/mach-msm/msm_bus/msm_bus_board_8064.c | 372 | 23884 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/module.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/rpm.h>
#include "msm_bus_core.h"
#define NMASTERS 54
#define NSLAVES 75
#define NFAB_8064 5
enum msm_bus_fabric_tiered_slave_type {
MSM_BUS_SYSTEM_TIERED_SLAVE_FAB_APPSS_0 = 1,
MSM_BUS_SYSTEM_TIERED_SLAVE_FAB_APPSS_1,
MSM_BUS_TIERED_SLAVE_SYSTEM_IMEM,
MSM_BUS_TIERED_SLAVE_MM_IMEM = 1,
MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_0,
MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_1,
MSM_BUS_TIERED_SLAVE_EBI1_CH0 = 1,
MSM_BUS_TIERED_SLAVE_EBI1_CH1,
MSM_BUS_TIERED_SLAVE_KMPSS_L2,
};
/*
 * Physical master-port numbers for each fabric.  Numbering restarts at
 * 0 for every fabric (system, then MMSS, then Apps), so these values
 * are only meaningful relative to the fabric they are used on.
 */
enum msm_bus_8064_master_ports_type {
	/* system fabric master ports */
	MSM_BUS_SYSTEM_MASTER_PORT_APPSS_FAB = 0,
	MSM_BUS_MASTER_PORT_SPS,
	MSM_BUS_MASTER_PORT_ADM_PORT0,
	MSM_BUS_MASTER_PORT_ADM_PORT1,
	MSM_BUS_MASTER_PORT_LPASS_PROC,
	MSM_BUS_MASTER_PORT_GSS_NAV,
	MSM_BUS_MASTER_PORT_PCIE,
	MSM_BUS_MASTER_PORT_RIVA,
	MSM_BUS_MASTER_PORT_SATA,
	MSM_BUS_MASTER_PORT_LPASS,
	MSM_BUS_SYSTEM_MASTER_PORT_CPSS_FPB,
	MSM_BUS_SYSTEM_MASTER_PORT_SYSTEM_FPB,
	MSM_BUS_SYSTEM_MASTER_PORT_MMSS_FPB,
	MSM_BUS_SYSTEM_MASTER_PORT_ADM_AHB_CI,
	MSM_BUS_SYSTEM_MASTER_PORT_CRYPTO,

	/* MMSS fabric master ports (numbering restarts at 0) */
	MSM_BUS_MASTER_PORT_MDP_PORT0 = 0,
	MSM_BUS_MASTER_PORT_MDP_PORT1,
	MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,
	MSM_BUS_MASTER_PORT_ROTATOR,
	MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT1,
	MSM_BUS_MASTER_PORT_JPEG_DEC,
	MSM_BUS_MASTER_PORT_VIDEO_CAP,
	MSM_BUS_MASTER_PORT_VFE,
	MSM_BUS_MASTER_PORT_VPE,
	MSM_BUS_MASTER_PORT_JPEG_ENC,
	MSM_BUS_MASTER_PORT_VIDEO_DEC,
	MSM_BUS_MMSS_MASTER_PORT_APPS_FAB,
	MSM_BUS_MASTER_PORT_VIDEO_ENC,

	/* apps fabric master ports (numbering restarts at 0) */
	MSM_BUS_MASTER_PORT_KMPSS_M0 = 0,
	MSM_BUS_MASTER_PORT_KMPSS_M1,
	MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_0,
	MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_1,
	MSM_BUS_APPSS_MASTER_PORT_FAB_SYSTEM_0,
	MSM_BUS_APPSS_MASTER_PORT_FAB_SYSTEM_1,
};
/*
 * Physical slave-port numbers for each fabric; like the master ports,
 * numbering restarts at 0 per fabric (MMSS, then Apps, then system).
 */
enum msm_bus_8064_slave_ports_type {
	/* MMSS fabric slave ports */
	MSM_BUS_SLAVE_PORT_MM_IMEM = 0,
	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_0,
	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1,

	/* apps fabric slave ports (numbering restarts at 0) */
	MSM_BUS_SLAVE_PORT_EBI1_CH0 = 0,
	MSM_BUS_SLAVE_PORT_EBI1_CH1,
	MSM_BUS_SLAVE_PORT_KMPSS_L2,
	MSM_BUS_APPSS_SLAVE_PORT_MMSS_FAB,
	MSM_BUS_SLAVE_PORT_SYSTEM_FAB,

	/* system fabric slave ports (numbering restarts at 0) */
	MSM_BUS_SYSTEM_SLAVE_PORT_APPSS_FAB_0 = 0,
	MSM_BUS_SYSTEM_SLAVE_PORT_APPSS_FAB_1,
	MSM_BUS_SLAVE_PORT_SPS,
	MSM_BUS_SLAVE_PORT_SYSTEM_IMEM,
	MSM_BUS_SLAVE_PORT_CORESIGHT,
	MSM_BUS_SLAVE_PORT_PCIE,
	MSM_BUS_SLAVE_PORT_KMPSS,
	MSM_BUS_SLAVE_PORT_GSS,
	MSM_BUS_SLAVE_PORT_LPASS,
	MSM_BUS_SYSTEM_SLAVE_PORT_CPSS_FPB,
	MSM_BUS_SYSTEM_SLAVE_PORT_SYSTEM_FPB,
	MSM_BUS_SYSTEM_SLAVE_PORT_MMSS_FPB,
	MSM_BUS_SLAVE_PORT_RIVA,
	MSM_BUS_SLAVE_PORT_SATA,
	MSM_BUS_SLAVE_PORT_CRYPTO,
};
/* Default arbitration tier for masters with no special requirements. */
static int tier2[] = {MSM_BUS_BW_TIER2,};
/* id -> priv_id lookup tables, populated by msm_bus_board_assign_iids(). */
static uint32_t master_iids[NMASTERS];
static uint32_t slave_iids[NSLAVES];

/* Apps-fabric per-node master and slave port lists. */
static int mport_kmpss_m0[] = {MSM_BUS_MASTER_PORT_KMPSS_M0,};
static int mport_kmpss_m1[] = {MSM_BUS_MASTER_PORT_KMPSS_M1,};
static int mmss_mport_apps_fab[] = {MSM_BUS_MMSS_MASTER_PORT_APPS_FAB,};
static int system_mport_appss_fab[] = {MSM_BUS_SYSTEM_MASTER_PORT_APPSS_FAB,};
/* EBI1 channel 0 node drives both DDR channels' slave ports. */
static int sport_ebi1_ch0[] = {
	MSM_BUS_SLAVE_PORT_EBI1_CH0,
	MSM_BUS_SLAVE_PORT_EBI1_CH1,
};
static int sport_ebi1_ch1[] = {MSM_BUS_SLAVE_PORT_EBI1_CH1,};
static int sport_kmpss_l2[] = {MSM_BUS_SLAVE_PORT_KMPSS_L2,};
static int appss_sport_mmss_fab[] = {MSM_BUS_APPSS_SLAVE_PORT_MMSS_FAB,};
static int sport_system_fab[] = {MSM_BUS_SLAVE_PORT_SYSTEM_FAB,};
static int tiered_slave_ebi1_ch0[] = {
	MSM_BUS_TIERED_SLAVE_EBI1_CH0,
	MSM_BUS_TIERED_SLAVE_EBI1_CH1,
};
static int tiered_slave_ebi1_ch1[] = {MSM_BUS_TIERED_SLAVE_EBI1_CH1,};
static int tiered_slave_kmpss[] = {MSM_BUS_TIERED_SLAVE_KMPSS_L2,};
/*
 * Apps fabric topology: both Krait master ports, the DDR (EBI1) and L2
 * slaves, plus gateway nodes to the MMSS and system fabrics.  Gateway
 * entries carry both master and slave port lists since traffic flows
 * across them in both directions.
 */
static struct msm_bus_node_info apps_fabric_info[] = {
	{
		.id = MSM_BUS_MASTER_AMPSS_M0,
		.masterp = mport_kmpss_m0,
		.num_mports = ARRAY_SIZE(mport_kmpss_m0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_AMPSS_M1,
		.masterp = mport_kmpss_m1,
		.num_mports = ARRAY_SIZE(mport_kmpss_m1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_SLAVE_EBI_CH0,
		.slavep = sport_ebi1_ch0,
		.num_sports = ARRAY_SIZE(sport_ebi1_ch0),
		.tier = tiered_slave_ebi1_ch0,
		.num_tiers = ARRAY_SIZE(tiered_slave_ebi1_ch0),
		.buswidth = 8,
		.slaveclk[DUAL_CTX] = "mem_clk",
		.slaveclk[ACTIVE_CTX] = "mem_a_clk",
	},
	{
		.id = MSM_BUS_SLAVE_EBI_CH1,
		.slavep = sport_ebi1_ch1,
		.num_sports = ARRAY_SIZE(sport_ebi1_ch1),
		.tier = tiered_slave_ebi1_ch1,
		.num_tiers = ARRAY_SIZE(tiered_slave_ebi1_ch1),
		.buswidth = 8,
		.slaveclk[DUAL_CTX] = "mem_clk",
		.slaveclk[ACTIVE_CTX] = "mem_a_clk",
	},
	{
		.id = MSM_BUS_SLAVE_AMPSS_L2,
		.slavep = sport_kmpss_l2,
		.num_sports = ARRAY_SIZE(sport_kmpss_l2),
		.tier = tiered_slave_kmpss,
		.num_tiers = ARRAY_SIZE(tiered_slave_kmpss),
		.buswidth = 8,
	},
	/* gateway to the MMSS fabric */
	{
		.id = MSM_BUS_FAB_MMSS,
		.gateway = 1,
		.slavep = appss_sport_mmss_fab,
		.num_sports = ARRAY_SIZE(appss_sport_mmss_fab),
		.masterp = mmss_mport_apps_fab,
		.num_mports = ARRAY_SIZE(mmss_mport_apps_fab),
		.buswidth = 8,
	},
	/* gateway to the system fabric */
	{
		.id = MSM_BUS_FAB_SYSTEM,
		.gateway = 1,
		.slavep = sport_system_fab,
		.num_sports = ARRAY_SIZE(sport_system_fab),
		.masterp = system_mport_appss_fab,
		.num_mports = ARRAY_SIZE(system_mport_appss_fab),
		.buswidth = 8,
	},
};
/* System-fabric per-node master and slave port lists. */
static int mport_sps[] = {MSM_BUS_MASTER_PORT_SPS,};
static int mport_adm_port0[] = {MSM_BUS_MASTER_PORT_ADM_PORT0,};
static int mport_adm_port1[] = {MSM_BUS_MASTER_PORT_ADM_PORT1,};
static int mport_gss_nav[] = {MSM_BUS_MASTER_PORT_GSS_NAV,};
static int mport_pcie[] = {MSM_BUS_MASTER_PORT_PCIE,};
static int mport_lpass_proc[] = {MSM_BUS_MASTER_PORT_LPASS_PROC,};
static int mport_sata[] = {MSM_BUS_MASTER_PORT_SATA,};
static int mport_riva[] = {MSM_BUS_MASTER_PORT_RIVA,};
static int mport_crypto[] = {MSM_BUS_SYSTEM_MASTER_PORT_CRYPTO,};
static int mport_lpass[] = {MSM_BUS_MASTER_PORT_LPASS,};
static int system_mport_mmss_fpb[] = {MSM_BUS_SYSTEM_MASTER_PORT_MMSS_FPB,};
static int system_mport_adm_ahb_ci[] = {MSM_BUS_SYSTEM_MASTER_PORT_ADM_AHB_CI,};
/* Apps fabric attaches to the system fabric through two parallel ports. */
static int appss_mport_fab_system[] = {
	MSM_BUS_APPSS_MASTER_PORT_FAB_SYSTEM_0,
	MSM_BUS_APPSS_MASTER_PORT_FAB_SYSTEM_1
};
static int mport_system_fpb[] = {MSM_BUS_SYSTEM_MASTER_PORT_SYSTEM_FPB,};
static int system_mport_cpss_fpb[] = {MSM_BUS_SYSTEM_MASTER_PORT_CPSS_FPB,};
static int system_sport_appss_fab[] = {
	MSM_BUS_SYSTEM_SLAVE_PORT_APPSS_FAB_0,
	MSM_BUS_SYSTEM_SLAVE_PORT_APPSS_FAB_1
};
static int system_sport_system_fpb[] = {MSM_BUS_SYSTEM_SLAVE_PORT_SYSTEM_FPB,};
static int system_sport_cpss_fpb[] = {MSM_BUS_SYSTEM_SLAVE_PORT_CPSS_FPB,};
static int sport_sps[] = {MSM_BUS_SLAVE_PORT_SPS,};
static int sport_system_imem[] = {MSM_BUS_SLAVE_PORT_SYSTEM_IMEM,};
static int sport_coresight[] = {MSM_BUS_SLAVE_PORT_CORESIGHT,};
static int sport_crypto[] = {MSM_BUS_SLAVE_PORT_CRYPTO,};
static int sport_riva[] = {MSM_BUS_SLAVE_PORT_RIVA,};
static int sport_sata[] = {MSM_BUS_SLAVE_PORT_SATA,};
static int sport_kmpss[] = {MSM_BUS_SLAVE_PORT_KMPSS,};
static int sport_gss[] = {MSM_BUS_SLAVE_PORT_GSS,};
static int sport_lpass[] = {MSM_BUS_SLAVE_PORT_LPASS,};
static int sport_mmss_fpb[] = {MSM_BUS_SYSTEM_SLAVE_PORT_MMSS_FPB,};
static int tiered_slave_system_imem[] = {MSM_BUS_TIERED_SLAVE_SYSTEM_IMEM,};
static int system_tiered_slave_fab_appss[] = {
	MSM_BUS_SYSTEM_TIERED_SLAVE_FAB_APPSS_0,
	MSM_BUS_SYSTEM_TIERED_SLAVE_FAB_APPSS_1,
};
/*
 * System fabric topology: DMA/comms/peripheral masters, gateways to the
 * Apps fabric and both FPB buses, and the peripheral slave nodes.
 * Non-gateway masters all arbitrate at the default tier2.
 */
static struct msm_bus_node_info system_fabric_info[] = {
	{
		.id = MSM_BUS_MASTER_SPS,
		.masterp = mport_sps,
		.num_mports = ARRAY_SIZE(mport_sps),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_ADM_PORT0,
		.masterp = mport_adm_port0,
		.num_mports = ARRAY_SIZE(mport_adm_port0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_ADM_PORT1,
		.masterp = mport_adm_port1,
		.num_mports = ARRAY_SIZE(mport_adm_port1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_LPASS_PROC,
		.masterp = mport_lpass_proc,
		.num_mports = ARRAY_SIZE(mport_lpass_proc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_GSS_NAV,
		.masterp = mport_gss_nav,
		.num_mports = ARRAY_SIZE(mport_gss_nav),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_PCIE,
		.masterp = mport_pcie,
		.num_mports = ARRAY_SIZE(mport_pcie),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_RIVA,
		.masterp = mport_riva,
		.num_mports = ARRAY_SIZE(mport_riva),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_SATA,
		.masterp = mport_sata,
		.num_mports = ARRAY_SIZE(mport_sata),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_CRYPTO,
		.masterp = mport_crypto,
		.num_mports = ARRAY_SIZE(mport_crypto),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_LPASS,
		.masterp = mport_lpass,
		.num_mports = ARRAY_SIZE(mport_lpass),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_SYSTEM_MASTER_MMSS_FPB,
		.masterp = system_mport_mmss_fpb,
		.num_mports = ARRAY_SIZE(system_mport_mmss_fpb),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_ADM0_CI,
		.masterp = system_mport_adm_ahb_ci,
		.num_mports = ARRAY_SIZE(system_mport_adm_ahb_ci),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	/* gateway to the Apps fabric (dual port) */
	{
		.id = MSM_BUS_FAB_APPSS,
		.gateway = 1,
		.slavep = system_sport_appss_fab,
		.num_sports = ARRAY_SIZE(system_sport_appss_fab),
		.masterp = appss_mport_fab_system,
		.num_mports = ARRAY_SIZE(appss_mport_fab_system),
		.tier = system_tiered_slave_fab_appss,
		.num_tiers = ARRAY_SIZE(system_tiered_slave_fab_appss),
		.buswidth = 8,
	},
	/* gateway to the system FPB (peripheral bus) */
	{
		.id = MSM_BUS_FAB_SYSTEM_FPB,
		.gateway = 1,
		.slavep = system_sport_system_fpb,
		.num_sports = ARRAY_SIZE(system_sport_system_fpb),
		.masterp = mport_system_fpb,
		.num_mports = ARRAY_SIZE(mport_system_fpb),
		.buswidth = 4,
	},
	/* gateway to the CPSS FPB (peripheral bus) */
	{
		.id = MSM_BUS_FAB_CPSS_FPB,
		.gateway = 1,
		.slavep = system_sport_cpss_fpb,
		.num_sports = ARRAY_SIZE(system_sport_cpss_fpb),
		.masterp = system_mport_cpss_fpb,
		.num_mports = ARRAY_SIZE(system_mport_cpss_fpb),
		.buswidth = 4,
	},
	{
		.id = MSM_BUS_SLAVE_SPS,
		.slavep = sport_sps,
		.num_sports = ARRAY_SIZE(sport_sps),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slaveclk[DUAL_CTX] = "dfab_clk",
		.slaveclk[ACTIVE_CTX] = "dfab_a_clk",
	},
	{
		.id = MSM_BUS_SLAVE_SYSTEM_IMEM,
		.slavep = sport_system_imem,
		.num_sports = ARRAY_SIZE(sport_system_imem),
		.tier = tiered_slave_system_imem,
		.num_tiers = ARRAY_SIZE(tiered_slave_system_imem),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_CORESIGHT,
		.slavep = sport_coresight,
		.num_sports = ARRAY_SIZE(sport_coresight),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_CRYPTO,
		.slavep = sport_crypto,
		.num_sports = ARRAY_SIZE(sport_crypto),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_RIVA,
		.slavep = sport_riva,
		.num_sports = ARRAY_SIZE(sport_riva),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_SATA,
		.slavep = sport_sata,
		.num_sports = ARRAY_SIZE(sport_sata),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_AMPSS,
		.slavep = sport_kmpss,
		.num_sports = ARRAY_SIZE(sport_kmpss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_GSS,
		.slavep = sport_gss,
		.num_sports = ARRAY_SIZE(sport_gss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SLAVE_LPASS,
		.slavep = sport_lpass,
		.num_sports = ARRAY_SIZE(sport_lpass),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
	{
		.id = MSM_BUS_SYSTEM_SLAVE_MMSS_FPB,
		.slavep = sport_mmss_fpb,
		.num_sports = ARRAY_SIZE(sport_mmss_fpb),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
	},
};
/* MMSS fabric per-node master and slave port lists. */
/* MDP port-0 node spans both MDP ports on the fabric. */
static int mport_mdp[] = {
	MSM_BUS_MASTER_PORT_MDP_PORT0,
	MSM_BUS_MASTER_PORT_MDP_PORT1,
};
static int mport_mdp1[] = {MSM_BUS_MASTER_PORT_MDP_PORT1,};
static int mport_rotator[] = {MSM_BUS_MASTER_PORT_ROTATOR,};
static int mport_graphics_3d_port0[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,};
static int mport_graphics_3d_port1[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT1,};
static int mport_jpeg_dec[] = {MSM_BUS_MASTER_PORT_JPEG_DEC,};
static int mport_video_cap[] = {MSM_BUS_MASTER_PORT_VIDEO_CAP,};
static int mport_vfe[] = {MSM_BUS_MASTER_PORT_VFE,};
static int mport_vpe[] = {MSM_BUS_MASTER_PORT_VPE,};
static int mport_jpeg_enc[] = {MSM_BUS_MASTER_PORT_JPEG_ENC,};
static int mport_video_enc[] = {MSM_BUS_MASTER_PORT_VIDEO_ENC,};
static int mport_video_dec[] = {MSM_BUS_MASTER_PORT_VIDEO_DEC,};
/* dual-port link between the Apps and MMSS fabrics */
static int appss_mport_fab_mmss[] = {
	MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_0,
	MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_1
};
static int mmss_sport_apps_fab[] = {
	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_0,
	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1
};
static int sport_mm_imem[] = {MSM_BUS_SLAVE_PORT_MM_IMEM,};
static int mmss_tiered_slave_fab_apps[] = {
	MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_0,
	MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_1,
};
static int tiered_slave_mm_imem[] = {MSM_BUS_TIERED_SLAVE_MM_IMEM,};
/*
 * MMSS (multimedia subsystem) fabric topology: display, GPU, camera and
 * codec masters, the dual-port gateway to the Apps fabric (16 bytes
 * wide) and the multimedia IMEM slave.
 */
static struct msm_bus_node_info mmss_fabric_info[] = {
	{
		.id = MSM_BUS_MASTER_MDP_PORT0,
		.masterp = mport_mdp,
		.num_mports = ARRAY_SIZE(mport_mdp),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_MDP_PORT1,
		.masterp = mport_mdp1,
		.num_mports = ARRAY_SIZE(mport_mdp1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_ROTATOR,
		.masterp = mport_rotator,
		.num_mports = ARRAY_SIZE(mport_rotator),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_GRAPHICS_3D,
		.masterp = mport_graphics_3d_port0,
		.num_mports = ARRAY_SIZE(mport_graphics_3d_port0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
		.masterp = mport_graphics_3d_port1,
		.num_mports = ARRAY_SIZE(mport_graphics_3d_port1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_JPEG_DEC,
		.masterp = mport_jpeg_dec,
		.num_mports = ARRAY_SIZE(mport_jpeg_dec),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_CAP,
		.masterp = mport_video_cap,
		.num_mports = ARRAY_SIZE(mport_video_cap),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_ENC,
		.masterp = mport_video_enc,
		.num_mports = ARRAY_SIZE(mport_video_enc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_VFE,
		.masterp = mport_vfe,
		.num_mports = ARRAY_SIZE(mport_vfe),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_VPE,
		.masterp = mport_vpe,
		.num_mports = ARRAY_SIZE(mport_vpe),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_JPEG_ENC,
		.masterp = mport_jpeg_enc,
		.num_mports = ARRAY_SIZE(mport_jpeg_enc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	/* This port has been added for V2. It is absent in V1 */
	{
		.id = MSM_BUS_MASTER_VIDEO_DEC,
		.masterp = mport_video_dec,
		.num_mports = ARRAY_SIZE(mport_video_dec),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	/* gateway to the Apps fabric (dual port, 16-byte wide) */
	{
		.id = MSM_BUS_FAB_APPSS,
		.gateway = 1,
		.slavep = mmss_sport_apps_fab,
		.num_sports = ARRAY_SIZE(mmss_sport_apps_fab),
		.masterp = appss_mport_fab_mmss,
		.num_mports = ARRAY_SIZE(appss_mport_fab_mmss),
		.tier = mmss_tiered_slave_fab_apps,
		.num_tiers = ARRAY_SIZE(mmss_tiered_slave_fab_apps),
		.buswidth = 16,
	},
	{
		.id = MSM_BUS_SLAVE_MM_IMEM,
		.slavep = sport_mm_imem,
		.num_sports = ARRAY_SIZE(sport_mm_imem),
		.tier = tiered_slave_mm_imem,
		.num_tiers = ARRAY_SIZE(tiered_slave_mm_imem),
		.buswidth = 8,
	},
};
/*
 * System FPB (slow peripheral bus) topology.  All nodes are AHB
 * (.ahb = 1): they are register/SSBI targets, not bandwidth-arbitrated
 * DDR clients, so no tier or port arrays are needed on the slaves.
 */
static struct msm_bus_node_info sys_fpb_fabric_info[] = {
	/* gateway back to the system fabric */
	{
		.id = MSM_BUS_FAB_SYSTEM,
		.gateway = 1,
		.slavep = system_sport_system_fpb,
		.num_sports = ARRAY_SIZE(system_sport_system_fpb),
		.masterp = mport_system_fpb,
		.num_mports = ARRAY_SIZE(mport_system_fpb),
		.buswidth = 4,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_MASTER_SPDM,
		.ahb = 1,
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_MASTER_RPM,
		.ahb = 1,
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
	},
	{
		.id = MSM_BUS_SLAVE_SPDM,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_RPM,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_RPM_MSG_RAM,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_MPM,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_PMIC1_SSBI1_A,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_PMIC1_SSBI1_B,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_PMIC1_SSBI1_C,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_PMIC2_SSBI2_A,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_PMIC2_SSBI2_B,
		.buswidth = 4,
		.ahb = 1,
	},
};
/*
 * CPSS FPB (peripheral bus) topology: the gateway to the system fabric
 * followed by register-mapped AHB slaves — the GSBI UART and QUP
 * (I2C/SPI) engines, EBI2 chip selects, USB FS controllers and misc
 * peripherals.  All entries are AHB; none participate in DDR
 * bandwidth arbitration.
 */
static struct msm_bus_node_info cpss_fpb_fabric_info[] = {
	/* gateway back to the system fabric */
	{
		.id = MSM_BUS_FAB_SYSTEM,
		.gateway = 1,
		.slavep = system_sport_cpss_fpb,
		.num_sports = ARRAY_SIZE(system_sport_cpss_fpb),
		.masterp = system_mport_cpss_fpb,
		.num_mports = ARRAY_SIZE(system_mport_cpss_fpb),
		.buswidth = 4,
		.ahb = 1,
	},
	/* GSBI UART slaves */
	{
		.id = MSM_BUS_SLAVE_GSBI1_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI2_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI3_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI4_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI5_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI6_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI7_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI8_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI9_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI10_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI11_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI12_UART,
		.buswidth = 8,
		.ahb = 1,
	},
	/* GSBI QUP (I2C/SPI) slaves */
	{
		.id = MSM_BUS_SLAVE_GSBI1_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI2_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI3_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI4_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI5_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI6_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI7_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI8_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI9_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI10_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI11_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_GSBI12_QUP,
		.buswidth = 8,
		.ahb = 1,
	},
	/* EBI2 (external bus interface) slaves */
	{
		.id = MSM_BUS_SLAVE_EBI2_NAND,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_EBI2_CS0,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_EBI2_CS1,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_EBI2_CS2,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_EBI2_CS3,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_EBI2_CS4,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_EBI2_CS5,
		.buswidth = 8,
		.ahb = 1,
	},
	/* misc peripheral slaves */
	{
		.id = MSM_BUS_SLAVE_USB_FS1,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_USB_FS2,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_TSIF,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_MSM_TSSC,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_MSM_PDM,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_MSM_DIMEM,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_MSM_TCSR,
		.buswidth = 8,
		.ahb = 1,
	},
	{
		.id = MSM_BUS_SLAVE_MSM_PRNG,
		.buswidth = 4,
		.ahb = 1,
	},
};
/*
 * msm_bus_board_assign_iids() - assign fabric-unique private ids and
 * populate the id -> priv_id lookup tables.
 *
 * @fabreg: fabric registration whose node table is being processed
 * @fabid: base offset added to each non-gateway node id
 *
 * Non-gateway nodes get priv_id = fabid + id and are recorded in
 * master_iids[] (ids below SLAVE_ID_KEY) or slave_iids[] (ids at or
 * above it).  Gateway nodes keep their id as priv_id and are not
 * entered into the tables.
 *
 * Fix: the original code WARN()ed on an out-of-range id but still
 * performed the array store, corrupting memory beyond master_iids[]/
 * slave_iids[].  WARN() returns the truth of its condition, so we use
 * it to skip the out-of-bounds write.
 */
static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
	*fabreg, int fabid)
{
	int i;
	for (i = 0; i < fabreg->len; i++) {
		if (!fabreg->info[i].gateway) {
			fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
			if (fabreg->info[i].id < SLAVE_ID_KEY) {
				if (WARN(fabreg->info[i].id >= NMASTERS,
					"id %d exceeds array size!\n",
					fabreg->info[i].id))
					continue; /* don't write out of bounds */
				master_iids[fabreg->info[i].id] =
					fabreg->info[i].priv_id;
			} else {
				if (WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
					NSLAVES, "id %d exceeds array size!\n",
					fabreg->info[i].id))
					continue; /* don't write out of bounds */
				slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
					= fabreg->info[i].priv_id;
			}
		} else
			fabreg->info[i].priv_id = fabreg->info[i].id;
	}
}
static int msm_bus_board_8064_get_iid(int id)
{
if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
id >= (SLAVE_ID_KEY + NSLAVES)) {
MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
return -EINVAL;
}
return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
slave_iids[id - SLAVE_ID_KEY]), id);
}
/* Board-level callbacks shared by all five APQ8064 fabric pdatas. */
static struct msm_bus_board_algorithm msm_bus_board_algo = {
	.board_nfab = NFAB_8064,
	.get_iid = msm_bus_board_8064_get_iid,
	.assign_iids = msm_bus_board_assign_iids,
};
/* Apps fabric registration: Krait masters and DDR/L2 slaves. */
struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata = {
	.id = MSM_BUS_FAB_APPSS,
	.name = "msm_apps_fab",
	.info = apps_fabric_info,
	.len = ARRAY_SIZE(apps_fabric_info),
	.ahb = 0,
	.fabclk[DUAL_CTX] = "bus_clk",
	.fabclk[ACTIVE_CTX] = "bus_a_clk",
	.haltid = MSM_RPM_ID_APPS_FABRIC_CFG_HALT_0,
	.offset = MSM_RPM_ID_APPS_FABRIC_ARB_0,
	.nmasters = 6,
	.nslaves = 5,
	.ntieredslaves = 3,
	.board_algo = &msm_bus_board_algo,
};
struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata = {
.id = MSM_BUS_FAB_SYSTEM,
.name = "msm_sys_fab",
system_fabric_info,
ARRAY_SIZE(system_fabric_info),
.ahb = 0,
.fabclk[DUAL_CTX] = "bus_clk",
.fabclk[ACTIVE_CTX] = "bus_a_clk",
.haltid = MSM_RPM_ID_SYS_FABRIC_CFG_HALT_0,
.offset = MSM_RPM_ID_SYSTEM_FABRIC_ARB_0,
.nmasters = 15,
.nslaves = 15,
.ntieredslaves = 3,
.board_algo = &msm_bus_board_algo,
};
struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata = {
.id = MSM_BUS_FAB_MMSS,
.name = "msm_mm_fab",
mmss_fabric_info,
ARRAY_SIZE(mmss_fabric_info),
.ahb = 0,
.fabclk[DUAL_CTX] = "bus_clk",
.fabclk[ACTIVE_CTX] = "bus_a_clk",
.haltid = MSM_RPM_ID_MMSS_FABRIC_CFG_HALT_0,
.offset = MSM_RPM_ID_MM_FABRIC_ARB_0,
.nmasters = 13,
.nslaves = 3,
.ntieredslaves = 3,
.board_algo = &msm_bus_board_algo,
};
struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata = {
.id = MSM_BUS_FAB_SYSTEM_FPB,
.name = "msm_sys_fpb",
sys_fpb_fabric_info,
ARRAY_SIZE(sys_fpb_fabric_info),
.ahb = 1,
.fabclk[DUAL_CTX] = "bus_clk",
.fabclk[ACTIVE_CTX] = "bus_a_clk",
.nmasters = 0,
.nslaves = 0,
.ntieredslaves = 0,
.board_algo = &msm_bus_board_algo,
};
struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata = {
.id = MSM_BUS_FAB_CPSS_FPB,
.name = "msm_cpss_fpb",
cpss_fpb_fabric_info,
ARRAY_SIZE(cpss_fpb_fabric_info),
.ahb = 1,
.fabclk[DUAL_CTX] = "bus_clk",
.fabclk[ACTIVE_CTX] = "bus_a_clk",
.nmasters = 0,
.nslaves = 0,
.ntieredslaves = 0,
.board_algo = &msm_bus_board_algo,
};
| gpl-2.0 |
atalax/linux | sound/soc/intel/haswell/sst-haswell-pcm.c | 628 | 39133 | /*
* Intel SST Haswell/Broadwell PCM Support
*
* Copyright (C) 2013, Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include <sound/compress_driver.h>
#include "../haswell/sst-haswell-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-dsp.h"
/* Number of PCM DAIs handled by this platform driver. */
#define HSW_PCM_COUNT		6
#define HSW_VOLUME_MAX		0x7FFFFFFF /* 0dB */

/* byte offset of the position 'o' (in frames) within the DMA area 'd' */
#define SST_OLD_POSITION(d, r, o) ((d) +		\
			frames_to_bytes(r, o))
/* convert a frame count 'x' to a sample count for runtime 'r' */
#define SST_SAMPLES(r, x) (bytes_to_samples(r,	\
			frames_to_bytes(r, (x))))

/* simple volume table */
/*
 * 31-entry attenuation table: entry i is HSW_VOLUME_MAX (0dB) shifted
 * right by (30 - i) bits, i.e. each step is one bit (~6dB); the last
 * entry is full scale.  Indexed by the ALSA mixer control value.
 */
static const u32 volume_map[] = {
	HSW_VOLUME_MAX >> 30,
	HSW_VOLUME_MAX >> 29,
	HSW_VOLUME_MAX >> 28,
	HSW_VOLUME_MAX >> 27,
	HSW_VOLUME_MAX >> 26,
	HSW_VOLUME_MAX >> 25,
	HSW_VOLUME_MAX >> 24,
	HSW_VOLUME_MAX >> 23,
	HSW_VOLUME_MAX >> 22,
	HSW_VOLUME_MAX >> 21,
	HSW_VOLUME_MAX >> 20,
	HSW_VOLUME_MAX >> 19,
	HSW_VOLUME_MAX >> 18,
	HSW_VOLUME_MAX >> 17,
	HSW_VOLUME_MAX >> 16,
	HSW_VOLUME_MAX >> 15,
	HSW_VOLUME_MAX >> 14,
	HSW_VOLUME_MAX >> 13,
	HSW_VOLUME_MAX >> 12,
	HSW_VOLUME_MAX >> 11,
	HSW_VOLUME_MAX >> 10,
	HSW_VOLUME_MAX >> 9,
	HSW_VOLUME_MAX >> 8,
	HSW_VOLUME_MAX >> 7,
	HSW_VOLUME_MAX >> 6,
	HSW_VOLUME_MAX >> 5,
	HSW_VOLUME_MAX >> 4,
	HSW_VOLUME_MAX >> 3,
	HSW_VOLUME_MAX >> 2,
	HSW_VOLUME_MAX >> 1,
	HSW_VOLUME_MAX >> 0,
};

#define HSW_PCM_PERIODS_MAX	64
#define HSW_PCM_PERIODS_MIN	2

/* indices into mod_map[] / pdata->pcm[] for each front-end DAI */
#define HSW_PCM_DAI_ID_SYSTEM	0
#define HSW_PCM_DAI_ID_OFFLOAD0	1
#define HSW_PCM_DAI_ID_OFFLOAD1	2
#define HSW_PCM_DAI_ID_LOOPBACK	3
/* ALSA hardware capabilities advertised for every PCM on this DSP. */
static const struct snd_pcm_hardware hsw_pcm_hardware = {
	.info			= SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID |
				  SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_PAUSE |
				  SNDRV_PCM_INFO_RESUME |
				  SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
				  SNDRV_PCM_INFO_DRAIN_TRIGGER,
	.formats		= SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
				  SNDRV_PCM_FMTBIT_S32_LE,
	/* periods are page-granular so they map cleanly into the DSP page table */
	.period_bytes_min	= PAGE_SIZE,
	.period_bytes_max	= (HSW_PCM_PERIODS_MAX / HSW_PCM_PERIODS_MIN) * PAGE_SIZE,
	.periods_min		= HSW_PCM_PERIODS_MIN,
	.periods_max		= HSW_PCM_PERIODS_MAX,
	.buffer_bytes_max	= HSW_PCM_PERIODS_MAX * PAGE_SIZE,
};
/* Static association of a front-end DAI/direction with a DSP module. */
struct hsw_pcm_module_map {
	int dai_id;		/* HSW_PCM_DAI_ID_* index */
	int stream;		/* 0 = playback, 1 = capture */
	enum sst_hsw_module_id mod_id;	/* firmware module handling it */
};

/* private data for each PCM DSP stream */
struct hsw_pcm_data {
	int dai_id;
	struct sst_hsw_stream *stream;		/* NULL until stream created */
	struct sst_module_runtime *runtime;
	struct sst_module_runtime_context context;
	struct snd_pcm *hsw_pcm;
	u32 volume[2];		/* cached L/R gain, applied on stream create */
	struct snd_pcm_substream *substream;
	struct snd_compr_stream *cstream;
	unsigned int wpos;
	struct mutex mutex;	/* serialises stream create/free vs. controls */
	bool allocated;
	int persistent_offset;
};

/* power states tracked across runtime PM transitions */
enum hsw_pm_state {
	HSW_PM_STATE_D0 = 0,
	HSW_PM_STATE_RTD3 = 1,
	HSW_PM_STATE_D3 = 2,
};

/* private data for the driver */
struct hsw_priv_data {
	/* runtime DSP */
	struct sst_hsw *hsw;
	struct device *dev;
	enum hsw_pm_state pm_state;
	struct snd_soc_card *soc_card;
	struct sst_module_runtime *runtime_waves; /* sound effect module */

	/* page tables */
	struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];

	/* DAI data */
	struct hsw_pcm_data pcm[HSW_PCM_COUNT][2];
};
/* static mappings between PCMs and modules - may be dynamic in future */
/* Indexed by kcontrol mc->reg in the stream volume handlers below. */
static struct hsw_pcm_module_map mod_map[] = {
	{HSW_PCM_DAI_ID_SYSTEM, 0, SST_HSW_MODULE_PCM_SYSTEM},
	{HSW_PCM_DAI_ID_OFFLOAD0, 0, SST_HSW_MODULE_PCM},
	{HSW_PCM_DAI_ID_OFFLOAD1, 0, SST_HSW_MODULE_PCM},
	{HSW_PCM_DAI_ID_LOOPBACK, 1, SST_HSW_MODULE_PCM_REFERENCE},
	{HSW_PCM_DAI_ID_SYSTEM, 1, SST_HSW_MODULE_PCM_CAPTURE},
};

static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data);
/*
 * Convert an ALSA mixer control value (index into volume_map[]) to the
 * 32-bit gain word the DSP firmware expects.  Out-of-range indices
 * clamp to volume_map[0].
 */
static inline u32 hsw_mixer_to_ipc(unsigned int value)
{
	return (value < ARRAY_SIZE(volume_map)) ?
		volume_map[value] : volume_map[0];
}
/*
 * Convert a firmware gain word back to a mixer control value: the
 * index of the first volume_map[] entry >= value, or the last index
 * if no entry qualifies.
 */
static inline unsigned int hsw_ipc_to_mixer(u32 value)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(volume_map)) {
		if (volume_map[idx] >= value)
			return idx;
		idx++;
	}

	return idx - 1;
}
/*
 * Put handler for the per-stream volume controls (offload/capture).
 *
 * mc->reg indexes mod_map[] to find the DAI/direction the control is
 * bound to.  If the DSP stream is not currently allocated, the values
 * are only cached in pcm_data->volume[]; otherwise they are sent to
 * the firmware immediately.  The device is held awake via runtime PM
 * for the duration of the IPC, and pcm_data->mutex guards against the
 * stream being created/freed concurrently.
 */
static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(platform);
	struct hsw_pcm_data *pcm_data;
	struct sst_hsw *hsw = pdata->hsw;
	u32 volume;
	int dai, stream;

	dai = mod_map[mc->reg].dai_id;
	stream = mod_map[mc->reg].stream;
	pcm_data = &pdata->pcm[dai][stream];

	mutex_lock(&pcm_data->mutex);
	pm_runtime_get_sync(pdata->dev);

	if (!pcm_data->stream) {
		/* no DSP stream yet: cache the values for stream creation */
		pcm_data->volume[0] =
			hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
		pcm_data->volume[1] =
			hsw_mixer_to_ipc(ucontrol->value.integer.value[1]);
		pm_runtime_mark_last_busy(pdata->dev);
		pm_runtime_put_autosuspend(pdata->dev);
		mutex_unlock(&pcm_data->mutex);
		return 0;
	}

	if (ucontrol->value.integer.value[0] ==
		ucontrol->value.integer.value[1]) {
		volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
		/* apply volume value to all channels */
		sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, SST_HSW_CHANNELS_ALL, volume);
	} else {
		/* different L/R values: one IPC per channel */
		volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[0]);
		sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, 0, volume);
		volume = hsw_mixer_to_ipc(ucontrol->value.integer.value[1]);
		sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0, 1, volume);
	}

	pm_runtime_mark_last_busy(pdata->dev);
	pm_runtime_put_autosuspend(pdata->dev);
	mutex_unlock(&pcm_data->mutex);
	return 0;
}
/*
 * Get handler for the per-stream volume controls.
 *
 * Mirrors hsw_stream_volume_put(): if the DSP stream does not exist
 * the cached pcm_data->volume[] values are reported; otherwise both
 * channel gains are read back from the firmware.
 */
static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(platform);
	struct hsw_pcm_data *pcm_data;
	struct sst_hsw *hsw = pdata->hsw;
	u32 volume;
	int dai, stream;

	dai = mod_map[mc->reg].dai_id;
	stream = mod_map[mc->reg].stream;
	pcm_data = &pdata->pcm[dai][stream];

	mutex_lock(&pcm_data->mutex);
	pm_runtime_get_sync(pdata->dev);

	if (!pcm_data->stream) {
		/* stream not created: report the cached values */
		ucontrol->value.integer.value[0] =
			hsw_ipc_to_mixer(pcm_data->volume[0]);
		ucontrol->value.integer.value[1] =
			hsw_ipc_to_mixer(pcm_data->volume[1]);
		pm_runtime_mark_last_busy(pdata->dev);
		pm_runtime_put_autosuspend(pdata->dev);
		mutex_unlock(&pcm_data->mutex);
		return 0;
	}

	sst_hsw_stream_get_volume(hsw, pcm_data->stream, 0, 0, &volume);
	ucontrol->value.integer.value[0] = hsw_ipc_to_mixer(volume);
	sst_hsw_stream_get_volume(hsw, pcm_data->stream, 0, 1, &volume);
	ucontrol->value.integer.value[1] = hsw_ipc_to_mixer(volume);

	pm_runtime_mark_last_busy(pdata->dev);
	pm_runtime_put_autosuspend(pdata->dev);
	mutex_unlock(&pcm_data->mutex);
	return 0;
}
/*
 * Put handler for the global (master) DSP mixer volume.  Equal L/R
 * values are sent as a single "all channels" IPC; otherwise each
 * channel gets its own message.  Runtime PM keeps the DSP awake for
 * the duration of the IPC traffic.
 */
static int hsw_volume_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
	struct sst_hsw *hsw = pdata->hsw;
	long left = ucontrol->value.integer.value[0];
	long right = ucontrol->value.integer.value[1];

	pm_runtime_get_sync(pdata->dev);

	if (left == right) {
		/* identical channels: broadcast in one IPC */
		sst_hsw_mixer_set_volume(hsw, 0, SST_HSW_CHANNELS_ALL,
					hsw_mixer_to_ipc(left));
	} else {
		sst_hsw_mixer_set_volume(hsw, 0, 0, hsw_mixer_to_ipc(left));
		sst_hsw_mixer_set_volume(hsw, 0, 1, hsw_mixer_to_ipc(right));
	}

	pm_runtime_mark_last_busy(pdata->dev);
	pm_runtime_put_autosuspend(pdata->dev);
	return 0;
}
/*
 * Get handler for the global (master) DSP mixer volume: reads both
 * channel gains back from firmware mixer stage 0 and converts them to
 * mixer control values.
 */
static int hsw_volume_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
	struct sst_hsw *hsw = pdata->hsw;
	unsigned int chan_vol = 0;
	int chan;

	pm_runtime_get_sync(pdata->dev);

	/* read back both channels of mixer stage 0 */
	for (chan = 0; chan < 2; chan++) {
		sst_hsw_mixer_get_volume(hsw, 0, chan, &chan_vol);
		ucontrol->value.integer.value[chan] = hsw_ipc_to_mixer(chan_vol);
	}

	pm_runtime_mark_last_busy(pdata->dev);
	pm_runtime_put_autosuspend(pdata->dev);
	return 0;
}
static int hsw_waves_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
struct sst_hsw *hsw = pdata->hsw;
enum sst_hsw_module_id id = SST_HSW_MODULE_WAVES;
ucontrol->value.integer.value[0] =
(sst_hsw_is_module_active(hsw, id) ||
sst_hsw_is_module_enabled_rtd3(hsw, id));
return 0;
}
/*
 * Put handler for the "Waves Switch" control.
 *
 * If the Waves module is loaded in DSP RAM the switch is applied
 * immediately over IPC; otherwise (e.g. while the DSP is in runtime
 * D3) only the RTD3 enable flag is updated so the desired state is
 * applied when the module is loaded again.  Redundant writes (state
 * already matches) are short-circuited to avoid needless IPC.
 */
static int hsw_waves_switch_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
	struct sst_hsw *hsw = pdata->hsw;
	int ret = 0;
	enum sst_hsw_module_id id = SST_HSW_MODULE_WAVES;
	bool switch_on = (bool)ucontrol->value.integer.value[0];

	/* if module is in RAM on the DSP, apply user settings to module through
	 * ipc. If module is not in RAM on the DSP, store user setting for
	 * track */
	if (sst_hsw_is_module_loaded(hsw, id)) {
		if (switch_on == sst_hsw_is_module_active(hsw, id))
			return 0;

		if (switch_on)
			ret = sst_hsw_module_enable(hsw, id, 0);
		else
			ret = sst_hsw_module_disable(hsw, id, 0);
	} else {
		if (switch_on == sst_hsw_is_module_enabled_rtd3(hsw, id))
			return 0;

		if (switch_on)
			sst_hsw_set_module_enabled_rtd3(hsw, id);
		else
			sst_hsw_set_module_disabled_rtd3(hsw, id);
	}

	return ret;
}
/*
 * Get handler for "Waves Set Param": copies one line of the saved
 * parameter buffer into the control's byte payload.
 */
static int hsw_waves_param_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);

	/* return a matching line from param buffer */
	return sst_hsw_load_param_line(pdata->hsw, ucontrol->value.bytes.data);
}
/*
 * Put handler for "Waves Set Param".
 *
 * byte 0 of the payload is the parameter id; 0xFF is a reset command
 * that clears the driver-side parameter buffer.  Every other payload
 * is appended to the buffer (so it can be replayed after a DSP power
 * cycle) and, if the Waves module is currently active, also pushed to
 * the firmware immediately.
 */
static int hsw_waves_param_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
	struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
	struct sst_hsw *hsw = pdata->hsw;
	int ret;
	enum sst_hsw_module_id id = SST_HSW_MODULE_WAVES;
	int param_id = ucontrol->value.bytes.data[0];
	int param_size = WAVES_PARAM_COUNT;

	/* clear param buffer and reset buffer index */
	if (param_id == 0xFF) {
		sst_hsw_reset_param_buf(hsw);
		return 0;
	}

	/* store params into buffer */
	ret = sst_hsw_store_param_line(hsw, ucontrol->value.bytes.data);
	if (ret < 0)
		return ret;

	/* only push to the DSP while the module is running */
	if (sst_hsw_is_module_active(hsw, id))
		ret = sst_hsw_module_set_param(hsw, id, 0, param_id,
				param_size, ucontrol->value.bytes.data);
	return ret;
}
/* TLV used by both global and stream volumes */
/* -90.00 dB minimum, 3.00 dB per step, lowest step mutes (last arg = 1) */
static const DECLARE_TLV_DB_SCALE(hsw_vol_tlv, -9000, 300, 1);
/* System Pin has no volume control */
static const struct snd_kcontrol_new hsw_volume_controls[] = {
	/* Global DSP volume */
	SOC_DOUBLE_EXT_TLV("Master Playback Volume", 0, 0, 8,
		ARRAY_SIZE(volume_map) - 1, 0,
		hsw_volume_get, hsw_volume_put, hsw_vol_tlv),
	/* Offload 0 volume */
	SOC_DOUBLE_EXT_TLV("Media0 Playback Volume", 1, 0, 8,
		ARRAY_SIZE(volume_map) - 1, 0,
		hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
	/* Offload 1 volume */
	SOC_DOUBLE_EXT_TLV("Media1 Playback Volume", 2, 0, 8,
		ARRAY_SIZE(volume_map) - 1, 0,
		hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
	/* Mic Capture volume */
	SOC_DOUBLE_EXT_TLV("Mic Capture Volume", 4, 0, 8,
		ARRAY_SIZE(volume_map) - 1, 0,
		hsw_stream_volume_get, hsw_stream_volume_put, hsw_vol_tlv),
	/* enable/disable module waves */
	SOC_SINGLE_BOOL_EXT("Waves Switch", 0,
		hsw_waves_switch_get, hsw_waves_switch_put),
	/* set parameters to module waves */
	SND_SOC_BYTES_EXT("Waves Set Param", WAVES_PARAM_COUNT,
		hsw_waves_param_get, hsw_waves_param_put),
};
/*
 * create_adsp_page_table - build the DSP-visible page table for a DMA buffer.
 *
 * Each buffer page's frame number is stored as a packed 20-bit entry in the
 * per-PCM/per-stream page-table page allocated at probe time.  Entries are
 * nibble-packed: odd indices start shifted left by 4 within their word.
 *
 * Fix: the debug format used "0x%zu", printing a decimal value behind a hex
 * "0x" prefix; use "0x%zx" so the printed size matches its prefix.
 */
static int create_adsp_page_table(struct snd_pcm_substream *substream,
	struct hsw_priv_data *pdata, struct snd_soc_pcm_runtime *rtd,
	unsigned char *dma_area, size_t size, int pcm)
{
	struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
	int i, pages, stream = substream->stream;

	pages = snd_sgbuf_aligned_pages(size);

	dev_dbg(rtd->dev, "generating page table for %p size 0x%zx pages %d\n",
		dma_area, size, pages);

	for (i = 0; i < pages; i++) {
		/* byte offset of this 20-bit entry: i * 20 / 8 == (5 * i) / 2 */
		u32 idx = (((i << 2) + i)) >> 1;
		u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
		u32 *pg_table;

		dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);

		pg_table = (u32 *)(pdata->dmab[pcm][stream].area + idx);

		/* odd entries begin on a nibble boundary within the word */
		if (i & 1)
			*pg_table |= (pfn << 4);
		else
			*pg_table |= pfn;
	}

	return 0;
}
/* this may get called several times by oss emulation */
/*
 * hsw_pcm_hw_params - configure the DSP stream for the negotiated hw params.
 *
 * Sends the stream format/rate/bit-depth/channel configuration to the DSP
 * over IPC, allocates the ALSA DMA buffer, builds the DSP page table for it,
 * commits the stream and finally leaves it paused until trigger time.
 * On repeat calls an already-committed stream is reset, freed and recreated.
 */
static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(rtd->platform);
	struct hsw_pcm_data *pcm_data;
	struct sst_hsw *hsw = pdata->hsw;
	struct sst_module *module_data;
	struct sst_dsp *dsp;
	struct snd_dma_buffer *dmab;
	enum sst_hsw_stream_type stream_type;
	enum sst_hsw_stream_path_id path_id;
	u32 rate, bits, map, pages, module_id;
	u8 channels;
	int ret, dai;
	dai = mod_map[rtd->cpu_dai->id].dai_id;
	pcm_data = &pdata->pcm[dai][substream->stream];
	/* check if we are being called a subsequent time */
	if (pcm_data->allocated) {
		/* the DSP stream cannot be reconfigured in place: tear it
		 * down and create a fresh one */
		ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
		if (ret < 0)
			dev_dbg(rtd->dev, "error: reset stream failed %d\n",
				ret);
		ret = sst_hsw_stream_free(hsw, pcm_data->stream);
		if (ret < 0) {
			dev_dbg(rtd->dev, "error: free stream failed %d\n",
				ret);
			return ret;
		}
		pcm_data->allocated = false;
		pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
			hsw_notify_pointer, pcm_data);
		if (pcm_data->stream == NULL) {
			dev_err(rtd->dev, "error: failed to create stream\n");
			return -EINVAL;
		}
	}
	/* stream direction */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
	else
		path_id = SST_HSW_STREAM_PATH_SSP0_IN;
	/* DSP stream type depends on DAI ID */
	switch (rtd->cpu_dai->id) {
	case 0:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			stream_type = SST_HSW_STREAM_TYPE_SYSTEM;
			module_id = SST_HSW_MODULE_PCM_SYSTEM;
		}
		else {
			stream_type = SST_HSW_STREAM_TYPE_CAPTURE;
			module_id = SST_HSW_MODULE_PCM_CAPTURE;
		}
		break;
	case 1:
	case 2:
		/* offload DAIs */
		stream_type = SST_HSW_STREAM_TYPE_RENDER;
		module_id = SST_HSW_MODULE_PCM;
		break;
	case 3:
		/* path ID needs to be OUT for loopback */
		stream_type = SST_HSW_STREAM_TYPE_LOOPBACK;
		path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
		module_id = SST_HSW_MODULE_PCM_REFERENCE;
		break;
	default:
		dev_err(rtd->dev, "error: invalid DAI ID %d\n",
			rtd->cpu_dai->id);
		return -EINVAL;
	};
	ret = sst_hsw_stream_format(hsw, pcm_data->stream,
		path_id, stream_type, SST_HSW_STREAM_FORMAT_PCM_FORMAT);
	if (ret < 0) {
		dev_err(rtd->dev, "error: failed to set format %d\n", ret);
		return ret;
	}
	rate = params_rate(params);
	ret = sst_hsw_stream_set_rate(hsw, pcm_data->stream, rate);
	if (ret < 0) {
		dev_err(rtd->dev, "error: could not set rate %d\n", rate);
		return ret;
	}
	/* container depth vs valid sample bits: S24_LE carries 24 valid
	 * bits in a 32-bit container */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		bits = SST_HSW_DEPTH_16BIT;
		sst_hsw_stream_set_valid(hsw, pcm_data->stream, 16);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		bits = SST_HSW_DEPTH_32BIT;
		sst_hsw_stream_set_valid(hsw, pcm_data->stream, 24);
		break;
	case SNDRV_PCM_FORMAT_S8:
		bits = SST_HSW_DEPTH_8BIT;
		sst_hsw_stream_set_valid(hsw, pcm_data->stream, 8);
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		bits = SST_HSW_DEPTH_32BIT;
		sst_hsw_stream_set_valid(hsw, pcm_data->stream, 32);
		break;
	default:
		dev_err(rtd->dev, "error: invalid format %d\n",
			params_format(params));
		return -EINVAL;
	}
	ret = sst_hsw_stream_set_bits(hsw, pcm_data->stream, bits);
	if (ret < 0) {
		dev_err(rtd->dev, "error: could not set bits %d\n", bits);
		return ret;
	}
	/* channel map is always configured as stereo here, regardless of
	 * the negotiated channel count */
	channels = params_channels(params);
	map = create_channel_map(SST_HSW_CHANNEL_CONFIG_STEREO);
	sst_hsw_stream_set_map_config(hsw, pcm_data->stream,
			map, SST_HSW_CHANNEL_CONFIG_STEREO);
	ret = sst_hsw_stream_set_channels(hsw, pcm_data->stream, channels);
	if (ret < 0) {
		dev_err(rtd->dev, "error: could not set channels %d\n",
			channels);
		return ret;
	}
	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0) {
		dev_err(rtd->dev, "error: could not allocate %d bytes for PCM %d\n",
			params_buffer_bytes(params), ret);
		return ret;
	}
	dmab = snd_pcm_get_dma_buf(substream);
	/* make the freshly allocated buffer visible to the DSP */
	ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area,
		runtime->dma_bytes, rtd->cpu_dai->id);
	if (ret < 0)
		return ret;
	sst_hsw_stream_set_style(hsw, pcm_data->stream,
		SST_HSW_INTERLEAVING_PER_CHANNEL);
	/* round the page count up for partial trailing pages */
	if (runtime->dma_bytes % PAGE_SIZE)
		pages = (runtime->dma_bytes / PAGE_SIZE) + 1;
	else
		pages = runtime->dma_bytes / PAGE_SIZE;
	ret = sst_hsw_stream_buffer(hsw, pcm_data->stream,
		pdata->dmab[rtd->cpu_dai->id][substream->stream].addr,
		pages, runtime->dma_bytes, 0,
		snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT);
	if (ret < 0) {
		dev_err(rtd->dev, "error: failed to set DMA buffer %d\n", ret);
		return ret;
	}
	dsp = sst_hsw_get_dsp(hsw);
	module_data = sst_module_get_from_id(dsp, module_id);
	if (module_data == NULL) {
		dev_err(rtd->dev, "error: failed to get module config\n");
		return -EINVAL;
	}
	sst_hsw_stream_set_module_info(hsw, pcm_data->stream,
				       pcm_data->runtime);
	/* commit sends the accumulated configuration to the firmware */
	ret = sst_hsw_stream_commit(hsw, pcm_data->stream);
	if (ret < 0) {
		dev_err(rtd->dev, "error: failed to commit stream %d\n", ret);
		return ret;
	}
	if (!pcm_data->allocated) {
		/* Set previous saved volume */
		sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0,
				0, pcm_data->volume[0]);
		sst_hsw_stream_set_volume(hsw, pcm_data->stream, 0,
				1, pcm_data->volume[1]);
		pcm_data->allocated = true;
	}
	/* stay paused until the trigger callback starts the stream */
	ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1);
	if (ret < 0)
		dev_err(rtd->dev, "error: failed to pause %d\n", ret);
	return 0;
}
/* hw_free: release the DMA pages allocated in hsw_pcm_hw_params(). */
static int hsw_pcm_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_lib_free_pages(substream);
	return 0;
}
/*
 * hsw_pcm_trigger - start/stop/pause the DSP stream.
 *
 * Start-type triggers resume the (paused) stream; stop-type triggers pause
 * it.  DRAIN records the application pointer so hsw_notify_pointer() can
 * silence already-consumed data while the firmware keeps the DMA running.
 */
static int hsw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(rtd->platform);
	struct hsw_pcm_data *pcm_data;
	struct sst_hsw_stream *sst_stream;
	struct sst_hsw *hsw = pdata->hsw;
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	int dai;
	dai = mod_map[rtd->cpu_dai->id].dai_id;
	pcm_data = &pdata->pcm[dai][substream->stream];
	sst_stream = pcm_data->stream;
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* leaving any drain state before (re)starting */
		sst_hsw_stream_set_silence_start(hsw, sst_stream, false);
		sst_hsw_stream_resume(hsw, pcm_data->stream, 0);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		sst_hsw_stream_set_silence_start(hsw, sst_stream, false);
		sst_hsw_stream_pause(hsw, pcm_data->stream, 0);
		break;
	case SNDRV_PCM_TRIGGER_DRAIN:
		/* remember where valid data ends so the pointer callback
		 * can zero consumed regions behind the DMA */
		pos = runtime->control->appl_ptr % runtime->buffer_size;
		sst_hsw_stream_set_old_position(hsw, pcm_data->stream, pos);
		sst_hsw_stream_set_silence_start(hsw, sst_stream, true);
		break;
	default:
		break;
	}
	return 0;
}
/*
 * hsw_notify_pointer - DSP position-update callback.
 *
 * Invoked by the SST core when the firmware reports DMA progress.  For a
 * draining playback stream it overwrites the consumed region of the ring
 * buffer with silence (the firmware does not stop DMA by itself), then
 * notifies ALSA of the elapsed period.  Returns the application pointer
 * position in bytes.
 */
static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data)
{
	struct hsw_pcm_data *pcm_data = data;
	struct snd_pcm_substream *substream = pcm_data->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(rtd->platform);
	struct sst_hsw *hsw = pdata->hsw;
	u32 pos;
	snd_pcm_uframes_t position = bytes_to_frames(runtime,
		sst_hsw_get_dsp_position(hsw, pcm_data->stream));
	unsigned char *dma_area = runtime->dma_area;
	snd_pcm_uframes_t dma_frames =
		bytes_to_frames(runtime, runtime->dma_bytes);
	snd_pcm_uframes_t old_position;
	ssize_t samples;
	pos = frames_to_bytes(runtime,
		(runtime->control->appl_ptr % runtime->buffer_size));
	dev_vdbg(rtd->dev, "PCM: App pointer %d bytes\n", pos);
	/* SST fw don't know where to stop dma
	 * So, SST driver need to clean the data which has been consumed
	 */
	if (dma_area == NULL || dma_frames <= 0
		|| (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		|| !sst_hsw_stream_get_silence_start(hsw, stream)) {
		/* not draining playback: just report period progress */
		snd_pcm_period_elapsed(substream);
		return pos;
	}
	old_position = sst_hsw_stream_get_old_position(hsw, stream);
	if (position > old_position) {
		/* no wrap: silence the span consumed since last update */
		if (position < dma_frames) {
			samples = SST_SAMPLES(runtime, position - old_position);
			snd_pcm_format_set_silence(runtime->format,
				SST_OLD_POSITION(dma_area,
					runtime, old_position),
				samples);
		} else
			dev_err(rtd->dev, "PCM: position is wrong\n");
	} else {
		/* DMA wrapped: silence tail [old_position, end) ... */
		if (old_position < dma_frames) {
			samples = SST_SAMPLES(runtime,
				dma_frames - old_position);
			snd_pcm_format_set_silence(runtime->format,
				SST_OLD_POSITION(dma_area,
					runtime, old_position),
				samples);
		} else
			dev_err(rtd->dev, "PCM: dma_bytes is wrong\n");
		/* ... then head [0, position) */
		if (position < dma_frames) {
			samples = SST_SAMPLES(runtime, position);
			snd_pcm_format_set_silence(runtime->format,
				dma_area, samples);
		} else
			dev_err(rtd->dev, "PCM: position is wrong\n");
	}
	sst_hsw_stream_set_old_position(hsw, stream, position);
	/* let alsa know we have play a period */
	snd_pcm_period_elapsed(substream);
	return pos;
}
/* Report the stream's current DMA position, converted to frames. */
static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(rtd->platform);
	struct sst_hsw *hsw = pdata->hsw;
	int dai = mod_map[rtd->cpu_dai->id].dai_id;
	struct hsw_pcm_data *pcm_data = &pdata->pcm[dai][substream->stream];
	u32 byte_pos;
	uint64_t ppos;
	snd_pcm_uframes_t frame_pos;

	/* hardware read pointer in bytes, then converted to frames */
	byte_pos = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
	frame_pos = bytes_to_frames(runtime, byte_pos);

	/* presentation position is fetched for the debug trace only */
	ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
	dev_vdbg(rtd->dev, "PCM: DMA pointer %du bytes, pos %llu\n",
		byte_pos, ppos);

	return frame_pos;
}
/*
 * PCM open: take a runtime PM reference and create the DSP stream.
 *
 * The PM reference is dropped again on the error path here and in
 * hsw_pcm_close() on the success path.
 */
static int hsw_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct hsw_priv_data *pdata =
		snd_soc_platform_get_drvdata(rtd->platform);
	struct hsw_pcm_data *pcm_data;
	struct sst_hsw *hsw = pdata->hsw;
	int dai;
	dai = mod_map[rtd->cpu_dai->id].dai_id;
	pcm_data = &pdata->pcm[dai][substream->stream];
	mutex_lock(&pcm_data->mutex);
	/* wake the DSP before any IPC traffic */
	pm_runtime_get_sync(pdata->dev);
	snd_soc_pcm_set_drvdata(rtd, pcm_data);
	pcm_data->substream = substream;
	snd_soc_set_runtime_hwparams(substream, &hsw_pcm_hardware);
	pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
		hsw_notify_pointer, pcm_data);
	if (pcm_data->stream == NULL) {
		dev_err(rtd->dev, "error: failed to create stream\n");
		/* undo the PM reference taken above */
		pm_runtime_mark_last_busy(pdata->dev);
		pm_runtime_put_autosuspend(pdata->dev);
		mutex_unlock(&pcm_data->mutex);
		return -EINVAL;
	}
	mutex_unlock(&pcm_data->mutex);
	return 0;
}
static int hsw_pcm_close(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct hsw_priv_data *pdata =
snd_soc_platform_get_drvdata(rtd->platform);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
int ret, dai;
dai = mod_map[rtd->cpu_dai->id].dai_id;
pcm_data = &pdata->pcm[dai][substream->stream];
mutex_lock(&pcm_data->mutex);
ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
if (ret < 0) {
dev_dbg(rtd->dev, "error: reset stream failed %d\n", ret);
goto out;
}
ret = sst_hsw_stream_free(hsw, pcm_data->stream);
if (ret < 0) {
dev_dbg(rtd->dev, "error: free stream failed %d\n", ret);
goto out;
}
pcm_data->allocated = 0;
pcm_data->stream = NULL;
out:
pm_runtime_mark_last_busy(pdata->dev);
pm_runtime_put_autosuspend(pdata->dev);
mutex_unlock(&pcm_data->mutex);
return ret;
}
/* ALSA PCM operations; buffers are SG-backed, hence the sgbuf .page op. */
static struct snd_pcm_ops hsw_pcm_ops = {
	.open		= hsw_pcm_open,
	.close		= hsw_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= hsw_pcm_hw_params,
	.hw_free	= hsw_pcm_hw_free,
	.trigger	= hsw_pcm_trigger,
	.pointer	= hsw_pcm_pointer,
	.page		= snd_pcm_sgbuf_ops_page,
};
/*
 * hsw_pcm_create_modules - create a DSP runtime module per PCM, plus Waves.
 *
 * On error every runtime created so far is freed.  Fix: also clear the
 * stale pcm_data->runtime pointers during unwind — hsw_pcm_free_modules()
 * frees any non-NULL runtime pointer, so leaving dangling pointers here
 * risks a later double free.
 */
static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
{
	struct sst_hsw *hsw = pdata->hsw;
	struct hsw_pcm_data *pcm_data;
	int i;

	for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
		pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];

		/* create new runtime module, use same offset if recreated */
		pcm_data->runtime = sst_hsw_runtime_module_create(hsw,
			mod_map[i].mod_id, pcm_data->persistent_offset);
		if (pcm_data->runtime == NULL)
			goto err;
		pcm_data->persistent_offset =
			pcm_data->runtime->persistent_offset;
	}

	/* create runtime blocks for module waves */
	if (sst_hsw_is_module_loaded(hsw, SST_HSW_MODULE_WAVES)) {
		pdata->runtime_waves = sst_hsw_runtime_module_create(hsw,
			SST_HSW_MODULE_WAVES, 0);
		if (pdata->runtime_waves == NULL)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; i--) {
		pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
		sst_hsw_runtime_module_free(pcm_data->runtime);
		pcm_data->runtime = NULL;
	}
	return -ENODEV;
}
/* Release every PCM runtime module, and the Waves runtime when present. */
static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
{
	struct sst_hsw *hsw = pdata->hsw;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(mod_map); idx++) {
		struct hsw_pcm_data *pcm_data =
			&pdata->pcm[mod_map[idx].dai_id][mod_map[idx].stream];

		if (!pcm_data->runtime)
			continue;
		sst_hsw_runtime_module_free(pcm_data->runtime);
		pcm_data->runtime = NULL;
	}

	/* the Waves runtime only exists when its firmware module is loaded */
	if (sst_hsw_is_module_loaded(hsw, SST_HSW_MODULE_WAVES) &&
	    pdata->runtime_waves) {
		sst_hsw_runtime_module_free(pdata->runtime_waves);
		pdata->runtime_waves = NULL;
	}
}
static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
struct snd_soc_platform *platform = rtd->platform;
struct sst_pdata *pdata = dev_get_platdata(platform->dev);
struct hsw_priv_data *priv_data = dev_get_drvdata(platform->dev);
struct device *dev = pdata->dma_dev;
int ret = 0;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV_SG,
dev,
hsw_pcm_hardware.buffer_bytes_max,
hsw_pcm_hardware.buffer_bytes_max);
if (ret) {
dev_err(rtd->dev, "dma buffer allocation failed %d\n",
ret);
return ret;
}
}
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_PLAYBACK].hsw_pcm = pcm;
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream)
priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_CAPTURE].hsw_pcm = pcm;
return ret;
}
/* sample formats common to the offload DAIs */
#define HSW_FORMATS \
	(SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)

/* Front-end DAIs exposed by the DSP: system PCM (playback + capture),
 * two compressed-offload playback pins and a reference loopback capture. */
static struct snd_soc_dai_driver hsw_dais[] = {
	{
		.name  = "System Pin",
		.id = HSW_PCM_DAI_ID_SYSTEM,
		.playback = {
			.stream_name = "System Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_48000,
			.formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
		},
		.capture = {
			.stream_name = "Analog Capture",
			.channels_min = 2,
			.channels_max = 4,
			.rates = SNDRV_PCM_RATE_48000,
			.formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
		},
	},
	{
		/* PCM */
		.name  = "Offload0 Pin",
		.id = HSW_PCM_DAI_ID_OFFLOAD0,
		.playback = {
			.stream_name = "Offload0 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_192000,
			.formats = HSW_FORMATS,
		},
	},
	{
		/* PCM */
		.name  = "Offload1 Pin",
		.id = HSW_PCM_DAI_ID_OFFLOAD1,
		.playback = {
			.stream_name = "Offload1 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_192000,
			.formats = HSW_FORMATS,
		},
	},
	{
		.name  = "Loopback Pin",
		.id = HSW_PCM_DAI_ID_LOOPBACK,
		.capture = {
			.stream_name = "Loopback Capture",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_48000,
			.formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
		},
	},
};
/* DAPM topology: SSP endpoints plus a virtual mixer that sums all
 * playback streams onto SSP0. */
static const struct snd_soc_dapm_widget widgets[] = {
	/* Backend DAIs  */
	SND_SOC_DAPM_AIF_IN("SSP0 CODEC IN", NULL, 0, SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_AIF_OUT("SSP0 CODEC OUT", NULL, 0, SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_AIF_IN("SSP1 BT IN", NULL, 0, SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_AIF_OUT("SSP1 BT OUT", NULL, 0, SND_SOC_NOPM, 0, 0),
	/* Global Playback Mixer */
	SND_SOC_DAPM_MIXER("Playback VMixer", SND_SOC_NOPM, 0, 0, NULL, 0),
};

/* audio routes through the virtual mixer and the SSP0 codec port */
static const struct snd_soc_dapm_route graph[] = {
	/* Playback Mixer */
	{"Playback VMixer", NULL, "System Playback"},
	{"Playback VMixer", NULL, "Offload0 Playback"},
	{"Playback VMixer", NULL, "Offload1 Playback"},
	{"SSP0 CODEC OUT", NULL, "Playback VMixer"},
	{"Analog Capture", NULL, "SSP0 CODEC IN"},
};
/*
 * hsw_pcm_probe - platform probe: allocate per-DAI page-table pages,
 * create the DSP runtime modules and enable autosuspend runtime PM.
 *
 * Fix: if the capture page-table allocation fails for DAI i, the unwind
 * loop starts at i - 1 and would leak DAI i's already-allocated playback
 * page; free it explicitly before unwinding.
 */
static int hsw_pcm_probe(struct snd_soc_platform *platform)
{
	struct hsw_priv_data *priv_data = snd_soc_platform_get_drvdata(platform);
	struct sst_pdata *pdata = dev_get_platdata(platform->dev);
	struct device *dma_dev, *dev;
	int i, ret = 0;

	if (!pdata)
		return -ENODEV;

	dev = platform->dev;
	dma_dev = pdata->dma_dev;

	priv_data->hsw = pdata->dsp;
	priv_data->dev = platform->dev;
	priv_data->pm_state = HSW_PM_STATE_D0;
	priv_data->soc_card = platform->component.card;

	/* allocate DSP buffer page tables */
	for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
		/* playback */
		if (hsw_dais[i].playback.channels_min) {
			mutex_init(&priv_data->pcm[i][SNDRV_PCM_STREAM_PLAYBACK].mutex);
			ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
				PAGE_SIZE, &priv_data->dmab[i][0]);
			if (ret < 0)
				goto err;
		}

		/* capture */
		if (hsw_dais[i].capture.channels_min) {
			mutex_init(&priv_data->pcm[i][SNDRV_PCM_STREAM_CAPTURE].mutex);
			ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
				PAGE_SIZE, &priv_data->dmab[i][1]);
			if (ret < 0) {
				/* don't leak this DAI's playback page; the
				 * unwind loop below only covers i - 1 .. 0 */
				if (hsw_dais[i].playback.channels_min)
					snd_dma_free_pages(&priv_data->dmab[i][0]);
				goto err;
			}
		}
	}

	/* allocate runtime modules */
	ret = hsw_pcm_create_modules(priv_data);
	if (ret < 0)
		goto err;

	/* enable runtime PM with auto suspend */
	pm_runtime_set_autosuspend_delay(platform->dev,
		SST_RUNTIME_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(platform->dev);
	pm_runtime_enable(platform->dev);
	pm_runtime_idle(platform->dev);
	return 0;

err:
	for (--i; i >= 0; i--) {
		if (hsw_dais[i].playback.channels_min)
			snd_dma_free_pages(&priv_data->dmab[i][0]);
		if (hsw_dais[i].capture.channels_min)
			snd_dma_free_pages(&priv_data->dmab[i][1]);
	}
	return ret;
}
/* Platform remove: disable runtime PM, then release the DSP runtime
 * modules and the per-DAI page-table pages allocated at probe time. */
static int hsw_pcm_remove(struct snd_soc_platform *platform)
{
	struct hsw_priv_data *priv_data =
		snd_soc_platform_get_drvdata(platform);
	int dai;

	pm_runtime_disable(platform->dev);
	hsw_pcm_free_modules(priv_data);

	/* mirror the probe-time allocations */
	for (dai = 0; dai < ARRAY_SIZE(hsw_dais); dai++) {
		if (hsw_dais[dai].playback.channels_min)
			snd_dma_free_pages(&priv_data->dmab[dai][0]);
		if (hsw_dais[dai].capture.channels_min)
			snd_dma_free_pages(&priv_data->dmab[dai][1]);
	}

	return 0;
}
/* ASoC platform driver: PCM ops plus probe-time DMA/module setup. */
static struct snd_soc_platform_driver hsw_soc_platform = {
	.probe		= hsw_pcm_probe,
	.remove		= hsw_pcm_remove,
	.ops		= &hsw_pcm_ops,
	.pcm_new	= hsw_pcm_new,
};

/* DAI component: exposes the volume/Waves controls and DAPM topology. */
static const struct snd_soc_component_driver hsw_dai_component = {
	.name = "haswell-dai",
	.controls = hsw_volume_controls,
	.num_controls = ARRAY_SIZE(hsw_volume_controls),
	.dapm_widgets = widgets,
	.num_dapm_widgets = ARRAY_SIZE(widgets),
	.dapm_routes = graph,
	.num_dapm_routes = ARRAY_SIZE(graph),
};
/*
 * hsw_pcm_dev_probe - platform-device probe: boot the DSP and register
 * the ASoC platform and DAI component.
 *
 * Fix: the error paths fell through to "return 0", reporting probe success
 * after the platform/component had been unregistered and the DSP freed;
 * propagate the registration error instead.
 */
static int hsw_pcm_dev_probe(struct platform_device *pdev)
{
	struct sst_pdata *sst_pdata = dev_get_platdata(&pdev->dev);
	struct hsw_priv_data *priv_data;
	int ret;

	if (!sst_pdata)
		return -EINVAL;

	priv_data = devm_kzalloc(&pdev->dev, sizeof(*priv_data), GFP_KERNEL);
	if (!priv_data)
		return -ENOMEM;

	ret = sst_hsw_dsp_init(&pdev->dev, sst_pdata);
	if (ret < 0)
		return -ENODEV;

	priv_data->hsw = sst_pdata->dsp;
	platform_set_drvdata(pdev, priv_data);

	ret = snd_soc_register_platform(&pdev->dev, &hsw_soc_platform);
	if (ret < 0)
		goto err_plat;

	ret = snd_soc_register_component(&pdev->dev, &hsw_dai_component,
		hsw_dais, ARRAY_SIZE(hsw_dais));
	if (ret < 0)
		goto err_comp;

	return 0;

err_comp:
	snd_soc_unregister_platform(&pdev->dev);
err_plat:
	sst_hsw_dsp_free(&pdev->dev, sst_pdata);
	/* propagate the failure instead of claiming success (was: return 0) */
	return ret;
}
/* Platform-device teardown: unregister the ASoC pieces, release the DSP. */
static int hsw_pcm_dev_remove(struct platform_device *pdev)
{
	struct sst_pdata *pdata = dev_get_platdata(&pdev->dev);

	/* teardown mirrors the registration order from probe */
	snd_soc_unregister_platform(&pdev->dev);
	snd_soc_unregister_component(&pdev->dev);
	sst_hsw_dsp_free(&pdev->dev, pdata);

	return 0;
}
#ifdef CONFIG_PM
/* Runtime-PM idle callback: no state to check, allow suspend immediately. */
static int hsw_pcm_runtime_idle(struct device *dev)
{
	return 0;
}
/*
 * Common suspend path used by both runtime suspend and system prepare:
 * stall the DSP, free every runtime module, then put the DSP to sleep.
 * The ordering matters — modules must be freed before the firmware is
 * unloaded by the sleep call.
 */
static int hsw_pcm_suspend(struct device *dev)
{
	struct hsw_priv_data *pdata = dev_get_drvdata(dev);
	struct sst_hsw *hsw = pdata->hsw;
	/* enter D3 state and stall */
	sst_hsw_dsp_runtime_suspend(hsw);
	/* free all runtime modules */
	hsw_pcm_free_modules(pdata);
	/* put the DSP to sleep, fw unloaded after runtime modules freed */
	sst_hsw_dsp_runtime_sleep(hsw);
	return 0;
}
/*
 * Runtime suspend: transition to RTD3.  An active Waves module is disabled
 * first and flagged so runtime resume can re-enable it and replay its
 * parameter buffer.
 */
static int hsw_pcm_runtime_suspend(struct device *dev)
{
	struct hsw_priv_data *pdata = dev_get_drvdata(dev);
	struct sst_hsw *hsw = pdata->hsw;
	int ret;
	/* already in RTD3 or deeper: nothing to do */
	if (pdata->pm_state >= HSW_PM_STATE_RTD3)
		return 0;
	/* fw modules will be unloaded on RTD3, set flag to track */
	if (sst_hsw_is_module_active(hsw, SST_HSW_MODULE_WAVES)) {
		ret = sst_hsw_module_disable(hsw, SST_HSW_MODULE_WAVES, 0);
		if (ret < 0)
			return ret;
		sst_hsw_set_module_enabled_rtd3(hsw, SST_HSW_MODULE_WAVES);
	}
	hsw_pcm_suspend(dev);
	pdata->pm_state = HSW_PM_STATE_RTD3;
	return 0;
}
/*
 * Runtime resume from RTD3: reload the firmware, recreate the runtime
 * modules, resume the DSP, and re-enable the Waves module (replaying its
 * stored parameters) if it was active before suspend.
 */
static int hsw_pcm_runtime_resume(struct device *dev)
{
	struct hsw_priv_data *pdata = dev_get_drvdata(dev);
	struct sst_hsw *hsw = pdata->hsw;
	int ret;
	if (pdata->pm_state != HSW_PM_STATE_RTD3)
		return 0;
	ret = sst_hsw_dsp_load(hsw);
	if (ret < 0) {
		dev_err(dev, "failed to reload %d\n", ret);
		return ret;
	}
	ret = hsw_pcm_create_modules(pdata);
	if (ret < 0) {
		dev_err(dev, "failed to create modules %d\n", ret);
		return ret;
	}
	ret = sst_hsw_dsp_runtime_resume(hsw);
	if (ret < 0)
		return ret;
	else if (ret == 1) /* no action required */
		return 0;
	/* check flag when resume */
	if (sst_hsw_is_module_enabled_rtd3(hsw, SST_HSW_MODULE_WAVES)) {
		ret = sst_hsw_module_enable(hsw, SST_HSW_MODULE_WAVES, 0);
		if (ret < 0)
			return ret;
		/* put parameters from buffer to dsp */
		ret = sst_hsw_launch_param_buf(hsw);
		if (ret < 0)
			return ret;
		/* unset flag */
		sst_hsw_set_module_disabled_rtd3(hsw, SST_HSW_MODULE_WAVES);
	}
	pdata->pm_state = HSW_PM_STATE_D0;
	return ret;
}
#else
#define hsw_pcm_runtime_idle NULL
#define hsw_pcm_runtime_suspend NULL
#define hsw_pcm_runtime_resume NULL
#endif
#ifdef CONFIG_PM
/*
 * System-resume .complete callback: undo hsw_pcm_prepare().  Reload the
 * firmware, recreate the runtime modules, restore each open stream's saved
 * context, resume the sound card, then bring the DSP back to D0.
 */
static void hsw_pcm_complete(struct device *dev)
{
	struct hsw_priv_data *pdata = dev_get_drvdata(dev);
	struct sst_hsw *hsw = pdata->hsw;
	struct hsw_pcm_data *pcm_data;
	int i, err;
	/* only meaningful after a full D3 suspend via hsw_pcm_prepare() */
	if (pdata->pm_state != HSW_PM_STATE_D3)
		return;
	err = sst_hsw_dsp_load(hsw);
	if (err < 0) {
		dev_err(dev, "failed to reload %d\n", err);
		return;
	}
	err = hsw_pcm_create_modules(pdata);
	if (err < 0) {
		dev_err(dev, "failed to create modules %d\n", err);
		return;
	}
	/* restore the saved persistent context of every open stream */
	for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
		pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
		if (!pcm_data->substream)
			continue;
		err = sst_module_runtime_restore(pcm_data->runtime,
			&pcm_data->context);
		if (err < 0)
			dev_err(dev, "failed to restore context for PCM %d\n", i);
	}
	snd_soc_resume(pdata->soc_card->dev);
	err = sst_hsw_dsp_runtime_resume(hsw);
	if (err < 0)
		return;
	else if (err == 1) /* no action required */
		return;
	pdata->pm_state = HSW_PM_STATE_D0;
	return;
}
/*
 * System-suspend .prepare callback: suspend all active PCM streams, save
 * each stream's persistent module context, shut the DSP down, and power
 * off the sound card.  Paired with hsw_pcm_complete() on resume.
 */
static int hsw_pcm_prepare(struct device *dev)
{
	struct hsw_priv_data *pdata = dev_get_drvdata(dev);
	struct hsw_pcm_data *pcm_data;
	int i, err;
	if (pdata->pm_state == HSW_PM_STATE_D3)
		return 0;
	else if (pdata->pm_state == HSW_PM_STATE_D0) {
		/* suspend all active streams */
		for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
			pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
			if (!pcm_data->substream)
				continue;
			dev_dbg(dev, "suspending pcm %d\n", i);
			snd_pcm_suspend_all(pcm_data->hsw_pcm);
			/* We need to wait until the DSP FW stops the streams */
			msleep(2);
		}
		/* preserve persistent memory */
		for (i = 0; i < ARRAY_SIZE(mod_map); i++) {
			pcm_data = &pdata->pcm[mod_map[i].dai_id][mod_map[i].stream];
			if (!pcm_data->substream)
				continue;
			dev_dbg(dev, "saving context pcm %d\n", i);
			err = sst_module_runtime_save(pcm_data->runtime,
				&pcm_data->context);
			if (err < 0)
				dev_err(dev, "failed to save context for PCM %d\n", i);
		}
		hsw_pcm_suspend(dev);
	}
	snd_soc_suspend(pdata->soc_card->dev);
	snd_soc_poweroff(pdata->soc_card->dev);
	pdata->pm_state = HSW_PM_STATE_D3;
	return 0;
}
#else
#define hsw_pcm_prepare NULL
#define hsw_pcm_complete NULL
#endif
/* Runtime PM handles RTD3; prepare/complete handle full system suspend. */
static const struct dev_pm_ops hsw_pcm_pm = {
	.runtime_idle = hsw_pcm_runtime_idle,
	.runtime_suspend = hsw_pcm_runtime_suspend,
	.runtime_resume = hsw_pcm_runtime_resume,
	.prepare = hsw_pcm_prepare,
	.complete = hsw_pcm_complete,
};

/* bound by name to the "haswell-pcm-audio" platform device */
static struct platform_driver hsw_pcm_driver = {
	.driver = {
		.name = "haswell-pcm-audio",
		.pm = &hsw_pcm_pm,
	},
	.probe = hsw_pcm_dev_probe,
	.remove = hsw_pcm_dev_remove,
};
module_platform_driver(hsw_pcm_driver);
MODULE_AUTHOR("Liam Girdwood, Xingchao Wang");
MODULE_DESCRIPTION("Haswell/Lynxpoint + Broadwell/Wildcatpoint PCM");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:haswell-pcm-audio");
| gpl-2.0 |
segment-routing/openwrt | drivers/net/wireless/cw1200/main.c | 628 | 16825 | /*
* mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers
*
* Copyright (c) 2010, ST-Ericsson
* Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
*
* Based on:
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* Based on:
* - the islsm (softmac prism54) driver, which is:
* Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
* - stlc45xx driver
* Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include "cw1200.h"
#include "txrx.h"
#include "hwbus.h"
#include "fwio.h"
#include "hwio.h"
#include "bh.h"
#include "sta.h"
#include "scan.h"
#include "debug.h"
#include "pm.h"
MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code");
MODULE_LICENSE("GPL");
MODULE_ALIAS("cw1200_core");
/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */
static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00};
module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO);
MODULE_PARM_DESC(macaddr, "Override platform_data MAC address");

/* alternate path to the SDD configuration file */
static char *cw1200_sdd_path;
module_param(cw1200_sdd_path, charp, 0644);
MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file");

/* alternate reference clock frequency */
static int cw1200_refclk;
module_param(cw1200_refclk, int, 0644);
MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock");

/* WSM power mode; defaults to quiescent (lowest power) */
int cw1200_power_mode = wsm_power_mode_quiescent;
module_param(cw1200_power_mode, int, 0644);
MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode.  0 == active, 1 == doze, 2 == quiescent (default)");
/* shorthand for one ieee80211_rate table entry */
#define RATETAB_ENT(_rate, _rateid, _flags)		\
	{						\
		.bitrate	= (_rate),		\
		.hw_value	= (_rateid),		\
		.flags		= (_flags),		\
	}

/* legacy CCK/OFDM rates; .bitrate is in units of 100 kbps */
static struct ieee80211_rate cw1200_rates[] = {
	RATETAB_ENT(10,  0,   0),
	RATETAB_ENT(20,  1,   0),
	RATETAB_ENT(55,  2,   0),
	RATETAB_ENT(110, 3,   0),
	RATETAB_ENT(60,  6,  0),
	RATETAB_ENT(90,  7,  0),
	RATETAB_ENT(120, 8,  0),
	RATETAB_ENT(180, 9,  0),
	RATETAB_ENT(240, 10, 0),
	RATETAB_ENT(360, 11, 0),
	RATETAB_ENT(480, 12, 0),
	RATETAB_ENT(540, 13, 0),
};

/* 802.11n single-stream MCS 0-7 rates */
static struct ieee80211_rate cw1200_mcs_rates[] = {
	RATETAB_ENT(65,  14, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS),
	RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS),
};

/* 5 GHz band omits the four CCK rates at the start of cw1200_rates */
#define cw1200_a_rates		(cw1200_rates + 4)
#define cw1200_a_rates_size	(ARRAY_SIZE(cw1200_rates) - 4)
#define cw1200_g_rates		(cw1200_rates + 0)
#define cw1200_g_rates_size	(ARRAY_SIZE(cw1200_rates))
#define cw1200_n_rates		(cw1200_mcs_rates)
#define cw1200_n_rates_size	(ARRAY_SIZE(cw1200_mcs_rates))
/* one 2.4 GHz channel entry; frequency given explicitly in MHz */
#define CHAN2G(_channel, _freq, _flags) {			\
	.band			= IEEE80211_BAND_2GHZ,		\
	.center_freq		= (_freq),			\
	.hw_value		= (_channel),			\
	.flags			= (_flags),			\
	.max_antenna_gain	= 0,				\
	.max_power		= 30,				\
}

/* one 5 GHz channel entry; frequency derived as 5000 + 5 * channel MHz */
#define CHAN5G(_channel, _flags) {				\
	.band			= IEEE80211_BAND_5GHZ,		\
	.center_freq	= 5000 + (5 * (_channel)),		\
	.hw_value		= (_channel),			\
	.flags			= (_flags),			\
	.max_antenna_gain	= 0,				\
	.max_power		= 30,				\
}

/* full 2.4 GHz channel set, 1-14 */
static struct ieee80211_channel cw1200_2ghz_chantable[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};

/* 5 GHz channel set; regulatory restrictions applied elsewhere via flags */
static struct ieee80211_channel cw1200_5ghz_chantable[] = {
	CHAN5G(34, 0),		CHAN5G(36, 0),
	CHAN5G(38, 0),		CHAN5G(40, 0),
	CHAN5G(42, 0),		CHAN5G(44, 0),
	CHAN5G(46, 0),		CHAN5G(48, 0),
	CHAN5G(52, 0),		CHAN5G(56, 0),
	CHAN5G(60, 0),		CHAN5G(64, 0),
	CHAN5G(100, 0),		CHAN5G(104, 0),
	CHAN5G(108, 0),		CHAN5G(112, 0),
	CHAN5G(116, 0),		CHAN5G(120, 0),
	CHAN5G(124, 0),		CHAN5G(128, 0),
	CHAN5G(132, 0),		CHAN5G(136, 0),
	CHAN5G(140, 0),		CHAN5G(149, 0),
	CHAN5G(153, 0),		CHAN5G(157, 0),
	CHAN5G(161, 0),		CHAN5G(165, 0),
	CHAN5G(184, 0),		CHAN5G(188, 0),
	CHAN5G(192, 0),		CHAN5G(196, 0),
	CHAN5G(200, 0),		CHAN5G(204, 0),
	CHAN5G(208, 0),		CHAN5G(212, 0),
	CHAN5G(216, 0),
};
/* 2.4 GHz band: legacy + single-stream HT (greenfield, RX STBC, A-MSDU) */
static struct ieee80211_supported_band cw1200_band_2ghz = {
	.channels = cw1200_2ghz_chantable,
	.n_channels = ARRAY_SIZE(cw1200_2ghz_chantable),
	.bitrates = cw1200_g_rates,
	.n_bitrates = cw1200_g_rates_size,
	.ht_cap = {
		.cap = IEEE80211_HT_CAP_GRN_FLD |
			(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
			IEEE80211_HT_CAP_MAX_AMSDU,
		.ht_supported = 1,
		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
		.mcs = {
			/* single spatial stream: MCS 0-7 only */
			.rx_mask[0] = 0xFF,
			.rx_highest = __cpu_to_le16(0x41),
			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
		},
	},
};

/* 5 GHz band: same HT capabilities, OFDM-only rate table */
static struct ieee80211_supported_band cw1200_band_5ghz = {
	.channels = cw1200_5ghz_chantable,
	.n_channels = ARRAY_SIZE(cw1200_5ghz_chantable),
	.bitrates = cw1200_a_rates,
	.n_bitrates = cw1200_a_rates_size,
	.ht_cap = {
		.cap = IEEE80211_HT_CAP_GRN_FLD |
			(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
			IEEE80211_HT_CAP_MAX_AMSDU,
		.ht_supported = 1,
		.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
		.mcs = {
			.rx_mask[0] = 0xFF,
			.rx_highest = __cpu_to_le16(0x41),
			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
		},
	},
};
static const unsigned long cw1200_ttl[] = {
1 * HZ, /* VO */
2 * HZ, /* VI */
5 * HZ, /* BE */
10 * HZ /* BK */
};
/* mac80211 callback table for the CW1200 driver. */
static const struct ieee80211_ops cw1200_ops = {
	.start			= cw1200_start,
	.stop			= cw1200_stop,
	.add_interface		= cw1200_add_interface,
	.remove_interface	= cw1200_remove_interface,
	.change_interface	= cw1200_change_interface,
	.tx			= cw1200_tx,
	.hw_scan		= cw1200_hw_scan,
	.set_tim		= cw1200_set_tim,
	.sta_notify		= cw1200_sta_notify,
	.sta_add		= cw1200_sta_add,
	.sta_remove		= cw1200_sta_remove,
	.set_key		= cw1200_set_key,
	.set_rts_threshold	= cw1200_set_rts_threshold,
	.config			= cw1200_config,
	.bss_info_changed	= cw1200_bss_info_changed,
	.prepare_multicast	= cw1200_prepare_multicast,
	.configure_filter	= cw1200_configure_filter,
	.conf_tx		= cw1200_conf_tx,
	.get_stats		= cw1200_get_stats,
	.ampdu_action		= cw1200_ampdu_action,
	.flush			= cw1200_flush,
#ifdef CONFIG_PM
	.suspend		= cw1200_wow_suspend,
	.resume			= cw1200_wow_resume,
#endif
	/* Intentionally not offloaded: */
	/*.channel_switch	= cw1200_channel_switch, */
	/*.remain_on_channel	= cw1200_remain_on_channel, */
	/*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */
};

/* Module-tunable block-ACK TID masks; -1 means "driver default"
 * (all TIDs enabled — see cw1200_init_common()). */
static int cw1200_ba_rx_tids = -1;
static int cw1200_ba_tx_tids = -1;
module_param(cw1200_ba_rx_tids, int, 0644);
module_param(cw1200_ba_tx_tids, int, 0644);
MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs");
MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs");
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support cw1200_wowlan_support = {
	/* Support only for limited wowlan functionalities */
	.flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
};
#endif
/*
 * Allocate and initialize the mac80211 hw/priv pair for a CW1200 device.
 *
 * @macaddr:   permanent MAC address, or NULL to use the template (with
 *             random bytes substituted into the low three octets).
 * @have_5ghz: also register the 5 GHz band.
 *
 * Returns the allocated ieee80211_hw, or NULL on failure.  The caller
 * must later register it with cw1200_register_common() and release it
 * with cw1200_free_common().
 *
 * Fixes vs. the previous revision:
 *  - create_singlethread_workqueue() failure was not checked, leading to
 *    a NULL workqueue being used later;
 *  - the workqueue leaked on the queue/stats init error paths.
 */
static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
						const bool have_5ghz)
{
	int i, band;
	struct ieee80211_hw *hw;
	struct cw1200_common *priv;

	hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops);
	if (!hw)
		return NULL;

	priv = hw->priv;
	priv->hw = hw;
	priv->hw_type = -1;
	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
	priv->rates = cw1200_rates; /* TODO: fetch from FW */
	priv->mcs_rates = cw1200_n_rates;
	if (cw1200_ba_rx_tids != -1)
		priv->ba_rx_tid_mask = cw1200_ba_rx_tids;
	else
		priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */
	if (cw1200_ba_tx_tids != -1)
		priv->ba_tx_tid_mask = cw1200_ba_tx_tids;
	else
		priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */

	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, CONNECTION_MONITOR);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SUPPORTS_PS);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_ADHOC) |
				     BIT(NL80211_IFTYPE_AP) |
				     BIT(NL80211_IFTYPE_MESH_POINT) |
				     BIT(NL80211_IFTYPE_P2P_CLIENT) |
				     BIT(NL80211_IFTYPE_P2P_GO);

#ifdef CONFIG_PM
	hw->wiphy->wowlan = &cw1200_wowlan_support;
#endif

	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;

	hw->queues = 4;

	priv->rts_threshold = -1;

	hw->max_rates = 8;
	hw->max_rate_tries = 15;
	hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM +
		8;  /* TKIP IV */

	hw->sta_data_size = sizeof(struct cw1200_sta_priv);

	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
	if (have_5ghz)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;

	/* Channel params have to be cleared before registering wiphy again */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++) {
			sband->channels[i].flags = 0;
			sband->channels[i].max_antenna_gain = 0;
			sband->channels[i].max_power = 30;
		}
	}

	hw->wiphy->max_scan_ssids = 2;
	hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;

	if (macaddr)
		SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr);
	else
		SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template);

	/* Fix up mac address if necessary */
	if (hw->wiphy->perm_addr[3] == 0 &&
	    hw->wiphy->perm_addr[4] == 0 &&
	    hw->wiphy->perm_addr[5] == 0) {
		get_random_bytes(&hw->wiphy->perm_addr[3], 3);
	}

	mutex_init(&priv->wsm_cmd_mux);
	mutex_init(&priv->conf_mutex);
	priv->workqueue = create_singlethread_workqueue("cw1200_wq");
	/* FIX: workqueue creation can fail; don't continue with a NULL wq. */
	if (!priv->workqueue) {
		ieee80211_free_hw(hw);
		return NULL;
	}
	sema_init(&priv->scan.lock, 1);
	INIT_WORK(&priv->scan.work, cw1200_scan_work);
	INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
	INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout);
	INIT_DELAYED_WORK(&priv->clear_recent_scan_work,
			  cw1200_clear_recent_scan_work);
	INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout);
	INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work);
	INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work);
	INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work);
	INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work);
	spin_lock_init(&priv->event_queue_lock);
	INIT_LIST_HEAD(&priv->event_queue);
	INIT_WORK(&priv->event_handler, cw1200_event_handler);
	INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work);
	INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work);
	spin_lock_init(&priv->bss_loss_lock);
	spin_lock_init(&priv->ps_state_lock);
	INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work);
	INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work);
	INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work);
	INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work);
	INIT_WORK(&priv->link_id_work, cw1200_link_id_work);
	INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work);
	INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset);
	INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
	INIT_WORK(&priv->set_beacon_wakeup_period_work,
		  cw1200_set_beacon_wakeup_period_work);
	setup_timer(&priv->mcast_timeout, cw1200_mcast_timeout,
		    (unsigned long)priv);

	if (cw1200_queue_stats_init(&priv->tx_queue_stats,
				    CW1200_LINK_ID_MAX,
				    cw1200_skb_dtor,
				    priv)) {
		/* FIX: release the workqueue allocated above. */
		destroy_workqueue(priv->workqueue);
		ieee80211_free_hw(hw);
		return NULL;
	}

	for (i = 0; i < 4; ++i) {
		if (cw1200_queue_init(&priv->tx_queue[i],
				      &priv->tx_queue_stats, i, 16,
				      cw1200_ttl[i])) {
			for (; i > 0; i--)
				cw1200_queue_deinit(&priv->tx_queue[i - 1]);
			cw1200_queue_stats_deinit(&priv->tx_queue_stats);
			/* FIX: release the workqueue allocated above. */
			destroy_workqueue(priv->workqueue);
			ieee80211_free_hw(hw);
			return NULL;
		}
	}

	init_waitqueue_head(&priv->channel_switch_done);
	init_waitqueue_head(&priv->wsm_cmd_wq);
	init_waitqueue_head(&priv->wsm_startup_done);
	init_waitqueue_head(&priv->ps_mode_switch_done);
	wsm_buf_init(&priv->wsm_cmd_buf);
	spin_lock_init(&priv->wsm_cmd.lock);
	priv->wsm_cmd.done = 1;
	tx_policy_init(priv);

	return hw;
}
/*
 * Register the hw with mac80211 (and, when CONFIG_PM is set, bring up
 * the PM helper first).  Returns 0 on success or a negative errno; on
 * failure everything initialized here has been torn down again.
 */
static int cw1200_register_common(struct ieee80211_hw *dev)
{
	struct cw1200_common *priv = dev->priv;
	int ret = 0;

#ifdef CONFIG_PM
	ret = cw1200_pm_init(&priv->pm_state, priv);
	if (ret) {
		pr_err("Cannot init PM. (%d).\n", ret);
		goto done;
	}
#endif
	ret = ieee80211_register_hw(dev);
	if (ret) {
		pr_err("Cannot register device (%d).\n", ret);
#ifdef CONFIG_PM
		cw1200_pm_deinit(&priv->pm_state);
#endif
		goto done;
	}

	cw1200_debug_init(priv);
	pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy));
done:
	return ret;
}
/* Counterpart of cw1200_init_common(): release the ieee80211_hw. */
static void cw1200_free_common(struct ieee80211_hw *dev)
{
	ieee80211_free_hw(dev);
}
/*
 * Undo cw1200_register_common()/cw1200_init_common(): unregister from
 * mac80211 first, then stop timers and the BH thread, tear down debugfs,
 * the workqueue, the SDD firmware reference and the TX queues.
 */
static void cw1200_unregister_common(struct ieee80211_hw *dev)
{
	struct cw1200_common *priv = dev->priv;
	int i;

	ieee80211_unregister_hw(dev);

	del_timer_sync(&priv->mcast_timeout);
	cw1200_unregister_bh(priv);

	cw1200_debug_release(priv);

	mutex_destroy(&priv->conf_mutex);

	wsm_buf_deinit(&priv->wsm_cmd_buf);

	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	if (priv->sdd) {
		release_firmware(priv->sdd);
		priv->sdd = NULL;
	}

	for (i = 0; i < 4; ++i)
		cw1200_queue_deinit(&priv->tx_queue[i]);

	cw1200_queue_stats_deinit(&priv->tx_queue_stats);

#ifdef CONFIG_PM
	cw1200_pm_deinit(&priv->pm_state);
#endif
}
/* Clock is in KHz */
u32 cw1200_dpll_from_clk(u16 clk_khz)
{
	/* Known reference-clock frequencies and their DPLL register values. */
	static const struct {
		u16 clk;
		u32 dpll;
	} dpll_tbl[] = {
		{ 0x32C8, 0x1D89D241 }, /* 13000 KHz */
		{ 0x3E80, 0x000001E1 }, /* 16000 KHz */
		{ 0x41A0, 0x124931C1 }, /* 16800 KHz */
		{ 0x4B00, 0x00000191 }, /* 19200 KHz */
		{ 0x5DC0, 0x00000141 }, /* 24000 KHz */
		{ 0x6590, 0x0EC4F121 }, /* 26000 KHz */
		{ 0x8340, 0x092490E1 }, /* 33600 KHz */
		{ 0x9600, 0x100010C1 }, /* 38400 KHz */
		{ 0x9C40, 0x000000C1 }, /* 40000 KHz */
		{ 0xBB80, 0x000000A1 }, /* 48000 KHz */
		{ 0xCB20, 0x07627091 }, /* 52000 KHz */
	};
	unsigned int i;

	for (i = 0; i < sizeof(dpll_tbl) / sizeof(dpll_tbl[0]); i++) {
		if (dpll_tbl[i].clk == clk_khz)
			return dpll_tbl[i].dpll;
	}

	/* Unknown frequency: warn and fall back to the 26 MHz setting. */
	pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n", clk_khz);
	return 0x0EC4F121;
}
/*
 * cw1200_core_probe - bring up a CW1200 device behind a bus glue driver.
 * @hwbus_ops:  bus accessor callbacks (register I/O, lock/unlock)
 * @hwbus:      opaque bus-private handle passed back to @hwbus_ops
 * @pdev:       parent struct device, attached to the ieee80211_hw
 * @core:       out parameter; set to the new cw1200_common, NULL on failure
 * @ref_clk:    reference clock value (overridden by the cw1200_refclk
 *              module parameter when that is set)
 * @macaddr:    permanent MAC address, or NULL for the built-in template
 * @sdd_path:   SDD file path (overridden by cw1200_sdd_path when set)
 * @have_5ghz:  expose the 5 GHz band in addition to 2.4 GHz
 *
 * Returns 0 on success or a negative errno.  On any failure *@core is
 * left NULL and everything allocated here is released again.
 */
int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
		      struct hwbus_priv *hwbus,
		      struct device *pdev,
		      struct cw1200_common **core,
		      int ref_clk, const u8 *macaddr,
		      const char *sdd_path, bool have_5ghz)
{
	int err = -EINVAL;
	struct ieee80211_hw *dev;
	struct cw1200_common *priv;
	struct wsm_operational_mode mode = {
		.power_mode = cw1200_power_mode,
		.disable_more_flag_usage = true,
	};

	dev = cw1200_init_common(macaddr, have_5ghz);
	if (!dev)
		goto err;

	priv = dev->priv;
	priv->hw_refclk = ref_clk;
	if (cw1200_refclk)
		priv->hw_refclk = cw1200_refclk;

	priv->sdd_path = (char *)sdd_path;
	if (cw1200_sdd_path)
		priv->sdd_path = cw1200_sdd_path;

	priv->hwbus_ops = hwbus_ops;
	priv->hwbus_priv = hwbus;
	priv->pdev = pdev;
	SET_IEEE80211_DEV(priv->hw, pdev);

	/* Pass struct cw1200_common back up */
	*core = priv;

	err = cw1200_register_bh(priv);
	if (err)
		goto err1;

	err = cw1200_load_firmware(priv);
	if (err)
		goto err2;

	/* The firmware must signal startup (firmware_ready) within 3 s. */
	if (wait_event_interruptible_timeout(priv->wsm_startup_done,
					     priv->firmware_ready,
					     3*HZ) <= 0) {
		/* TODO: Need to find how to reset device
		   in QUEUE mode properly.
		*/
		pr_err("Timeout waiting on device startup\n");
		err = -ETIMEDOUT;
		goto err2;
	}

	/* Set low-power mode. */
	wsm_set_operational_mode(priv, &mode);

	/* Enable multi-TX confirmation */
	wsm_use_multi_tx_conf(priv, true);

	err = cw1200_register_common(dev);
	if (err)
		goto err2;

	return err;

err2:
	cw1200_unregister_bh(priv);
err1:
	cw1200_free_common(dev);
err:
	*core = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(cw1200_core_probe);
/*
 * cw1200_core_release - counterpart of cw1200_core_probe().
 * @self: the device instance returned through @core by the probe call
 *
 * Masks device interrupts under the bus lock, then unregisters and
 * frees the mac80211 hw.  (Also drops the redundant trailing `return;`
 * from a void function.)
 */
void cw1200_core_release(struct cw1200_common *self)
{
	/* Disable device interrupts */
	self->hwbus_ops->lock(self->hwbus_priv);
	__cw1200_irq_enable(self, 0);
	self->hwbus_ops->unlock(self->hwbus_priv);

	/* And then clean up */
	cw1200_unregister_common(self->hw);
	cw1200_free_common(self->hw);
}
EXPORT_SYMBOL_GPL(cw1200_core_release);
| gpl-2.0 |
burstlam/zte-turies-35 | drivers/usb/host/pci-quirks.c | 1140 | 13863 | /*
* This file contains code to reset and initialize USB host controllers.
* Some of it includes work-arounds for PCI hardware and BIOS quirks.
* It may need to run early during booting -- before USB would normally
* initialize -- to ensure that Linux doesn't use any legacy modes.
*
* Copyright (c) 1999 Martin Mares <mj@ucw.cz>
* (and others)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"
#define UHCI_USBLEGSUP 0xc0 /* legacy support */
#define UHCI_USBCMD 0 /* command register */
#define UHCI_USBINTR 4 /* interrupt register */
#define UHCI_USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
#define UHCI_USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
#define UHCI_USBCMD_RUN 0x0001 /* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET 0x0002 /* Host Controller reset */
#define UHCI_USBCMD_EGSM 0x0008 /* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE 0x0040 /* Config Flag */
#define UHCI_USBINTR_RESUME 0x0002 /* Resume interrupt enable */
#define OHCI_CONTROL 0x04
#define OHCI_CMDSTATUS 0x08
#define OHCI_INTRSTATUS 0x0c
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
#define OHCI_INTR_OC (1 << 30) /* ownership change */
#define EHCI_HCC_PARAMS 0x08 /* extended capabilities */
#define EHCI_USBCMD 0 /* command register */
#define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */
#define EHCI_USBSTS 4 /* status register */
#define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */
#define EHCI_USBINTR 8 /* interrupt register */
#define EHCI_CONFIGFLAG 0x40 /* configured flag register */
#define EHCI_USBLEGSUP 0 /* legacy support register */
#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
#define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 *
 * @pdev: the UHCI controller's PCI device
 * @base: I/O port base of its register set
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);
/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * @pdev: the UHCI controller's PCI device
 * @base: I/O port base of its register set
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	/* State didn't match expectations: do a full, safe reset. */
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
/* Nonzero when the device's PCI_COMMAND register is readable and has
 * every bit of @mask set (used to test PIO/MMIO decoding). */
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
		return 0;
	return (cmd & mask) != 0;
}

#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
/*
 * Find the UHCI controller's first I/O BAR and reset the HC through it,
 * so any BIOS/legacy operation is shut down before the OS driver loads.
 */
static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int bar;

	if (!pio_enabled(pdev))
		return;

	for (bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_IO))
			continue;
		base = pci_resource_start(pdev, bar);
		break;
	}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}
/* Nonzero when BAR @idx has a base address and MMIO decoding is enabled. */
static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	if (!pci_resource_start(pdev, idx))
		return 0;
	return mmio_enabled(pdev) ? 1 : 0;
}
/*
 * Take an OHCI controller away from BIOS/SMM control: if interrupt
 * routing (IR) is active, request an ownership change and wait up to
 * 5 seconds for the firmware to release the HC, then reset it
 * (preserving RWC) and disable all its interrupts.
 */
static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifndef __hppa__
{
	u32 control = readl(base + OHCI_CONTROL);
	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 5 seconds */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		/* Poll until the firmware drops the IR bit or we time out. */
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
					" (BIOS bug?) %08x\n",
					readl(base + OHCI_CONTROL));

		/* reset controller, preserving RWC */
		writel(control & OHCI_CTRL_RWC, base + OHCI_CONTROL);
	}
}
#endif

	/*
	 * disable interrupts
	 */
	writel(~(u32)0, base + OHCI_INTRDISABLE);
	writel(~(u32)0, base + OHCI_INTRSTATUS);

	iounmap(base);
}
/*
 * Complete the EHCI BIOS handoff and quiesce the controller: walk the
 * extended-capabilities list, claim the OS ownership semaphore in the
 * legacy-support capability (forcing the BIOS semaphore clear after one
 * second if the firmware won't release it), disable EHCI SMIs, then halt
 * the controller and mask its interrupts.
 */
static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	int wait_time, delta;
	void __iomem *base, *op_reg_base;
	u32	hcc_params, val;
	u8	offset, cap_length;
	int	count = 256/4;	/* bounds the capability-list walk */
	int	tried_handoff = 0;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities"
	 * spec section 5.1 explains the bios handoff, e.g. for
	 * booting from USB disk or using a usb keyboard
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		u32		cap;
		int		msec;

		pci_read_config_dword(pdev, offset, &cap);
		switch (cap & 0xff) {
		case 1:			/* BIOS/SMM/... handoff support */
			if ((cap & EHCI_USBLEGSUP_BIOS)) {
				dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
				/* BIOS workaround (?): be sure the
				 * pre-Linux code receives the SMI
				 */
				pci_read_config_dword(pdev,
						offset + EHCI_USBLEGCTLSTS,
						&val);
				pci_write_config_dword(pdev,
						offset + EHCI_USBLEGCTLSTS,
						val | EHCI_USBLEGCTLSTS_SOOE);
#endif

				/* some systems get upset if this semaphore is
				 * set for any other reason than forcing a BIOS
				 * handoff..
				 */
				pci_write_config_byte(pdev, offset + 3, 1);
			}

			/* if boot firmware now owns EHCI, spin till
			 * it hands it over.
			 */
			msec = 1000;
			while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
				tried_handoff = 1;
				msleep(10);
				msec -= 10;
				pci_read_config_dword(pdev, offset, &cap);
			}

			if (cap & EHCI_USBLEGSUP_BIOS) {
				/* well, possibly buggy BIOS... try to shut
				 * it down, and hope nothing goes too wrong
				 */
				dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
						" (BIOS bug?) %08x\n", cap);
				pci_write_config_byte(pdev, offset + 2, 0);
			}

			/* just in case, always disable EHCI SMIs */
			pci_write_config_dword(pdev,
					offset + EHCI_USBLEGCTLSTS,
					0);

			/* If the BIOS ever owned the controller then we
			 * can't expect any power sessions to remain intact.
			 */
			if (tried_handoff)
				writel(0, op_reg_base + EHCI_CONFIGFLAG);
			break;
		case 0:			/* illegal reserved capability */
			cap = 0;
			/* FALLTHROUGH */
		default:
			dev_warn(&pdev->dev, "EHCI: unrecognized capability "
					"%02x\n", cap & 0xff);
			break;
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		delta = 100;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(delta);
			wait_time -= delta;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);

	return;
}
/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	/* Always sample at least once, even for a non-positive timeout,
	 * to preserve the original do/while behavior. */
	for (;;) {
		if ((readl(ptr) & mask) == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
		if (wait_usec <= 0)
			return -ETIMEDOUT;
	}
}
/**
 * PCI Quirks for xHCI.
 *
 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we timeout, assume the BIOS is broken and take control anyway.
 */
static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
	void __iomem *base;
	int ext_cap_offset;
	void __iomem *op_reg_base;
	u32 val;
	int timeout;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = ioremap_nocache(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (base == NULL)
		return;

	/*
	 * Find the Legacy Support Capability register -
	 * this is optional for xHCI host controllers.
	 */
	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
	do {
		if (!ext_cap_offset)
			/* We've reached the end of the extended capabilities */
			goto hc_init;
		val = readl(base + ext_cap_offset);
		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
			break;
		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
	} while (1);

	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
	if (val & XHCI_HC_BIOS_OWNED) {
		/* FIX: request ownership by SETTING the OS-owned semaphore
		 * bit.  The previous "val & XHCI_HC_OS_OWNED" cleared every
		 * bit (OS-owned was not yet set), so the request was never
		 * actually made and the 5-second wait always timed out.
		 */
		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

		/* Wait for 5 seconds with 10 microsecond polling interval */
		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
				0, 5000, 10);

		/* Assume a buggy BIOS and take HC ownership anyway */
		if (timeout) {
			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
					" (BIOS bug ?) %08x\n", val);
			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
		}
	}

	/* Disable any BIOS SMIs */
	writel(XHCI_LEGACY_DISABLE_SMI,
			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);

hc_init:
	op_reg_base = base + XHCI_HC_LENGTH(readl(base));

	/* Wait for the host controller to be ready before writing any
	 * operational or runtime registers.  Wait 5 seconds and no more.
	 */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
			5000, 10);
	/* Assume a buggy HC and start HC initialization anyway */
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
				"xHCI HW not ready after 5 sec (HC bug?) "
				"status = 0x%x\n", val);
	}

	/* Send the halt and disable interrupts command */
	val = readl(op_reg_base + XHCI_CMD_OFFSET);
	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
	writel(val, op_reg_base + XHCI_CMD_OFFSET);

	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
			XHCI_MAX_HALT_USEC, 125);
	if (timeout) {
		val = readl(op_reg_base + XHCI_STS_OFFSET);
		dev_warn(&pdev->dev,
				"xHCI HW did not halt within %d usec "
				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
	}

	iounmap(base);
}
/* Dispatch the early BIOS-handoff quirk by USB host controller class. */
static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
{
	switch (pdev->class) {
	case PCI_CLASS_SERIAL_USB_UHCI:
		quirk_usb_handoff_uhci(pdev);
		break;
	case PCI_CLASS_SERIAL_USB_OHCI:
		quirk_usb_handoff_ohci(pdev);
		break;
	case PCI_CLASS_SERIAL_USB_EHCI:
		quirk_usb_disable_ehci(pdev);
		break;
	case PCI_CLASS_SERIAL_USB_XHCI:
		quirk_usb_handoff_xhci(pdev);
		break;
	default:
		break;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
| gpl-2.0 |
Tim1928/kernel-cm7 | kernel/rtmutex.c | 1140 | 30067 | /*
* RT-Mutexes: simple blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner.
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
*
* See Documentation/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include "rtmutex_common.h"
/*
* lock->owner state tracking:
*
* lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
* are used to keep track of the "owner is pending" and "lock has
* waiters" state.
*
* owner bit1 bit0
* NULL 0 0 lock is free (fast acquire possible)
* NULL 0 1 invalid state
* NULL 1 0 Transitional State*
* NULL 1 1 invalid state
* taskpointer 0 0 lock is held (fast release possible)
* taskpointer 0 1 task is pending owner
* taskpointer 1 0 lock is held and has waiters
* taskpointer 1 1 task is pending owner and lock has more waiters
*
* Pending ownership is assigned to the top (highest priority)
* waiter of the lock, when the lock is released. The thread is woken
* up and can now take the lock. Until the lock is taken (bit 0
* cleared) a competing higher priority thread can steal the lock
* which puts the woken up thread back on the waiters list.
*
* The fast atomic compare exchange based acquire and release is only
* possible when bit 0 and 1 of lock->owner are 0.
*
* (*) There's a small time where the owner can be NULL and the
* "lock has waiters" bit is set. This can happen when grabbing the lock.
* To prevent a cmpxchg of the owner releasing the lock, we need to set this
* bit before looking at the lock, hence the reason this is a transitional
* state.
*/
/*
 * Install @owner as the owner of @lock, OR-ing in @mask and keeping the
 * RT_MUTEX_HAS_WAITERS bit set whenever the wait list is non-empty.
 * See the lock->owner state table above.
 */
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}
/* Unconditionally clear the "has waiters" bit in lock->owner. */
static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

/* Drop the "has waiters" bit when the wait list is actually empty. */
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
/* Atomically set RT_MUTEX_HAS_WAITERS in lock->owner: retry the cmpxchg
 * until no concurrent owner update races with us. */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
/* Non-atomic fallback.  NOTE(review): presumably safe because callers
 * serialize on lock->wait_lock when cmpxchg is unavailable — confirm. */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	int prio = task->normal_prio;

	if (unlikely(task_has_pi_waiters(task))) {
		int top_prio = task_top_pi_waiter(task)->pi_list_entry.prio;

		if (top_prio < prio)
			prio = top_prio;
	}
	return prio;
}
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	/* Only call into the scheduler when the effective prio changed. */
	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
 * Max number of times we'll walk the boosting chain
 * (bounds rt_mutex_adjust_prio_chain(); tunable at runtime):
 */
int max_lock_depth = 1024;
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 *
 * @task:            task to start the walk from; the caller must have taken
 *                   a reference (this function drops it via put_task_struct)
 * @deadlock_detect: when set, return -EDEADLK on a detected cycle
 * @orig_lock:       the lock the walk started from (cycle anchor)
 * @orig_waiter:     the waiter the walk started from (may go stale while
 *                   locks are dropped between steps)
 * @top_task:        the task that initiated the chain walk
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before !
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock
	 * and made us the pending owner:
	 */
	if (orig_waiter && !orig_waiter->task)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	/* Trylock only: taking wait_lock while holding pi_lock must not
	 * block, so drop pi_lock and restart the step on contention. */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 *
 * @lock: the lock with a pending owner
 * @task: the task attempting to steal it
 *
 * Returns 1 when @task may take the lock (it is the pending owner, or
 * has strictly higher priority than the pending owner), 0 otherwise.
 */
static inline int try_to_steal_lock(struct rt_mutex *lock,
				    struct task_struct *task)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == task)
		return 1;

	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
	/* Lower numeric value means higher priority; equal doesn't steal. */
	if (task->prio >= pendowner->prio) {
		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owners
	 * pi_waiters list. Remove it and readjust pending owners
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owners pi_waiters queue. So
	 * we have to enqueue this waiter into
	 * task->pi_waiters list. This covers the case,
	 * where task is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority as task->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be task:
	 */
	if (likely(next->task != task)) {
		raw_spin_lock_irqsave(&task->pi_lock, flags);
		plist_add(&next->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}
	return 1;
}
/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 *
 * Returns 1 when current became the owner, 0 otherwise.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	/* Held by somebody else and not stealable: we lose. */
	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 *
 * Returns 0 on success, otherwise the result of the PI-chain walk
 * (e.g. -EDEADLK).  lock->wait_lock is dropped and reacquired when a
 * chain walk is needed.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/*
	 * If the new waiter displaced the previous top waiter, the
	 * owner's PI list and (boosted) priority must be updated.
	 */
	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	/* Hand over in "pending" mode; the woken task completes the take. */
	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that in case the pending owner gets unboosted a
	 * waiter with higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * If we removed the top waiter, the owner's PI list and
	 * boosted priority may have to be readjusted.
	 */
	if (first && owner != current) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/* Nothing to do when not blocked or the priority did not change. */
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	 passed to task_blocks_on_rt_mutex
 *
 * lock->wait_lock must be held by the caller.
 *
 * Returns 0 when the lock was acquired; -EINTR, -ETIMEDOUT or the
 * deadlock-detection result otherwise.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter,
		    int detect_deadlock)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter->task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter->task) {
			ret = task_blocks_on_rt_mutex(lock, waiter, current,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter->task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		/* Drop wait_lock around the actual sleep. */
		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		if (waiter->task)
			schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
/*
 * Slow path lock function:
 *
 * Sets up an on-stack waiter, runs the wait loop in
 * __rt_mutex_slowlock() and cleans up waiter, timer and a possible
 * leftover PI boost afterwards.
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		/* Timer already expired: mark the timeout as consumed. */
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
				  detect_deadlock);

	set_current_state(TASK_RUNNING);

	/* A still-enqueued waiter means we gave up without the lock. */
	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path try-lock function:
 *
 * Returns 1 when the lock was acquired, 0 otherwise.  Never sleeps.
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	/* A recursive trylock by the owner must fail, not self-steal. */
	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/* Fast exit: nobody is waiting, just drop ownership. */
	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	/*
	 * Deadlock detection always requires the slow path; otherwise
	 * first try the lockless NULL -> current ownership transition.
	 */
	if (detect_deadlock || !rt_mutex_cmpxchg(lock, NULL, current))
		return slowfn(lock, state, NULL, detect_deadlock);

	rt_mutex_deadlock_account_lock(lock, current);
	return 0;
}
/*
 * Timed variant of the lock fast path: identical to
 * rt_mutex_fastlock() except that @timeout is handed to the slow path.
 */
static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (detect_deadlock || !rt_mutex_cmpxchg(lock, NULL, current))
		return slowfn(lock, state, timeout, detect_deadlock);

	rt_mutex_deadlock_account_lock(lock, current);
	return 0;
}
/*
 * Fast path for trylock: lockless ownership grab, slow path on
 * contention.  Returns 1 on success, 0 on contention.
 */
static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (unlikely(!rt_mutex_cmpxchg(lock, NULL, current)))
		return slowfn(lock);

	rt_mutex_deadlock_account_lock(lock, current);
	return 1;
}
/*
 * Fast path for unlock: lockless current -> NULL transition, slow path
 * when waiters (or debugging) force it.
 */
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (unlikely(!rt_mutex_cmpxchg(lock, current, NULL))) {
		slowfn(lock);
		return;
	}
	rt_mutex_deadlock_account_unlock(current);
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Uninterruptible; may sleep until the lock is acquired.
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
						 int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
 *			by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 *
 * The hrtimer inside @timeout must be initialized by the caller; it is
 * started and cancelled by the slow path.
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 *
 * Does not sleep.
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 *
 * Must be called by the lock owner.  May deboost the caller when it
 * was priority-boosted by a waiter.
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	/* Clear the magic so debug checks catch use-after-destroy. */
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 * @name: lock name passed to the debug code
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init_raw(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	/* Make proxy_owner the real (non-pending) owner. */
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task which currently owns the lock by proxy
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	mark_rt_mutex_waiters(lock);

	/* Lock free or stealable from a pending owner: take it for task. */
	if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
		/* We got the lock for task. */
		debug_rt_mutex_lock(lock);
		rt_mutex_set_owner(lock, task, 0);
		raw_spin_unlock(&lock->wait_lock);
		rt_mutex_deadlock_account_lock(lock, task);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !waiter->task) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}
	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the task of the highest-priority waiter, or NULL when the
 * lock has no waiters.
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(lock) ?
			rt_mutex_top_waiter(lock)->task : NULL;
}
/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
				  detect_deadlock);

	set_current_state(TASK_RUNNING);

	/* A leftover waiter means we were interrupted or timed out. */
	if (unlikely(waiter->task))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/*
	 * Readjust priority, when we did not get the lock. We might have been
	 * the pending owner and boosted. Since we did not take the lock, the
	 * PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	return ret;
}
| gpl-2.0 |
Sombionix/zissou_kernel | kernel/utsname.c | 1652 | 1689 | /*
* Copyright (C) 2004 IBM Corporation
*
* Author: Serge Hallyn <serue@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/err.h>
#include <linux/slab.h>
/* Allocate a fresh uts namespace with its refcount initialized to one. */
static struct uts_namespace *create_uts_ns(void)
{
	struct uts_namespace *ns = kmalloc(sizeof(*ns), GFP_KERNEL);

	if (ns)
		kref_init(&ns->kref);
	return ns;
}
/*
 * Clone a new ns copying an original utsname, setting refcount to 1
 * @old_ns: namespace to clone
 * Return ERR_PTR(-ENOMEM) on allocation failure, new ns otherwise
 */
static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns)
{
	struct uts_namespace *ns;

	ns = create_uts_ns();
	if (!ns)
		return ERR_PTR(-ENOMEM);

	/* Read the name fields under uts_sem for a consistent snapshot. */
	down_read(&uts_sem);
	memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
	up_read(&uts_sem);
	return ns;
}
/*
 * Copy task tsk's utsname namespace, or clone it if flags
 * specifies CLONE_NEWUTS. In latter case, changes to the
 * utsname of this process won't be seen by parent, and vice
 * versa.
 *
 * Returns a referenced namespace (old or cloned), or an ERR_PTR
 * when cloning fails.
 */
struct uts_namespace *copy_utsname(unsigned long flags, struct uts_namespace *old_ns)
{
	struct uts_namespace *new_ns;

	BUG_ON(!old_ns);
	/* Pin old_ns across the (possible) clone below. */
	get_uts_ns(old_ns);

	/* Not unsharing: the taken reference is the caller's. */
	if (!(flags & CLONE_NEWUTS))
		return old_ns;

	new_ns = clone_uts_ns(old_ns);

	/* Drop the temporary reference; the clone stands on its own. */
	put_uts_ns(old_ns);
	return new_ns;
}
/* kref release callback: last reference to the namespace is gone. */
void free_uts_ns(struct kref *kref)
{
	kfree(container_of(kref, struct uts_namespace, kref));
}
| gpl-2.0 |
playfulgod/kernel_lge_i_vzw | net/netfilter/nf_conntrack_proto_tcp.c | 1652 | 45601 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
/* "Be conservative in what you do,
    be liberal in what you accept from others."
    If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;

/* If it is set to zero, we disable picking up already established
   connections. */
static int nf_ct_tcp_loose __read_mostly = 1;

/* Max number of the retransmitted packets without receiving an (acceptable)
   ACK from the destination. If this number is reached, a shorter timer
   will be started. */
static int nf_ct_tcp_max_retrans __read_mostly = 3;

/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
   closely. They're more complex. --RR */

/* Human-readable state names, indexed by enum tcp_conntrack. */
static const char *const tcp_conntrack_names[] = {
	"NONE",
	"SYN_SENT",
	"SYN_RECV",
	"ESTABLISHED",
	"FIN_WAIT",
	"CLOSE_WAIT",
	"LAST_ACK",
	"TIME_WAIT",
	"CLOSE",
	"SYN_SENT2",
};

/* Postfix time-unit helpers: written as "2 MINS" etc. below. */
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS

/* RFC1122 says the R2 limit should be at least 100 seconds.
   Linux uses 15 packets as limit, which corresponds
   to ~13-30min depending on RTO. */
static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;

/* Per-state conntrack expiry timeouts, indexed by enum tcp_conntrack. */
static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
};
/* Short aliases for conntrack states, used in the transition table. */
#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE

/* What TCP flags are set from RST/SYN/FIN/ACK.
   Row selector for tcp_conntracks[]; see get_conntrack_index(). */
enum tcp_bit_set {
	TCP_SYN_SET,
	TCP_SYNACK_SET,
	TCP_FIN_SET,
	TCP_ACK_SET,
	TCP_RST_SET,
	TCP_NONE_SET,
};
/*
* The TCP state transition table needs a few words...
*
* We are the man in the middle. All the packets go through us
* but might get lost in transit to the destination.
* It is assumed that the destinations can't receive segments
* we haven't seen.
*
* The checked segment is in window, but our windows are *not*
* equivalent with the ones of the sender/receiver. We always
* try to guess the state of the current sender.
*
* The meaning of the states are:
*
* NONE: initial state
* SYN_SENT: SYN-only packet seen
* SYN_SENT2: SYN-only packet seen from reply dir, simultaneous open
* SYN_RECV: SYN-ACK packet seen
* ESTABLISHED: ACK packet seen
* FIN_WAIT: FIN packet seen
* CLOSE_WAIT: ACK seen (after FIN)
* LAST_ACK: FIN seen (after FIN)
* TIME_WAIT: last ACK seen
* CLOSE: closed connection (RST)
*
* Packets marked as IGNORED (sIG):
* if they may be either invalid or valid
* and the receiver may send back a connection
* closing RST or a SYN/ACK.
*
* Packets marked as INVALID (sIV):
* if we regard them as truly invalid packets
*/
/*
 * State transition table, indexed as
 * tcp_conntracks[direction][tcp_bit_set][old state] -> new state.
 * Direction 0 = ORIGINAL, 1 = REPLY; the middle index comes from
 * get_conntrack_index().
 */
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
	{
/* ORIGINAL */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*
 *	sNO -> sSS	Initialize a new connection
 *	sSS -> sSS	Retransmitted SYN
 *	sS2 -> sS2	Late retransmitted SYN
 *	sSR -> sIG
 *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
 *			are errors. Receiver will reply with RST
 *			and close the connection.
 *			Or we are not in sync and hold a dead connection.
 *	sFW -> sIG
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sSS	Reopened connection (RFC 1122).
 *	sCL -> sSS
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
 *	sNO -> sIV	Too late and no reason to do anything
 *	sSS -> sIV	Client can't send SYN and then SYN/ACK
 *	sS2 -> sSR	SYN/ACK sent to SYN2 in simultaneous open
 *	sSR -> sIG
 *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
 *			are errors. Receiver will reply with RST
 *			and close the connection.
 *			Or we are not in sync and hold a dead connection.
 *	sFW -> sIG
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sIG
 *	sCL -> sIG
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sNO -> sIV	Too late and no reason to do anything...
 *	sSS -> sIV	Client might not send FIN in this state:
 *			we enforce waiting for a SYN/ACK reply first.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions, waiting for
 *			the last ACK.
 *			Might be a retransmitted FIN as well...
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*
 *	sNO -> sES	Assumed.
 *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
 *	sS2 -> sIV
 *	sSR -> sES	Established state is reached.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected.
 *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
 *	sCL -> sCL
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	},
	{
/* REPLY */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 },
/*
 *	sNO -> sIV	Never reached.
 *	sSS -> sS2	Simultaneous open
 *	sS2 -> sS2	Retransmitted simultaneous SYN
 *	sSR -> sIV	Invalid SYN packets sent by the server
 *	sES -> sIV
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sIV	Reopened connection, but server may not do it.
 *	sCL -> sIV
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
 *	sSS -> sSR	Standard open.
 *	sS2 -> sSR	Simultaneous open
 *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
 *	sES -> sIG	Late retransmitted SYN/ACK?
 *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sIG
 *	sCL -> sIG
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sSS -> sIV	Server might not send FIN in this state.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions.
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*
 *	sSS -> sIG	Might be a half-open connection.
 *	sS2 -> sIG
 *	sSR -> sSR	Might answer late resent SYN.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected.
 *	sTW -> sTW	Retransmitted last ACK.
 *	sCL -> sCL
 */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	}
};
/* Extract the TCP source/destination ports of the segment into @tuple. */
static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
			     struct nf_conntrack_tuple *tuple)
{
	struct tcphdr _hdr;
	const struct tcphdr *th;

	/* Only the first 8 bytes (ports + sequence) are required. */
	th = skb_header_pointer(skb, dataoff, 8, &_hdr);
	if (th == NULL)
		return false;

	tuple->src.u.tcp.port = th->source;
	tuple->dst.u.tcp.port = th->dest;
	return true;
}
/* Build the tuple of the reply direction: ports swapped relative to @orig. */
static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
			     const struct nf_conntrack_tuple *orig)
{
	tuple->dst.u.tcp.port = orig->src.u.tcp.port;
	tuple->src.u.tcp.port = orig->dst.u.tcp.port;
	return true;
}
/* Print out the per-protocol part of the tuple. */
static int tcp_print_tuple(struct seq_file *s,
			   const struct nf_conntrack_tuple *tuple)
{
	/* Ports are kept in network byte order; convert for display. */
	return seq_printf(s, "sport=%hu dport=%hu ",
			  ntohs(tuple->src.u.tcp.port),
			  ntohs(tuple->dst.u.tcp.port));
}
/* Print out the private part of the conntrack. */
static int tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
	enum tcp_conntrack state;

	/* Snapshot the state under ct->lock; it may change concurrently. */
	spin_lock_bh(&ct->lock);
	state = ct->proto.tcp.state;
	spin_unlock_bh(&ct->lock);

	return seq_printf(s, "%s ", tcp_conntrack_names[state]);
}
/* Map the flag bits of a TCP segment to a row of the state table. */
static unsigned int get_conntrack_index(const struct tcphdr *tcph)
{
	/* Precedence: RST, then SYN / SYN+ACK, then FIN, then bare ACK. */
	if (tcph->rst)
		return TCP_RST_SET;
	if (tcph->syn)
		return tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET;
	if (tcph->fin)
		return TCP_FIN_SET;
	if (tcph->ack)
		return TCP_ACK_SET;
	return TCP_NONE_SET;
}
/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
in IP Filter' by Guido van Rooij.
http://www.sane.nl/events/sane2000/papers.html
http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
The boundaries and the conditions are changed according to RFC793:
the packet must intersect the window (i.e. segments may be
after the right or before the left edge) and thus receivers may ACK
segments after the right edge of the window.
td_maxend = max(sack + max(win,1)) seen in reply packets
td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
td_maxwin += seq + len - sender.td_maxend
if seq + len > sender.td_maxend
td_end = max(seq + len) seen in sent packets
I. Upper bound for valid data: seq <= sender.td_maxend
II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
III. Upper bound for valid (s)ack: sack <= receiver.td_end
IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
where sack is the highest right edge of sack block found in the packet
or ack in the case of packet without SACK option.
The upper bound limit for a valid (s)ack is not ignored -
we doesn't have to deal with fragments.
*/
/*
 * Sequence number of the byte just past this segment: start sequence
 * plus TCP payload length, with SYN and FIN each counting as one unit
 * of sequence space.  @len is the full skb length measured from
 * @dataoff, so the TCP header (doff 32-bit words) is subtracted out.
 */
static inline __u32 segment_seq_plus_len(__u32 seq,
					 size_t len,
					 unsigned int dataoff,
					 const struct tcphdr *tcph)
{
	/* XXX Should I use payload length field in IP/IPv6 header ?
	 * - YK */
	__u32 end = seq + len - dataoff - tcph->doff * 4;

	if (tcph->syn)
		end++;
	if (tcph->fin)
		end++;
	return end;
}
/* Fixme: what about big packets? */
#define MAXACKWINCONST			66000
/* Effective maximum ACK window for check IV: the largest window the
 * sender has advertised so far, but never less than MAXACKWINCONST. */
#define MAXACKWINDOW(sender)						\
	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
					      : MAXACKWINCONST)
/*
 * Simplified tcp_parse_options routine from tcp_input.c
 *
 * Walks the TCP option area and records the window scale factor and
 * the SACK-permitted flag into @state (td_scale and flags).  All other
 * options are skipped.
 */
static void tcp_options(const struct sk_buff *skb,
			unsigned int dataoff,
			const struct tcphdr *tcph,
			struct ip_ct_tcp_state *state)
{
	/* Scratch buffer sized for the maximum option area:
	 * a 15-word header minus the fixed 20-byte header. */
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	/* tcp_error() already validated th->doff against skb->len. */
	BUG_ON(ptr == NULL);

	state->td_scale =
	state->flags = 0;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				break;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK_PERM
			    && opsize == TCPOLEN_SACK_PERM)
				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
			else if (opcode == TCPOPT_WINDOW
				 && opsize == TCPOLEN_WINDOW) {
				state->td_scale = *(u_int8_t *)ptr;

				if (state->td_scale > 14) {
					/* See RFC1323 */
					state->td_scale = 14;
				}
				state->flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
			}
			/* opsize counts the opcode and length bytes too,
			 * hence the -2 when advancing past the payload. */
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
/*
 * Scan the TCP options of @tcph for SACK blocks and raise *@sack to the
 * highest right edge found; *@sack is left untouched otherwise.
 *
 * Fix: the timestamp-only fast path compared 'length' against
 * TCPOLEN_TSTAMP_ALIGNED*4 (48 bytes), which can never match because
 * the TCP option area is at most 40 bytes — the fast path was dead
 * code.  An aligned timestamp-only option area (NOP, NOP, TIMESTAMP)
 * is exactly TCPOLEN_TSTAMP_ALIGNED (12) bytes.
 */
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
		     const struct tcphdr *tcph, __u32 *sack)
{
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);
	__u32 tmp;

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	/* tcp_error() already validated th->doff against skb->len. */
	BUG_ON(ptr == NULL);

	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED
	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
				       | (TCPOPT_NOP << 16)
				       | (TCPOPT_TIMESTAMP << 8)
				       | TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				break;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				/* The right edge is the second 32-bit
				 * word of each SACK block and may be
				 * unaligned in the packet. */
				for (i = 0;
				     i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);

					if (after(tmp, *sack))
						*sack = tmp;
				}
				return;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
#ifdef CONFIG_NF_NAT_NEEDED
/* Return the NAT sequence-number offset for @seq in direction @dir, or
 * 0 when no NAT sequence-adjustment hook is registered (RCU pointer). */
static inline s16 nat_offset(const struct nf_conn *ct,
			     enum ip_conntrack_dir dir,
			     u32 seq)
{
	typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);

	return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
}
/* Sequence mangling is only done for IPv4 NAT. */
#define NAT_OFFSET(pf, ct, dir, seq) \
	(pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
#else
#define NAT_OFFSET(pf, ct, dir, seq)	0
#endif
/*
 * Check a segment against the per-connection window tracking state,
 * implementing conditions I-IV from the comment block above, and update
 * the sender/receiver tracking data (td_end, td_maxend, td_maxwin,
 * td_maxack) plus the retransmission counter as a side effect.
 *
 * Returns true when the packet fits the window; always returns true
 * when the connection is flagged BE_LIBERAL or the global
 * nf_ct_tcp_be_liberal knob is set.
 *
 * Called with ct->lock held (see tcp_packet()).
 */
static bool tcp_in_window(const struct nf_conn *ct,
			  struct ip_ct_tcp *state,
			  enum ip_conntrack_dir dir,
			  unsigned int index,
			  const struct sk_buff *skb,
			  unsigned int dataoff,
			  const struct tcphdr *tcph,
			  u_int8_t pf)
{
	struct net *net = nf_ct_net(ct);
	struct ip_ct_tcp_state *sender = &state->seen[dir];
	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	__u32 seq, ack, sack, end, win, swin;
	s16 receiver_offset;
	bool res;

	/*
	 * Get the required data from the packet.
	 */
	seq = ntohl(tcph->seq);
	ack = sack = ntohl(tcph->ack_seq);
	win = ntohs(tcph->window);
	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);

	/* Use the highest SACK right edge instead of the plain ACK
	 * for checks III/IV when the peer announced SACK support. */
	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
		tcp_sack(skb, dataoff, tcph, &sack);

	/* Take into account NAT sequence number mangling */
	receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
	ack -= receiver_offset;
	sack -= receiver_offset;

	pr_debug("tcp_in_window: START\n");
	pr_debug("tcp_in_window: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	/* td_maxwin == 0 means we have not seen any packet from this
	 * direction yet. */
	if (sender->td_maxwin == 0) {
		/*
		 * Initialize sender data.
		 */
		if (tcph->syn) {
			/*
			 * SYN-ACK in reply to a SYN
			 * or SYN from reply direction in simultaneous open.
			 */
			sender->td_end =
			sender->td_maxend = end;
			sender->td_maxwin = (win == 0 ? 1 : win);

			tcp_options(skb, dataoff, tcph, sender);
			/*
			 * RFC 1323:
			 * Both sides must send the Window Scale option
			 * to enable window scaling in either direction.
			 */
			if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
			      && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
				sender->td_scale =
				receiver->td_scale = 0;
			if (!tcph->ack)
				/* Simultaneous open */
				return true;
		} else {
			/*
			 * We are in the middle of a connection,
			 * its history is lost for us.
			 * Let's try to use the data from the packet.
			 */
			sender->td_end = end;
			win <<= sender->td_scale;
			sender->td_maxwin = (win == 0 ? 1 : win);
			sender->td_maxend = end + sender->td_maxwin;
			/*
			 * We haven't seen traffic in the other direction yet
			 * but we have to tweak window tracking to pass III
			 * and IV until that happens.
			 */
			if (receiver->td_maxwin == 0)
				receiver->td_end = receiver->td_maxend = sack;
		}
	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
		     && dir == IP_CT_DIR_ORIGINAL)
		    || (state->state == TCP_CONNTRACK_SYN_RECV
			&& dir == IP_CT_DIR_REPLY))
		   && after(end, sender->td_end)) {
		/*
		 * RFC 793: "if a TCP is reinitialized ... then it need
		 * not wait at all; it must only be sure to use sequence
		 * numbers larger than those recently used."
		 */
		sender->td_end =
		sender->td_maxend = end;
		sender->td_maxwin = (win == 0 ? 1 : win);

		tcp_options(skb, dataoff, tcph, sender);
	}

	if (!(tcph->ack)) {
		/*
		 * If there is no ACK, just pretend it was set and OK.
		 */
		ack = sack = receiver->td_end;
	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
		    (TCP_FLAG_ACK|TCP_FLAG_RST))
		   && (ack == 0)) {
		/*
		 * Broken TCP stacks, that set ACK in RST packets as well
		 * with zero ack value.
		 */
		ack = sack = receiver->td_end;
	}

	if (seq == end
	    && (!tcph->rst
		|| (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
		/*
		 * Packet contains no data: we assume it is valid
		 * and check the ack value only.
		 * However RST segments are always validated by their
		 * SEQ number, except when seq == 0 (reset sent answering
		 * SYN.
		 */
		seq = end = sender->td_end;

	pr_debug("tcp_in_window: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
		 before(seq, sender->td_maxend + 1),
		 after(end, sender->td_end - receiver->td_maxwin - 1),
		 before(sack, receiver->td_end + 1),
		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));

	/* Checks I-IV; see the boundary description above tcp_in_window. */
	if (before(seq, sender->td_maxend + 1) &&
	    after(end, sender->td_end - receiver->td_maxwin - 1) &&
	    before(sack, receiver->td_end + 1) &&
	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
		/*
		 * Take into account window scaling (RFC 1323).
		 * The scale factor never applies to SYN segments.
		 */
		if (!tcph->syn)
			win <<= sender->td_scale;

		/*
		 * Update sender data.
		 */
		swin = win + (sack - ack);
		if (sender->td_maxwin < swin)
			sender->td_maxwin = swin;
		if (after(end, sender->td_end)) {
			sender->td_end = end;
			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
		}
		/* Track the highest ACK seen, used to validate RSTs. */
		if (tcph->ack) {
			if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
				sender->td_maxack = ack;
				sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
			} else if (after(ack, sender->td_maxack))
				sender->td_maxack = ack;
		}

		/*
		 * Update receiver data.
		 */
		if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
			receiver->td_maxwin += end - sender->td_maxend;
		if (after(sack + win, receiver->td_maxend - 1)) {
			receiver->td_maxend = sack + win;
			/* A zero window still occupies one sequence unit. */
			if (win == 0)
				receiver->td_maxend++;
		}
		if (ack == receiver->td_end)
			receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

		/*
		 * Check retransmissions: an ACK identical to the last one
		 * seen (same dir/seq/ack/end/win) bumps the retrans count,
		 * which tcp_packet() uses to shorten the timeout.
		 */
		if (index == TCP_ACK_SET) {
			if (state->last_dir == dir
			    && state->last_seq == seq
			    && state->last_ack == ack
			    && state->last_end == end
			    && state->last_win == win)
				state->retrans++;
			else {
				state->last_dir = dir;
				state->last_seq = seq;
				state->last_ack = ack;
				state->last_end = end;
				state->last_win = win;
				state->retrans = 0;
			}
		}
		res = true;
	} else {
		res = false;
		if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
		    nf_ct_tcp_be_liberal)
			res = true;
		/* Log which of the four bounds was violated. */
		if (!res && LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
			"nf_ct_tcp: %s ",
			before(seq, sender->td_maxend + 1) ?
			after(end, sender->td_end - receiver->td_maxwin - 1) ?
			before(sack, receiver->td_end + 1) ?
			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
			: "ACK is under the lower bound (possible overly delayed ACK)"
			: "ACK is over the upper bound (ACKed data not seen yet)"
			: "SEQ is under the lower bound (already ACKed data retransmitted)"
			: "SEQ is over the upper bound (over the window of the receiver)");
	}

	pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
		 "receiver end=%u maxend=%u maxwin=%u\n",
		 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);

	return res;
}
/* table of valid flag combinations - PUSH, ECE and CWR are always valid.
 * Indexed by the FIN/SYN/RST/ACK/URG bits of the TCP flag byte; tcp_error()
 * masks off PSH/ECE/CWR before the lookup.  Zero entries are invalid. */
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
				 TCPHDR_URG) + 1] =
{
	[TCPHDR_SYN]				= 1,
	[TCPHDR_SYN|TCPHDR_URG]			= 1,
	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
	[TCPHDR_RST]				= 1,
	[TCPHDR_RST|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
	[TCPHDR_ACK]				= 1,
	[TCPHDR_ACK|TCPHDR_URG]			= 1,
};
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c.
 * Sanity-checks header length, checksum and flag combination before the
 * packet reaches tcp_new()/tcp_packet().  Returns NF_ACCEPT when the
 * packet is well-formed, -NF_ACCEPT to drop it as invalid. */
static int tcp_error(struct net *net, struct nf_conn *tmpl,
		     struct sk_buff *skb,
		     unsigned int dataoff,
		     enum ip_conntrack_info *ctinfo,
		     u_int8_t pf,
		     unsigned int hooknum)
{
	const struct tcphdr *th;
	struct tcphdr _tcph;
	unsigned int tcplen = skb->len - dataoff;
	u_int8_t tcpflags;

	/* Smaller than minimal TCP header? */
	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	if (th == NULL) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				"nf_ct_tcp: short packet ");
		return -NF_ACCEPT;
	}

	/* Not whole TCP header or malformed packet */
	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				"nf_ct_tcp: truncated/malformed packet ");
		return -NF_ACCEPT;
	}

	/* Checksum invalid? Ignore.
	 * We skip checking packets on the outgoing path
	 * because the checksum is assumed to be correct.
	 */
	/* FIXME: Source route IP option packets --RR */
	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
	    nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				"nf_ct_tcp: bad TCP checksum ");
		return -NF_ACCEPT;
	}

	/* Check TCP flags: PSH/ECE/CWR are always valid, so mask them
	 * off before looking up the combination table. */
	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
	if (!tcp_valid_flags[tcpflags]) {
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				"nf_ct_tcp: invalid TCP flag combination ");
		return -NF_ACCEPT;
	}

	return NF_ACCEPT;
}
/* Returns verdict for packet, or -1 for invalid.
 *
 * Core per-packet handler: advances the TCP state machine
 * (tcp_conntracks[dir][index][old_state]), validates the segment with
 * tcp_in_window(), picks the new timeout and updates the conntrack
 * status bits (ASSURED, event cache). */
static int tcp_packet(struct nf_conn *ct,
		      const struct sk_buff *skb,
		      unsigned int dataoff,
		      enum ip_conntrack_info ctinfo,
		      u_int8_t pf,
		      unsigned int hooknum)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple *tuple;
	enum tcp_conntrack new_state, old_state;
	enum ip_conntrack_dir dir;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	unsigned long timeout;
	unsigned int index;

	/* Header presence was already verified by tcp_error(). */
	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	BUG_ON(th == NULL);

	spin_lock_bh(&ct->lock);
	old_state = ct->proto.tcp.state;
	dir = CTINFO2DIR(ctinfo);
	index = get_conntrack_index(th);
	new_state = tcp_conntracks[dir][index][old_state];
	tuple = &ct->tuplehash[dir].tuple;

	switch (new_state) {
	case TCP_CONNTRACK_SYN_SENT:
		if (old_state < TCP_CONNTRACK_TIME_WAIT)
			break;
		/* RFC 1122: "When a connection is closed actively,
		 * it MUST linger in TIME-WAIT state for a time 2xMSL
		 * (Maximum Segment Lifetime). However, it MAY accept
		 * a new SYN from the remote TCP to reopen the connection
		 * directly from TIME-WAIT state, if..."
		 * We ignore the conditions because we are in the
		 * TIME-WAIT state anyway.
		 *
		 * Handle aborted connections: we and the server
		 * think there is an existing connection but the client
		 * aborts it and starts a new one.
		 */
		if (((ct->proto.tcp.seen[dir].flags
		      | ct->proto.tcp.seen[!dir].flags)
		     & IP_CT_TCP_FLAG_CLOSE_INIT)
		    || (ct->proto.tcp.last_dir == dir
			&& ct->proto.tcp.last_index == TCP_RST_SET)) {
			/* Attempt to reopen a closed/aborted connection.
			 * Delete this connection and look up again. */
			spin_unlock_bh(&ct->lock);

			/* Only repeat if we can actually remove the timer.
			 * Destruction may already be in progress in process
			 * context and we must give it a chance to terminate.
			 */
			if (nf_ct_kill(ct))
				return -NF_REPEAT;
			return NF_DROP;
		}
		/* Fall through */
	case TCP_CONNTRACK_IGNORE:
		/* Ignored packets:
		 *
		 * Our connection entry may be out of sync, so ignore
		 * packets which may signal the real connection between
		 * the client and the server.
		 *
		 * a) SYN in ORIGINAL
		 * b) SYN/ACK in REPLY
		 * c) ACK in reply direction after initial SYN in original.
		 *
		 * If the ignored packet is invalid, the receiver will send
		 * a RST we'll catch below.
		 */
		if (index == TCP_SYNACK_SET
		    && ct->proto.tcp.last_index == TCP_SYN_SET
		    && ct->proto.tcp.last_dir != dir
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* b) This SYN/ACK acknowledges a SYN that we earlier
			 * ignored as invalid. This means that the client and
			 * the server are both in sync, while the firewall is
			 * not. We get in sync from the previously annotated
			 * values.
			 */
			old_state = TCP_CONNTRACK_SYN_SENT;
			new_state = TCP_CONNTRACK_SYN_RECV;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
				ct->proto.tcp.last_win == 0 ?
					1 : ct->proto.tcp.last_win;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
				ct->proto.tcp.last_wscale;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
				ct->proto.tcp.last_flags;
			/* The SYN/ACK direction starts from scratch. */
			memset(&ct->proto.tcp.seen[dir], 0,
			       sizeof(struct ip_ct_tcp_state));
			break;
		}
		/* Annotate this packet so a later SYN/ACK or RST can be
		 * matched against it (cases b and the RST checks below). */
		ct->proto.tcp.last_index = index;
		ct->proto.tcp.last_dir = dir;
		ct->proto.tcp.last_seq = ntohl(th->seq);
		ct->proto.tcp.last_end =
		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
		ct->proto.tcp.last_win = ntohs(th->window);

		/* a) This is a SYN in ORIGINAL. The client and the server
		 * may be in sync but we are not. In that case, we annotate
		 * the TCP options and let the packet go through. If it is a
		 * valid SYN packet, the server will reply with a SYN/ACK, and
		 * then we'll get in sync. Otherwise, the server ignores it. */
		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
			struct ip_ct_tcp_state seen = {};

			ct->proto.tcp.last_flags =
			ct->proto.tcp.last_wscale = 0;
			tcp_options(skb, dataoff, th, &seen);
			if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
				ct->proto.tcp.last_wscale = seen.td_scale;
			}
			if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_SACK_PERM;
			}
		}
		spin_unlock_bh(&ct->lock);
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				      "nf_ct_tcp: invalid packet ignored ");
		return NF_ACCEPT;
	case TCP_CONNTRACK_MAX:
		/* Invalid packet */
		pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
			 dir, get_conntrack_index(th), old_state);
		spin_unlock_bh(&ct->lock);
		if (LOG_INVALID(net, IPPROTO_TCP))
			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
				      "nf_ct_tcp: invalid state ");
		return -NF_ACCEPT;
	case TCP_CONNTRACK_CLOSE:
		/* Reject RSTs whose sequence number lies below the highest
		 * ACK seen from the other side: data past that point was
		 * already acknowledged, so the RST cannot be genuine. */
		if (index == TCP_RST_SET
		    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
		    && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
			/* Invalid RST */
			spin_unlock_bh(&ct->lock);
			if (LOG_INVALID(net, IPPROTO_TCP))
				nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
					      "nf_ct_tcp: invalid RST ");
			return -NF_ACCEPT;
		}
		if (index == TCP_RST_SET
		    && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
			 && ct->proto.tcp.last_index == TCP_SYN_SET)
			|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
			    && ct->proto.tcp.last_index == TCP_ACK_SET))
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* RST sent to invalid SYN or ACK we had let through
			 * at a) and c) above:
			 *
			 * a) SYN was in window then
			 * c) we hold a half-open connection.
			 *
			 * Delete our connection entry.
			 * We skip window checking, because packet might ACK
			 * segments we ignored. */
			goto in_window;
		}
		/* Just fall through */
	default:
		/* Keep compilers happy. */
		break;
	}

	if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
			   skb, dataoff, th, pf)) {
		spin_unlock_bh(&ct->lock);
		return -NF_ACCEPT;
	}
     in_window:
	/* From now on we have got in-window packets */
	ct->proto.tcp.last_index = index;
	ct->proto.tcp.last_dir = dir;

	pr_debug("tcp_conntracks: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
		 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
		 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
		 old_state, new_state);

	ct->proto.tcp.state = new_state;
	if (old_state != new_state
	    && new_state == TCP_CONNTRACK_FIN_WAIT)
		ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;

	/* Shorten the timeout for heavy retransmission or long-unacked
	 * data; otherwise use the per-state default. */
	if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
	    tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans)
		timeout = nf_ct_tcp_timeout_max_retrans;
	else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
		 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
		 tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged)
		timeout = nf_ct_tcp_timeout_unacknowledged;
	else
		timeout = tcp_timeouts[new_state];
	spin_unlock_bh(&ct->lock);

	if (new_state != old_state)
		nf_conntrack_event_cache(IPCT_PROTOINFO, ct);

	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		/* If only reply is a RST, we can consider ourselves not to
		   have an established connection: this is a fairly common
		   problem case, so we can delete the conntrack
		   immediately.  --RR */
		if (th->rst) {
			nf_ct_kill_acct(ct, ctinfo, skb);
			return NF_ACCEPT;
		}
	} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
		   && (old_state == TCP_CONNTRACK_SYN_RECV
		       || old_state == TCP_CONNTRACK_ESTABLISHED)
		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
		/* Set ASSURED if we see valid ack in ESTABLISHED
		   after SYN_RECV or a valid answer for a picked up
		   connection. */
		set_bit(IPS_ASSURED_BIT, &ct->status);
		nf_conntrack_event_cache(IPCT_ASSURED, ct);
	}
	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);

	return NF_ACCEPT;
}
/* Called when a new connection for this protocol found.
 * Initializes ct->proto.tcp from the first packet: a fresh SYN starts
 * normal tracking; a mid-stream packet is either rejected
 * (nf_ct_tcp_loose == 0) or picked up with liberal window checking.
 * Returns false to tell the core to drop the conntrack entry. */
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
		    unsigned int dataoff)
{
	enum tcp_conntrack new_state;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
	const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];

	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	BUG_ON(th == NULL);

	/* Don't need lock here: this conntrack not in circulation yet */
	new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];

	/* Invalid: delete conntrack */
	if (new_state >= TCP_CONNTRACK_MAX) {
		pr_debug("nf_ct_tcp: invalid new deleting.\n");
		return false;
	}

	if (new_state == TCP_CONNTRACK_SYN_SENT) {
		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
		/* SYN packet */
		ct->proto.tcp.seen[0].td_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len,
					     dataoff, th);
		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
		if (ct->proto.tcp.seen[0].td_maxwin == 0)
			ct->proto.tcp.seen[0].td_maxwin = 1;
		ct->proto.tcp.seen[0].td_maxend =
			ct->proto.tcp.seen[0].td_end;

		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
	} else if (nf_ct_tcp_loose == 0) {
		/* Don't try to pick up connections. */
		return false;
	} else {
		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
		/*
		 * We are in the middle of a connection,
		 * its history is lost for us.
		 * Let's try to use the data from the packet.
		 */
		ct->proto.tcp.seen[0].td_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len,
					     dataoff, th);
		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
		if (ct->proto.tcp.seen[0].td_maxwin == 0)
			ct->proto.tcp.seen[0].td_maxwin = 1;
		ct->proto.tcp.seen[0].td_maxend =
			ct->proto.tcp.seen[0].td_end +
			ct->proto.tcp.seen[0].td_maxwin;

		/* We assume SACK and liberal window checking to handle
		 * window scaling */
		ct->proto.tcp.seen[0].flags =
		ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
					      IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	/* tcp_packet will set them */
	ct->proto.tcp.last_index = TCP_NONE_SET;

	pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
		 sender->td_end, sender->td_maxend, sender->td_maxwin,
		 sender->td_scale,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);

	return true;
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
/* Dump the private TCP state into a nested CTA_PROTOINFO_TCP netlink
 * attribute.  Note: the NLA_PUT* macros jump to nla_put_failure when
 * the skb runs out of room, hence the unlock on both exit paths. */
static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
			 struct nf_conn *ct)
{
	struct nlattr *nest_parms;
	struct nf_ct_tcp_flags tmp = {};

	spin_lock_bh(&ct->lock);
	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state);

	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
		   ct->proto.tcp.seen[0].td_scale);

	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
		   ct->proto.tcp.seen[1].td_scale);

	tmp.flags = ct->proto.tcp.seen[0].flags;
	NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
		sizeof(struct nf_ct_tcp_flags), &tmp);

	tmp.flags = ct->proto.tcp.seen[1].flags;
	NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
		sizeof(struct nf_ct_tcp_flags), &tmp);
	spin_unlock_bh(&ct->lock);

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	spin_unlock_bh(&ct->lock);
	return -1;
}
/* Validation policy for attributes nested in CTA_PROTOINFO_TCP,
 * used by nla_parse_nested() in nlattr_to_tcp(). */
static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
	[CTA_PROTOINFO_TCP_STATE]	    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len = sizeof(struct nf_ct_tcp_flags) },
};
/* Apply a netlink CTA_PROTOINFO_TCP update to the conntrack's private
 * TCP state.  Flags are updated through a mask so userspace can change
 * individual bits; window scales are only accepted when both directions
 * have WINDOW_SCALE enabled. */
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
	struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
	int err;

	/* updates could not contain anything about the private
	 * protocol info, in that case skip the parsing */
	if (!pattr)
		return 0;

	err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, tcp_nla_policy);
	if (err < 0)
		return err;

	/* Reject out-of-range states before touching the conntrack. */
	if (tb[CTA_PROTOINFO_TCP_STATE] &&
	    nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
		return -EINVAL;

	spin_lock_bh(&ct->lock);
	if (tb[CTA_PROTOINFO_TCP_STATE])
		ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);

	if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
		struct nf_ct_tcp_flags *attr =
			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
		ct->proto.tcp.seen[0].flags &= ~attr->mask;
		ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
	}

	if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
		struct nf_ct_tcp_flags *attr =
			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
		ct->proto.tcp.seen[1].flags &= ~attr->mask;
		ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
	}

	if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
	    tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
	    ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
	    ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
		ct->proto.tcp.seen[0].td_scale =
			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
		ct->proto.tcp.seen[1].td_scale =
			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
	}
	spin_unlock_bh(&ct->lock);

	return 0;
}
/* Worst-case netlink attribute size of the nested CTA_PROTOINFO_TCP
 * block, derived from the validation policy. */
static int tcp_nlattr_size(void)
{
	return nla_total_size(0)	   /* CTA_PROTOINFO_TCP */
		+ nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
}
/* Worst-case netlink attribute size of a TCP tuple (port attributes). */
static int tcp_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
#endif
#ifdef CONFIG_SYSCTL
static unsigned int tcp_sysctl_table_users;
static struct ctl_table_header *tcp_sysctl_header;
/* Tunables exposed under /proc/sys/net/netfilter/: per-state timeouts
 * (in jiffies) plus the loose/liberal/max_retrans policy knobs. */
static struct ctl_table tcp_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_tcp_timeout_syn_sent",
		.data		= &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_syn_recv",
		.data		= &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_established",
		.data		= &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
		.data		= &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_close_wait",
		.data		= &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_last_ack",
		.data		= &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_time_wait",
		.data		= &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_close",
		.data		= &tcp_timeouts[TCP_CONNTRACK_CLOSE],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_max_retrans",
		.data		= &nf_ct_tcp_timeout_max_retrans,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_timeout_unacknowledged",
		.data		= &nf_ct_tcp_timeout_unacknowledged,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_tcp_loose",
		.data		= &nf_ct_tcp_loose,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_tcp_be_liberal",
		.data		= &nf_ct_tcp_be_liberal,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_tcp_max_retrans",
		.data		= &nf_ct_tcp_max_retrans,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
/* Legacy "ip_conntrack_*" names pointing at the same variables, kept
 * for userspace compatibility.  Note the extra syn_sent2 entry. */
static struct ctl_table tcp_compat_sysctl_table[] = {
	{
		.procname	= "ip_conntrack_tcp_timeout_syn_sent",
		.data		= &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_syn_sent2",
		.data		= &tcp_timeouts[TCP_CONNTRACK_SYN_SENT2],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_syn_recv",
		.data		= &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_established",
		.data		= &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_fin_wait",
		.data		= &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_close_wait",
		.data		= &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_last_ack",
		.data		= &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_time_wait",
		.data		= &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_close",
		.data		= &tcp_timeouts[TCP_CONNTRACK_CLOSE],
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_timeout_max_retrans",
		.data		= &nf_ct_tcp_timeout_max_retrans,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ip_conntrack_tcp_loose",
		.data		= &nf_ct_tcp_loose,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ip_conntrack_tcp_be_liberal",
		.data		= &nf_ct_tcp_be_liberal,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "ip_conntrack_tcp_max_retrans",
		.data		= &nf_ct_tcp_max_retrans,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
/* IPv4 TCP protocol tracker registration: wires the handlers above into
 * the conntrack core. */
struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
{
	.l3proto		= PF_INET,
	.l4proto		= IPPROTO_TCP,
	.name			= "tcp",
	.pkt_to_tuple		= tcp_pkt_to_tuple,
	.invert_tuple		= tcp_invert_tuple,
	.print_tuple		= tcp_print_tuple,
	.print_conntrack	= tcp_print_conntrack,
	.packet			= tcp_packet,
	.new			= tcp_new,
	.error			= tcp_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.to_nlattr		= tcp_to_nlattr,
	.nlattr_size		= tcp_nlattr_size,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
	.ctl_table_users	= &tcp_sysctl_table_users,
	.ctl_table_header	= &tcp_sysctl_header,
	.ctl_table		= tcp_sysctl_table,
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
	.ctl_compat_table	= tcp_compat_sysctl_table,
#endif
#endif
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
/* IPv6 TCP protocol tracker: identical handlers, but no proc-compat
 * table (the legacy ip_conntrack_* names were IPv4-only). */
struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
{
	.l3proto		= PF_INET6,
	.l4proto		= IPPROTO_TCP,
	.name			= "tcp",
	.pkt_to_tuple		= tcp_pkt_to_tuple,
	.invert_tuple		= tcp_invert_tuple,
	.print_tuple		= tcp_print_tuple,
	.print_conntrack	= tcp_print_conntrack,
	.packet			= tcp_packet,
	.new			= tcp_new,
	.error			= tcp_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.to_nlattr		= tcp_to_nlattr,
	.nlattr_size		= tcp_nlattr_size,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
	.ctl_table_users	= &tcp_sysctl_table_users,
	.ctl_table_header	= &tcp_sysctl_header,
	.ctl_table		= tcp_sysctl_table,
#endif
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
| gpl-2.0 |
manveru0/FeaCore_Phoenix_S3 | drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c | 2420 | 35779 | /**
*
* Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
* Copyright (c) 2007-2010, Synaptics Incorporated
*
* Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* Copyright 2010 (c) ST-Ericsson AB
*/
/*
* This file is licensed under the GPL2 license.
*
*#############################################################################
* GPL
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*#############################################################################
*/
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
#include "synaptics_i2c_rmi4.h"
/* TODO: for multiple device support will need a per-device mutex */
#define DRIVER_NAME "synaptics_rmi4_i2c"
#define MAX_ERROR_REPORT 6
#define MAX_TOUCH_MAJOR 15
#define MAX_RETRY_COUNT 5
#define STD_QUERY_LEN 21
#define PAGE_LEN 2
#define DATA_BUF_LEN 32
#define BUF_LEN 37
#define QUERY_LEN 9
#define DATA_LEN 12
#define HAS_TAP 0x01
#define HAS_PALMDETECT 0x01
#define HAS_ROTATE 0x02
#define HAS_TAPANDHOLD 0x02
#define HAS_DOUBLETAP 0x04
#define HAS_EARLYTAP 0x08
#define HAS_RELEASE 0x08
#define HAS_FLICK 0x10
#define HAS_PRESS 0x20
#define HAS_PINCH 0x40
#define MASK_16BIT 0xFFFF
#define MASK_8BIT 0xFF
#define MASK_7BIT 0x7F
#define MASK_5BIT 0x1F
#define MASK_4BIT 0x0F
#define MASK_3BIT 0x07
#define MASK_2BIT 0x03
#define TOUCHPAD_CTRL_INTR 0x8
#define PDT_START_SCAN_LOCATION (0x00E9)
#define PDT_END_SCAN_LOCATION (0x000A)
#define PDT_ENTRY_SIZE (0x0006)
#define RMI4_NUMBER_OF_MAX_FINGERS (8)
#define SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM (0x11)
#define SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM (0x01)
/**
 * struct synaptics_rmi4_fn_desc - contains the function descriptor information
 * @query_base_addr: base address for query
 * @cmd_base_addr: base address for command
 * @ctrl_base_addr: base address for control
 * @data_base_addr: base address for data
 * @intr_src_count: count for the interrupt source
 * @fn_number: function number
 *
 * This structure holds one entry of the Page Descriptor Table: the register
 * base addresses and interrupt-source count of a single RMI4 function.
 */
struct synaptics_rmi4_fn_desc {
	unsigned char query_base_addr;
	unsigned char cmd_base_addr;
	unsigned char ctrl_base_addr;
	unsigned char data_base_addr;
	unsigned char intr_src_count;
	unsigned char fn_number;
};

/**
 * struct synaptics_rmi4_fn - contains the function information
 * @fn_number: function number
 * @num_of_data_sources: number of data sources
 * @num_of_data_points: number of fingers touched
 * @size_of_data_register_block: data register block size
 * @index_to_intr_reg: index for interrupt register
 * @intr_mask: interrupt mask value
 * @fn_desc: variable for function descriptor structure
 * @link: linked list for function descriptors
 *
 * This structure gives information about the number of data sources and
 * the number of data registers associated with the function.
 */
struct synaptics_rmi4_fn {
	unsigned char fn_number;
	unsigned char num_of_data_sources;
	unsigned char num_of_data_points;
	unsigned char size_of_data_register_block;
	unsigned char index_to_intr_reg;
	unsigned char intr_mask;
	struct synaptics_rmi4_fn_desc fn_desc;
	struct list_head link;
};

/**
 * struct synaptics_rmi4_device_info - contains the rmi4 device information
 * @version_major: protocol major version number
 * @version_minor: protocol minor version number
 * @manufacturer_id: manufacturer identification byte
 * @product_props: product properties information
 * @product_info: product info array
 * @date_code: device manufacture date
 * @tester_id: tester id array
 * @serial_number: serial number for that device
 * @product_id_string: product id for the device
 * @support_fn_list: linked list for device information
 *
 * This structure holds the identity and version information queried from
 * the device's Fn$01 registers, plus the list of supported RMI4 functions.
 */
struct synaptics_rmi4_device_info {
	unsigned int		version_major;
	unsigned int		version_minor;
	unsigned char		manufacturer_id;
	unsigned char		product_props;
	unsigned char		product_info[2];
	unsigned char		date_code[3];
	unsigned short		tester_id;
	unsigned short		serial_number;
	unsigned char		product_id_string[11];
	struct list_head	support_fn_list;
};

/**
 * struct synaptics_rmi4_data - contains the rmi4 device data
 * @rmi4_mod_info: structure variable for rmi4 device info
 * @input_dev: pointer for input device
 * @i2c_client: pointer for i2c client
 * @board: constant pointer for touch platform data
 * @fn_list_mutex: mutex for function list
 * @rmi4_page_mutex: mutex for rmi4 page
 * @current_page: variable for integer
 * @number_of_interrupt_register: interrupt registers count
 * @fn01_ctrl_base_addr: control base address for fn01
 * @fn01_query_base_addr: query base address for fn01
 * @fn01_data_base_addr: data base address for fn01
 * @sensor_max_x: sensor maximum x value
 * @sensor_max_y: sensor maximum y value
 * @regulator: pointer to the regulator structure
 * @wait: wait queue structure variable
 * @touch_stopped: flag to stop the thread function
 *
 * This structure gives the device data information.
 */
struct synaptics_rmi4_data {
	struct synaptics_rmi4_device_info rmi4_mod_info;
	struct input_dev	*input_dev;
	struct i2c_client	*i2c_client;
	const struct synaptics_rmi4_platform_data *board;
	struct mutex		fn_list_mutex;
	struct mutex		rmi4_page_mutex;
	int			current_page;
	unsigned int		number_of_interrupt_register;
	unsigned short		fn01_ctrl_base_addr;
	unsigned short		fn01_query_base_addr;
	unsigned short		fn01_data_base_addr;
	int			sensor_max_x;
	int			sensor_max_y;
	struct regulator	*regulator;
	wait_queue_head_t	wait;
	bool			touch_stopped;
};
/**
 * synaptics_rmi4_set_page() - select the RMI4 register page
 * @pdata: pointer to synaptics_rmi4_data structure
 * @address: 16-bit register address whose high byte is the page
 *
 * Writes the page-select register when the page implied by @address
 * differs from the currently selected one. Returns PAGE_LEN on success
 * (including when no write was needed) or the short/negative result of
 * i2c_master_send() on failure. Caller must hold rmi4_page_mutex.
 */
static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *pdata,
					unsigned int address)
{
	struct i2c_client *i2c = pdata->i2c_client;
	unsigned int new_page = (address >> 8) & MASK_8BIT;
	unsigned char cmd[PAGE_LEN];
	int status;

	/* Fast path: the requested page is already selected */
	if (new_page == pdata->current_page)
		return PAGE_LEN;

	cmd[0] = MASK_8BIT;	/* page-select register address */
	cmd[1] = new_page;
	status = i2c_master_send(i2c, cmd, PAGE_LEN);
	if (status == PAGE_LEN)
		pdata->current_page = new_page;
	else
		dev_err(&i2c->dev, "%s:failed:%d\n", __func__, status);

	return status;
}
/**
 * synaptics_rmi4_i2c_block_read() - read the block of data
 * @pdata: pointer to synaptics_rmi4_data structure
 * @address: read the block of data from this offset
 * @valp: pointer to a buffer receiving the data
 * @size: number of bytes to read
 *
 * Selects the proper register page, then performs an SMBus block read,
 * retrying up to MAX_RETRY_COUNT times. Returns the number of bytes read
 * (@size on success) or an error value from the i2c layer.
 */
static int synaptics_rmi4_i2c_block_read(struct synaptics_rmi4_data *pdata,
						unsigned short address,
						unsigned char *valp, int size)
{
	struct i2c_client *i2c = pdata->i2c_client;
	int attempts = 0;
	int reg;
	int status;

	mutex_lock(&(pdata->rmi4_page_mutex));

	status = synaptics_rmi4_set_page(pdata, address);
	if (status != PAGE_LEN)
		goto unlock;

	reg = address & MASK_8BIT;
	for (;;) {
		status = i2c_smbus_read_i2c_block_data(i2c, reg, size, valp);
		if (status == size)
			break;
		if (++attempts == MAX_RETRY_COUNT) {
			dev_err(&i2c->dev,
				"%s:address 0x%04x size %d failed:%d\n",
				__func__, address, size, status);
			break;
		}
		/* reselect the page in case it was disturbed, then retry */
		synaptics_rmi4_set_page(pdata, address);
	}
unlock:
	mutex_unlock(&(pdata->rmi4_page_mutex));
	return status;
}
/**
 * synaptics_rmi4_i2c_byte_write() - write the single byte data
 * @pdata: pointer to synaptics_rmi4_data structure
 * @address: register address to write
 * @data: byte value to write
 *
 * Selects the proper register page and writes one byte at @address.
 * Returns 1 on success, -EIO on a failed write, or the short result of
 * the page-select write when that step fails.
 */
static int synaptics_rmi4_i2c_byte_write(struct synaptics_rmi4_data *pdata,
						unsigned short address,
						unsigned char data)
{
	struct i2c_client *i2c = pdata->i2c_client;
	unsigned char msg[2];
	int status;

	/* Can't have anyone else changing the page behind our backs */
	mutex_lock(&(pdata->rmi4_page_mutex));

	status = synaptics_rmi4_set_page(pdata, address);
	if (status != PAGE_LEN)
		goto unlock;

	msg[0] = address & MASK_8BIT;
	msg[1] = data;
	status = i2c_master_send(pdata->i2c_client, msg, 2);
	/* Add in retry on writes only in certain error return values */
	if (status == 2) {
		status = 1;
	} else {
		dev_err(&i2c->dev, "%s:failed:%d\n", __func__, status);
		status = -EIO;
	}
unlock:
	mutex_unlock(&(pdata->rmi4_page_mutex));
	return status;
}
/**
 * synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
 * @pdata: pointer to synaptics_rmi4_data structure
 * @rfi: pointer to synaptics_rmi4_fn structure
 *
 * Reads the Fn$11 finger status registers and, for every finger that is
 * down, its absolute position/width data block, then forwards the active
 * contacts to the input subsystem using the MT protocol. Returns the
 * number of active touch points, or 0 if an i2c read fails.
 */
static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
						struct synaptics_rmi4_fn *rfi)
{
	/* number of touch points - fingers down in this case */
	int	touch_count = 0;
	int	finger;
	int	fingers_supported;
	int	finger_registers;
	int	reg;
	int	finger_shift;
	int	finger_status;
	int	retval;
	unsigned short	data_base_addr;
	unsigned short	data_offset;
	unsigned char	data_reg_blk_size;
	unsigned char	values[2];
	unsigned char	data[DATA_LEN];
	int	x[RMI4_NUMBER_OF_MAX_FINGERS];
	int	y[RMI4_NUMBER_OF_MAX_FINGERS];
	int	wx[RMI4_NUMBER_OF_MAX_FINGERS];
	int	wy[RMI4_NUMBER_OF_MAX_FINGERS];
	struct	i2c_client *client = pdata->i2c_client;

	/* get 2D sensor finger data */
	/*
	 * First get the finger status field - the size of the finger status
	 * field is determined by the number of fingers supported - 2 bits per
	 * finger, so the number of registers to read is:
	 * registerCount = ceil(numberOfFingers/4).
	 * Read the required number of registers and check each 2 bit field to
	 * determine if a finger is down:
	 *	00 = finger not present,
	 *	01 = finger present and data accurate,
	 *	10 = finger present but data may not be accurate,
	 *	11 = reserved for product use.
	 */
	fingers_supported	= rfi->num_of_data_points;
	finger_registers	= (fingers_supported + 3)/4;
	data_base_addr		= rfi->fn_desc.data_base_addr;
	retval = synaptics_rmi4_i2c_block_read(pdata, data_base_addr, values,
							finger_registers);
	if (retval != finger_registers) {
		dev_err(&client->dev, "%s:read status registers failed\n",
								__func__);
		return 0;
	}
	/*
	 * For each finger present, read the proper number of registers
	 * to get absolute data.
	 */
	data_reg_blk_size = rfi->size_of_data_register_block;
	for (finger = 0; finger < fingers_supported; finger++) {
		/* determine which data byte the finger status is in */
		reg = finger/4;
		/* bit shift to get finger's status */
		finger_shift	= (finger % 4) * 2;
		finger_status	= (values[reg] >> finger_shift) & 3;
		/*
		 * if finger status indicates a finger is present then
		 * read the finger data and report it
		 */
		if (finger_status == 1 || finger_status == 2) {
			/* Read the finger data block; it follows the status
			 * registers at a per-finger stride. */
			data_offset = data_base_addr +
					((finger * data_reg_blk_size) +
					finger_registers);
			retval = synaptics_rmi4_i2c_block_read(pdata,
						data_offset, data,
						data_reg_blk_size);
			if (retval != data_reg_blk_size) {
				printk(KERN_ERR "%s:read data failed\n",
							__func__);
				return 0;
			} else {
				/* X/Y: 8 high bits plus 4 low bits packed
				 * into data[2]; W: 4-bit width per axis. */
				x[touch_count]	=
					(data[0] << 4) | (data[2] & MASK_4BIT);
				y[touch_count]	=
					(data[1] << 4) |
					((data[2] >> 4) & MASK_4BIT);
				wy[touch_count]	=
						(data[3] >> 4) & MASK_4BIT;
				wx[touch_count]	=
						(data[3] & MASK_4BIT);
				/* mirror coordinates for flipped panels */
				if (pdata->board->x_flip)
					x[touch_count] =
						pdata->sensor_max_x -
								x[touch_count];
				if (pdata->board->y_flip)
					y[touch_count] =
						pdata->sensor_max_y -
								y[touch_count];
			}
			/* number of active touch points */
			touch_count++;
		}
	}
	/* report to input subsystem */
	if (touch_count) {
		for (finger = 0; finger < touch_count; finger++) {
			input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
					max(wx[finger] , wy[finger]));
			input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
								x[finger]);
			input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
								y[finger]);
			input_mt_sync(pdata->input_dev);
		}
	} else
		/* no fingers down: empty sync tells userspace "all up" */
		input_mt_sync(pdata->input_dev);
	/* sync after groups of events */
	input_sync(pdata->input_dev);
	/* return the number of touch points */
	return touch_count;
}
/**
 * synaptics_rmi4_report_device() - reports the rmi4 device
 * @pdata: pointer to synaptics_rmi4_data structure
 * @rfi: pointer to synaptics_rmi4_fn
 *
 * Dispatches to the report routine for the given RMI4 function. Only the
 * 2D touchpad function ($11) is supported; anything else logs a
 * rate-limited error. Returns the number of touch points reported.
 */
static int synaptics_rmi4_report_device(struct synaptics_rmi4_data *pdata,
					struct synaptics_rmi4_fn *rfi)
{
	static int num_error_reports;
	struct i2c_client *client = pdata->i2c_client;
	int touch = 0;

	if (rfi->fn_number == SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
		touch = synpatics_rmi4_touchpad_report(pdata, rfi);
	} else {
		/* complain at most MAX_ERROR_REPORT - 1 times */
		num_error_reports++;
		if (num_error_reports < MAX_ERROR_REPORT)
			dev_err(&client->dev, "%s:report not supported\n",
								__func__);
	}
	return touch;
}
/**
 * synaptics_rmi4_sensor_report() - reports to input subsystem
 * @pdata: pointer to synaptics_rmi4_data structure
 *
 * Reads the Fn$01 interrupt status registers to find which data source(s)
 * raised the interrupt, then calls the report routine of every RMI4
 * function whose interrupt bit is set so its data reaches the input
 * subsystem. Returns the number of touch points reported (0 on failure).
 */
static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *pdata)
{
	unsigned char	intr_status[4];
	/* number of touch points - fingers or buttons */
	int touch = 0;
	/*
	 * Must be signed: synaptics_rmi4_i2c_block_read() can return a
	 * negative errno, which would wrap if stored in an unsigned int.
	 */
	int retval;
	struct synaptics_rmi4_fn	*rfi;
	struct synaptics_rmi4_device_info *rmi;
	struct	i2c_client *client = pdata->i2c_client;

	/*
	 * Get the interrupt status from the function $01
	 * control register+1 to find which source(s) were interrupting
	 * so we can read the data from the source(s) (2D sensor, buttons..)
	 */
	retval = synaptics_rmi4_i2c_block_read(pdata,
					pdata->fn01_data_base_addr + 1,
					intr_status,
					pdata->number_of_interrupt_register);
	if (retval != pdata->number_of_interrupt_register) {
		dev_err(&client->dev,
			"could not read interrupt status registers\n");
		return 0;
	}
	/*
	 * check each function that has data sources and if the interrupt for
	 * that triggered then call that RMI4 functions report() function to
	 * gather data and report it to the input subsystem
	 */
	rmi = &(pdata->rmi4_mod_info);
	list_for_each_entry(rfi, &rmi->support_fn_list, link) {
		if (rfi->num_of_data_sources) {
			if (intr_status[rfi->index_to_intr_reg] &
							rfi->intr_mask)
				touch = synaptics_rmi4_report_device(pdata,
									rfi);
		}
	}
	/* return the number of touch points */
	return touch;
}
/**
 * synaptics_rmi4_irq() - thread function for rmi4 attention line
 * @irq: irq value
 * @data: void pointer
 *
 * Threaded interrupt handler: keeps reporting sensor data while fingers
 * remain down, sleeping ~1ms between polls, and stops as soon as either
 * no touches are reported or touch_stopped is set.
 */
static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
{
	struct synaptics_rmi4_data *pdata = data;

	do {
		int touches = synaptics_rmi4_sensor_report(pdata);

		if (touches == 0)
			break;
		/* fingers still down: pause briefly, then poll again */
		wait_event_timeout(pdata->wait, pdata->touch_stopped,
							msecs_to_jiffies(1));
	} while (!pdata->touch_stopped);

	return IRQ_HANDLED;
}
/**
 * synpatics_rmi4_touchpad_detect() - detects the rmi4 touchpad device
 * @pdata: pointer to synaptics_rmi4_data structure
 * @rfi: pointer to synaptics_rmi4_fn structure
 * @fd: pointer to synaptics_rmi4_fn_desc structure
 * @interruptcount: count the number of interrupts
 *
 * Copies the PDT entry into @rfi, reads the Fn$11 query registers and
 * derives the number of supported fingers, the per-finger data block
 * size, the interrupt register index/mask and the total data block size.
 * Returns the block-read result (positive on success, negative on error).
 */
static int synpatics_rmi4_touchpad_detect(struct synaptics_rmi4_data *pdata,
					struct synaptics_rmi4_fn *rfi,
					struct synaptics_rmi4_fn_desc *fd,
					unsigned int interruptcount)
{
	unsigned char	queries[QUERY_LEN];
	unsigned short	intr_offset;
	unsigned char	abs_data_size;
	unsigned char	abs_data_blk_size;
	unsigned char	egr_0, egr_1;
	unsigned int	all_data_blk_size;
	int	has_pinch, has_flick, has_tap;
	int	has_tapandhold, has_doubletap;
	int	has_earlytap, has_press;
	int	has_palmdetect, has_rotate;
	int	has_rel;
	int	i;
	int	retval;
	struct	i2c_client *client = pdata->i2c_client;

	/* cache the PDT entry so later reads don't need to rescan */
	rfi->fn_desc.query_base_addr	= fd->query_base_addr;
	rfi->fn_desc.data_base_addr	= fd->data_base_addr;
	rfi->fn_desc.intr_src_count	= fd->intr_src_count;
	rfi->fn_desc.fn_number		= fd->fn_number;
	rfi->fn_number			= fd->fn_number;
	rfi->num_of_data_sources	= fd->intr_src_count;
	rfi->fn_desc.ctrl_base_addr	= fd->ctrl_base_addr;
	rfi->fn_desc.cmd_base_addr	= fd->cmd_base_addr;

	/*
	 * need to get number of fingers supported, data size, etc.
	 * to be used when getting data since the number of registers to
	 * read depends on the number of fingers supported and data size.
	 */
	retval = synaptics_rmi4_i2c_block_read(pdata, fd->query_base_addr,
							queries,
							sizeof(queries));
	if (retval != sizeof(queries)) {
		dev_err(&client->dev, "%s:read function query registers\n",
							__func__);
		return retval;
	}
	/*
	 * 2D data sources have only 3 bits for the number of fingers
	 * supported - so the encoding is a bit weird.
	 */
	if ((queries[1] & MASK_3BIT) <= 4)
		/* add 1 since zero based */
		rfi->num_of_data_points = (queries[1] & MASK_3BIT) + 1;
	else {
		/*
		 * a value of 5 is up to 10 fingers - 6 and 7 are reserved
		 * (shouldn't get these in a normal 2D source).
		 */
		if ((queries[1] & MASK_3BIT) == 5)
			rfi->num_of_data_points = 10;
	}
	/* Need to get interrupt info for handling interrupts */
	rfi->index_to_intr_reg = (interruptcount + 7)/8;
	if (rfi->index_to_intr_reg != 0)
		rfi->index_to_intr_reg -= 1;
	/*
	 * loop through interrupts for each source in fn $11
	 * and or in a bit to the interrupt mask for each.
	 */
	intr_offset = interruptcount % 8;
	rfi->intr_mask = 0;
	for (i = intr_offset;
		i < ((fd->intr_src_count & MASK_3BIT) + intr_offset); i++)
		rfi->intr_mask |= 1 << i;

	/* Size of just the absolute data for one finger */
	abs_data_size	= queries[5] & MASK_2BIT;
	/* One each for X and Y, one for LSB for X & Y, one for W, one for Z */
	abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
	rfi->size_of_data_register_block = abs_data_blk_size;

	/*
	 * need to determine the size of data to read - this depends on
	 * conditions such as whether Relative data is reported and if Gesture
	 * data is reported.
	 */
	egr_0 = queries[7];
	egr_1 = queries[8];

	/*
	 * Get info about what EGR data is supported, whether it has
	 * Relative data supported, etc.
	 */
	has_pinch	= egr_0 & HAS_PINCH;
	has_flick	= egr_0 & HAS_FLICK;
	has_tap		= egr_0 & HAS_TAP;
	has_earlytap	= egr_0 & HAS_EARLYTAP;
	has_press	= egr_0 & HAS_PRESS;
	has_rotate	= egr_1 & HAS_ROTATE;
	/* NOTE(review): has_rel is taken from queries[1], not an egr
	 * register; assumed intentional per the query layout - confirm
	 * against the RMI4 Fn$11 specification. */
	has_rel		= queries[1] & HAS_RELEASE;
	has_tapandhold	= egr_0 & HAS_TAPANDHOLD;
	has_doubletap	= egr_0 & HAS_DOUBLETAP;
	has_palmdetect	= egr_1 & HAS_PALMDETECT;

	/*
	 * Size of all data including finger status, absolute data for each
	 * finger, relative data and EGR data
	 */
	all_data_blk_size =
		/* finger status, four fingers per register */
		((rfi->num_of_data_points + 3) / 4) +
		/* absolute data, per finger times number of fingers */
		(abs_data_blk_size * rfi->num_of_data_points) +
		/*
		 * two relative registers (if relative is being reported)
		 */
		2 * has_rel +
		/*
		 * F11_2D_data8 is only present if the egr_0
		 * register is non-zero.
		 */
		!!(egr_0) +
		/*
		 * F11_2D_data9 is only present if either egr_0 or
		 * egr_1 registers are non-zero.
		 */
		(egr_0 || egr_1) +
		/*
		 * F11_2D_data10 is only present if EGR_PINCH or EGR_FLICK of
		 * egr_0 reports as 1.
		 */
		!!(has_pinch | has_flick) +
		/*
		 * F11_2D_data11 and F11_2D_data12 are only present if
		 * EGR_FLICK of egr_0 reports as 1.
		 */
		2 * !!(has_flick);
	return retval;
}
/**
 * synpatics_rmi4_touchpad_config() - confiures the rmi4 touchpad device
 * @pdata: pointer to synaptics_rmi4_data structure
 * @rfi: pointer to synaptics_rmi4_fn structure
 *
 * Reads the Fn$11 query and control registers and caches the sensor's
 * maximum X/Y coordinates in @pdata for later input-device setup.
 * Returns the last block-read result (positive on success, negative on
 * error).
 */
int synpatics_rmi4_touchpad_config(struct synaptics_rmi4_data *pdata,
						struct synaptics_rmi4_fn *rfi)
{
	/*
	 * For the data source - print info and do any
	 * source specific configuration.
	 */
	unsigned char data[BUF_LEN];
	int retval = 0;
	struct	i2c_client *client = pdata->i2c_client;

	/* Get and print some info about the data source... */
	/* To Query 2D devices we need to read from the address obtained
	 * from the function descriptor stored in the RMI function info.
	 */
	retval = synaptics_rmi4_i2c_block_read(pdata,
						rfi->fn_desc.query_base_addr,
						data, QUERY_LEN);
	if (retval != QUERY_LEN)
		dev_err(&client->dev, "%s:read query registers failed\n",
								__func__);
	else {
		retval = synaptics_rmi4_i2c_block_read(pdata,
						rfi->fn_desc.ctrl_base_addr,
						data, DATA_BUF_LEN);
		if (retval != DATA_BUF_LEN) {
			dev_err(&client->dev,
				"%s:read control registers failed\n",
								__func__);
			return retval;
		}
		/* Store these for use later*/
		/* NOTE(review): offsets 6..9 are assumed to hold the packed
		 * 12-bit max X/Y per the Fn$11 control layout; the MASK_5BIT
		 * on data[8] looks inconsistent with MASK_8BIT on data[6] -
		 * confirm against the RMI4 register map before changing. */
		pdata->sensor_max_x = ((data[6] & MASK_8BIT) << 0) |
						((data[7] & MASK_4BIT) << 8);
		pdata->sensor_max_y = ((data[8] & MASK_5BIT) << 0) |
						((data[9] & MASK_4BIT) << 8);
	}
	return retval;
}
/**
 * synaptics_rmi4_i2c_query_device() - query the rmi4 device
 * @pdata: pointer to synaptics_rmi4_data structure
 *
 * Scans the Page Descriptor Table to enumerate the RMI4 functions the
 * device supports, records the Fn$01 base addresses, reads the standard
 * Fn$01 queries (product/version info), configures the touchpad function
 * and enables its interrupt sources. Returns 0 on success or a negative
 * errno.
 */
static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
{
	int i;
	int retval;
	unsigned char std_queries[STD_QUERY_LEN];
	unsigned char intr_count = 0;
	int data_sources = 0;
	unsigned int ctrl_offset;
	struct synaptics_rmi4_fn *rfi;
	struct synaptics_rmi4_fn_desc	rmi_fd;
	struct synaptics_rmi4_device_info *rmi;
	struct	i2c_client *client = pdata->i2c_client;

	/*
	 * init the physical drivers RMI module
	 * info list of functions
	 */
	INIT_LIST_HEAD(&pdata->rmi4_mod_info.support_fn_list);

	/*
	 * Read the Page Descriptor Table to determine what functions
	 * are present
	 */
	for (i = PDT_START_SCAN_LOCATION; i > PDT_END_SCAN_LOCATION;
						i -= PDT_ENTRY_SIZE) {
		retval = synaptics_rmi4_i2c_block_read(pdata, i,
						(unsigned char *)&rmi_fd,
						sizeof(rmi_fd));
		if (retval != sizeof(rmi_fd)) {
			/* failed to read next PDT entry */
			dev_err(&client->dev, "%s: read error\n", __func__);
			return -EIO;
		}
		rfi = NULL;
		if (rmi_fd.fn_number) {
			switch (rmi_fd.fn_number & MASK_8BIT) {
			case SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM:
				/* Fn$01: remember its bases for later use */
				pdata->fn01_query_base_addr =
						rmi_fd.query_base_addr;
				pdata->fn01_ctrl_base_addr =
						rmi_fd.ctrl_base_addr;
				pdata->fn01_data_base_addr =
						rmi_fd.data_base_addr;
				break;
			case SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM:
				if (rmi_fd.intr_src_count) {
					rfi = kmalloc(sizeof(*rfi),
								GFP_KERNEL);
					if (!rfi) {
						dev_err(&client->dev,
							"%s:kmalloc failed\n",
								__func__);
						return -ENOMEM;
					}
					retval = synpatics_rmi4_touchpad_detect
								(pdata,	rfi,
								&rmi_fd,
								intr_count);
					if (retval < 0) {
						kfree(rfi);
						return retval;
					}
				}
				break;
			}
			/* interrupt count for next iteration */
			intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
			/*
			 * We only want to add functions to the list
			 * that have data associated with them.
			 */
			if (rfi && rmi_fd.intr_src_count) {
				/* link this function info to the RMI module */
				mutex_lock(&(pdata->fn_list_mutex));
				list_add_tail(&rfi->link,
					&pdata->rmi4_mod_info.support_fn_list);
				mutex_unlock(&(pdata->fn_list_mutex));
			}
		} else {
			/*
			 * A zero in the function number
			 * signals the end of the PDT
			 */
			dev_dbg(&client->dev,
				"%s:end of PDT\n", __func__);
			break;
		}
	}
	/*
	 * calculate the interrupt register count - used in the
	 * ISR to read the correct number of interrupt registers
	 */
	pdata->number_of_interrupt_register = (intr_count + 7) / 8;
	/*
	 * Function $01 will be used to query the product properties,
	 * and product ID so we had to read the PDT above first to get
	 * the Fn $01 query address and prior to filling in the product
	 * info. NOTE: Even an unflashed device will still have FN $01.
	 */

	/* Load up the standard queries and get the RMI4 module info */
	retval = synaptics_rmi4_i2c_block_read(pdata,
					pdata->fn01_query_base_addr,
					std_queries,
					sizeof(std_queries));
	if (retval != sizeof(std_queries)) {
		dev_err(&client->dev, "%s:Failed reading queries\n",
							__func__);
		return -EIO;
	}

	/* Currently supported RMI version is 4.0 */
	pdata->rmi4_mod_info.version_major	= 4;
	pdata->rmi4_mod_info.version_minor	= 0;
	/*
	 * get manufacturer id, product_props, product info,
	 * date code, tester id, serial num and product id (name)
	 */
	pdata->rmi4_mod_info.manufacturer_id	= std_queries[0];
	pdata->rmi4_mod_info.product_props	= std_queries[1];
	pdata->rmi4_mod_info.product_info[0]	= std_queries[2];
	pdata->rmi4_mod_info.product_info[1]	= std_queries[3];
	/* year - 2001-2032 */
	pdata->rmi4_mod_info.date_code[0]	= std_queries[4] & MASK_5BIT;
	/* month - 1-12 */
	pdata->rmi4_mod_info.date_code[1]	= std_queries[5] & MASK_4BIT;
	/* day - 1-31 */
	pdata->rmi4_mod_info.date_code[2]	= std_queries[6] & MASK_5BIT;
	pdata->rmi4_mod_info.tester_id = ((std_queries[7] & MASK_7BIT) << 8) |
						(std_queries[8] & MASK_7BIT);
	pdata->rmi4_mod_info.serial_number =
		((std_queries[9] & MASK_7BIT) << 8) |
				(std_queries[10] & MASK_7BIT);
	memcpy(pdata->rmi4_mod_info.product_id_string, &std_queries[11], 10);

	/* Check if this is a Synaptics device - report if not. */
	if (pdata->rmi4_mod_info.manufacturer_id != 1)
		dev_err(&client->dev, "%s: non-Synaptics mfg id:%d\n",
			__func__, pdata->rmi4_mod_info.manufacturer_id);

	list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
		data_sources += rfi->num_of_data_sources;
	if (data_sources) {
		rmi = &(pdata->rmi4_mod_info);
		list_for_each_entry(rfi, &rmi->support_fn_list, link) {
			if (rfi->num_of_data_sources) {
				if (rfi->fn_number ==
					SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
					retval = synpatics_rmi4_touchpad_config
								(pdata, rfi);
					if (retval < 0)
						return retval;
				} else
					dev_err(&client->dev,
						"%s:fn_number not supported\n",
								__func__);
				/*
				 * Turn on interrupts for this
				 * function's data sources.
				 */
				ctrl_offset = pdata->fn01_ctrl_base_addr + 1 +
							rfi->index_to_intr_reg;
				retval = synaptics_rmi4_i2c_byte_write(pdata,
							ctrl_offset,
							rfi->intr_mask);
				if (retval < 0)
					return retval;
			}
		}
	}
	return 0;
}
/**
 * synaptics_rmi4_probe() - Initialze the i2c-client touchscreen driver
 * @client: i2c client structure pointer
 * @dev_id: i2c device id pointer
 *
 * Allocates and initializes the instance data, optionally enables the
 * supply regulator, scans the device's Page Descriptor Table, sets up the
 * input device, requests the threaded attention irq and registers the
 * input device. Returns 0 on success or a negative errno, unwinding all
 * acquired resources on failure.
 */
static int __devinit synaptics_rmi4_probe
			(struct i2c_client *client, const struct i2c_device_id *dev_id)
{
	int retval;
	unsigned char intr_status[4];
	struct synaptics_rmi4_data *rmi4_data;
	const struct synaptics_rmi4_platform_data *platformdata =
						client->dev.platform_data;

	if (!i2c_check_functionality(client->adapter,
					I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "i2c smbus byte data not supported\n");
		return -EIO;
	}

	if (!platformdata) {
		dev_err(&client->dev, "%s: no platform data\n", __func__);
		return -EINVAL;
	}

	/*
	 * Allocate and initialize the instance data for this client.
	 * (Was "sizeof(struct synaptics_rmi4_data) * 2", which allocated
	 * twice the needed memory for this single-instance structure.)
	 */
	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
	if (!rmi4_data) {
		dev_err(&client->dev, "%s: no memory allocated\n", __func__);
		return -ENOMEM;
	}

	rmi4_data->input_dev = input_allocate_device();
	if (rmi4_data->input_dev == NULL) {
		dev_err(&client->dev, "%s:input device alloc failed\n",
						__func__);
		retval = -ENOMEM;
		goto err_input;
	}

	if (platformdata->regulator_en) {
		rmi4_data->regulator = regulator_get(&client->dev, "vdd");
		if (IS_ERR(rmi4_data->regulator)) {
			dev_err(&client->dev, "%s:get regulator failed\n",
								__func__);
			retval = PTR_ERR(rmi4_data->regulator);
			goto err_regulator;
		}
		regulator_enable(rmi4_data->regulator);
	}

	init_waitqueue_head(&rmi4_data->wait);
	/*
	 * Copy i2c_client pointer into RTID's i2c_client pointer for
	 * later use in rmi4_read, rmi4_write, etc.
	 */
	rmi4_data->i2c_client		= client;
	/* So we set the page correctly the first time */
	rmi4_data->current_page		= MASK_16BIT;
	rmi4_data->board		= platformdata;
	rmi4_data->touch_stopped	= false;

	/* init the mutexes for maintain the lists */
	mutex_init(&(rmi4_data->fn_list_mutex));
	mutex_init(&(rmi4_data->rmi4_page_mutex));

	/*
	 * Register physical driver - this will call the detect function that
	 * will then scan the device and determine the supported
	 * rmi4 functions.
	 */
	retval = synaptics_rmi4_i2c_query_device(rmi4_data);
	if (retval) {
		dev_err(&client->dev, "%s: rmi4 query device failed\n",
							__func__);
		goto err_query_dev;
	}

	/* Store the instance data in the i2c_client */
	i2c_set_clientdata(client, rmi4_data);

	/*initialize the input device parameters */
	rmi4_data->input_dev->name	= DRIVER_NAME;
	rmi4_data->input_dev->phys	= "Synaptics_Clearpad";
	rmi4_data->input_dev->id.bustype = BUS_I2C;
	rmi4_data->input_dev->dev.parent = &client->dev;
	input_set_drvdata(rmi4_data->input_dev, rmi4_data);

	/* Initialize the function handlers for rmi4 */
	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
	set_bit(EV_ABS, rmi4_data->input_dev->evbit);

	input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, 0,
					rmi4_data->sensor_max_x, 0, 0);
	input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
					rmi4_data->sensor_max_y, 0, 0);
	input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
						MAX_TOUCH_MAJOR, 0, 0);

	/* Clear interrupts before arming the irq */
	synaptics_rmi4_i2c_block_read(rmi4_data,
			rmi4_data->fn01_data_base_addr + 1, intr_status,
				rmi4_data->number_of_interrupt_register);
	retval = request_threaded_irq(platformdata->irq_number, NULL,
					synaptics_rmi4_irq,
					platformdata->irq_type,
					DRIVER_NAME, rmi4_data);
	if (retval) {
		dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
				__func__, platformdata->irq_number);
		goto err_query_dev;
	}

	retval = input_register_device(rmi4_data->input_dev);
	if (retval) {
		dev_err(&client->dev, "%s:input register failed\n", __func__);
		goto err_free_irq;
	}

	return retval;

err_free_irq:
	free_irq(platformdata->irq_number, rmi4_data);
err_query_dev:
	if (platformdata->regulator_en) {
		regulator_disable(rmi4_data->regulator);
		regulator_put(rmi4_data->regulator);
	}
err_regulator:
	input_free_device(rmi4_data->input_dev);
	rmi4_data->input_dev = NULL;
err_input:
	kfree(rmi4_data);
	return retval;
}
/**
 * synaptics_rmi4_remove() - Removes the i2c-client touchscreen driver
 * @client: i2c client structure pointer
 *
 * Stops the reporting thread, releases the irq, unregisters the input
 * device, releases the supply regulator if one was acquired and frees
 * the instance data. Always returns 0.
 */
static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
{
	struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	/* stop the irq thread loop and wake any sleeper before teardown */
	rmi4_data->touch_stopped = true;
	wake_up(&rmi4_data->wait);
	free_irq(pdata->irq_number, rmi4_data);
	input_unregister_device(rmi4_data->input_dev);
	if (pdata->regulator_en) {
		regulator_disable(rmi4_data->regulator);
		regulator_put(rmi4_data->regulator);
	}
	kfree(rmi4_data);

	return 0;
}
#ifdef CONFIG_PM
/**
 * synaptics_rmi4_suspend() - suspend the touch screen controller
 * @dev: pointer to device structure
 *
 * Stops reporting, disables the irq, clears the pending interrupt status,
 * masks the touchpad interrupt source in Fn$01 and cuts the supply if the
 * platform uses a regulator. Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_suspend(struct device *dev)
{
	/* Touch sleep mode */
	int retval;
	/*
	 * The block read below fetches number_of_interrupt_register bytes;
	 * use the same 4-byte buffer as the report path so a device with
	 * more than one interrupt register cannot overrun the stack
	 * (previously a single unsigned char).
	 */
	unsigned char intr_status[4];
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	rmi4_data->touch_stopped = true;
	disable_irq(pdata->irq_number);

	retval = synaptics_rmi4_i2c_block_read(rmi4_data,
				rmi4_data->fn01_data_base_addr + 1,
				intr_status,
				rmi4_data->number_of_interrupt_register);
	if (retval < 0)
		return retval;

	/* mask the touchpad interrupt source in Fn$01 control 1 */
	retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
					rmi4_data->fn01_ctrl_base_addr + 1,
					(intr_status[0] & ~TOUCHPAD_CTRL_INTR));
	if (retval < 0)
		return retval;

	if (pdata->regulator_en)
		regulator_disable(rmi4_data->regulator);

	return 0;
}
/**
 * synaptics_rmi4_resume() - resume the touch screen controller
 * @dev: pointer to device structure
 *
 * Re-enables the supply (if any) and the irq, restarts reporting, clears
 * the pending interrupt status and unmasks the touchpad interrupt source
 * in Fn$01. Returns 0 on success or a negative errno.
 */
static int synaptics_rmi4_resume(struct device *dev)
{
	int retval;
	/*
	 * The block read below fetches number_of_interrupt_register bytes;
	 * use the same 4-byte buffer as the report path so a device with
	 * more than one interrupt register cannot overrun the stack
	 * (previously a single unsigned char).
	 */
	unsigned char intr_status[4];
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	if (pdata->regulator_en)
		regulator_enable(rmi4_data->regulator);

	enable_irq(pdata->irq_number);
	rmi4_data->touch_stopped = false;

	retval = synaptics_rmi4_i2c_block_read(rmi4_data,
				rmi4_data->fn01_data_base_addr + 1,
				intr_status,
				rmi4_data->number_of_interrupt_register);
	if (retval < 0)
		return retval;

	/* unmask the touchpad interrupt source in Fn$01 control 1 */
	retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
					rmi4_data->fn01_ctrl_base_addr + 1,
					(intr_status[0] | TOUCHPAD_CTRL_INTR));
	if (retval < 0)
		return retval;

	return 0;
}
/* PM callbacks, hooked into the i2c driver only when CONFIG_PM is set */
static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
	.suspend = synaptics_rmi4_suspend,
	.resume = synaptics_rmi4_resume,
};
#endif
/* I2C device ids handled by this driver (also used for module autoload) */
static const struct i2c_device_id synaptics_rmi4_id_table[] = {
	{ DRIVER_NAME, 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
/* I2C driver glue: probe/remove plus the optional PM operations */
static struct i2c_driver synaptics_rmi4_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &synaptics_rmi4_dev_pm_ops,
#endif
	},
	.probe = synaptics_rmi4_probe,
	.remove = __devexit_p(synaptics_rmi4_remove),
	.id_table = synaptics_rmi4_id_table,
};
/**
 * synaptics_rmi4_init() - Initialize the touchscreen driver
 *
 * Registers the i2c driver with the i2c core.  Returns the
 * i2c_add_driver() result: 0 on success, negative errno otherwise.
 */
static int __init synaptics_rmi4_init(void)
{
	return i2c_add_driver(&synaptics_rmi4_driver);
}
/**
 * synaptics_rmi4_exit() - De-initialize the touchscreen driver
 *
 * Unregisters the i2c driver from the i2c core on module unload.
 */
static void __exit synaptics_rmi4_exit(void)
{
	i2c_del_driver(&synaptics_rmi4_driver);
}
/* Module entry points and metadata */
module_init(synaptics_rmi4_init);
module_exit(synaptics_rmi4_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("naveen.gaddipati@stericsson.com, js.ha@stericsson.com");
MODULE_DESCRIPTION("synaptics rmi4 i2c touch Driver");
MODULE_ALIAS("i2c:synaptics_rmi4_ts");
| gpl-2.0 |
sloanyang/android_kernel_huawei_mediapad10fhd | net/netfilter/ipset/ip_set_bitmap_ip.c | 2420 | 13941 | /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Kernel module implementing an IP set type: the bitmap:ip type */
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
#define IP_SET_BITMAP_TIMEOUT
#include <linux/netfilter/ipset/ip_set_timeout.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("bitmap:ip type of IP sets");
MODULE_ALIAS("ip_set_bitmap:ip");
/* Type structure
 *
 * Private data of one bitmap:ip set.  'members' is either a plain
 * bitmap (one bit per element) or, for the timeout variant, an array
 * of 'unsigned long' timeout values, one slot per element.
 */
struct bitmap_ip {
	void *members;		/* the set members */
	u32 first_ip;		/* host byte order, included in range */
	u32 last_ip;		/* host byte order, included in range */
	u32 elements;		/* number of max elements in the set */
	u32 hosts;		/* number of hosts in a subnet */
	size_t memsize;		/* members size */
	u8 netmask;		/* subnet netmask */
	u32 timeout;		/* timeout parameter */
	struct timer_list gc;	/* garbage collection */
};
/* Base variant */

/* Map a host-order IPv4 address to its slot index in the bitmap. */
static inline u32
ip_to_id(const struct bitmap_ip *m, u32 ip)
{
	/* Drop the host bits of the address, then index from first_ip */
	u32 base = ip & ip_set_hostmask(m->netmask);

	return (base - m->first_ip) / m->hosts;
}
/* Membership test for the plain variant: is the slot's bit set? */
static int
bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
{
	const struct bitmap_ip *map = set->data;
	const u16 *slot = value;

	return !!test_bit(*slot, map->members);
}
/* Add a slot to the plain variant; refuse duplicates. */
static int
bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
{
	struct bitmap_ip *map = set->data;
	u16 slot = *(u16 *)value;

	/* test_and_set_bit() reports whether the bit was already set */
	return test_and_set_bit(slot, map->members) ? -IPSET_ERR_EXIST : 0;
}
/* Delete a slot from the plain variant; error if it was not a member. */
static int
bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
{
	struct bitmap_ip *map = set->data;
	u16 slot = *(u16 *)value;

	return test_and_clear_bit(slot, map->members) ? 0 : -IPSET_ERR_EXIST;
}
/*
 * Netlink dump of the set members (plain variant).  Iteration state is
 * kept in cb->args[2] so the dump can resume across multiple skbs; it
 * is reset to 0 once the whole set has been listed.
 */
static int
bitmap_ip_list(const struct ip_set *set,
	       struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct bitmap_ip *map = set->data;
	struct nlattr *atd, *nested;
	u32 id, first = cb->args[2];	/* first id attempted in this skb */

	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;
	for (; cb->args[2] < map->elements; cb->args[2]++) {
		id = cb->args[2];
		if (!test_bit(id, map->members))
			continue;
		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested) {
			if (id == first) {
				/* Not even one element fits: give up */
				nla_nest_cancel(skb, atd);
				return -EMSGSIZE;
			} else
				goto nla_put_failure;
		}
		/* NB: NLA_PUT_IPADDR4 jumps to nla_put_failure on overflow */
		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
				htonl(map->first_ip + id * map->hosts));
		ipset_nest_end(skb, nested);
	}
	ipset_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[2] = 0;
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	ipset_nest_end(skb, atd);
	if (unlikely(id == first)) {
		/* Nothing at all could be emitted into this skb */
		cb->args[2] = 0;
		return -EMSGSIZE;
	}
	return 0;
}
/* Timeout variant */

/* Membership test: a slot is a member iff its timeout is still running. */
static int
bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
{
	const struct bitmap_ip *map = set->data;
	const unsigned long *table = map->members;
	u16 slot = *(u16 *)value;

	return ip_set_timeout_test(table[slot]);
}
/* Add a slot with a timeout; refuse to re-add a still-live member. */
static int
bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
{
	struct bitmap_ip *map = set->data;
	unsigned long *table = map->members;
	u16 slot = *(u16 *)value;

	if (ip_set_timeout_test(table[slot]))
		return -IPSET_ERR_EXIST;

	table[slot] = ip_set_timeout_set(timeout);
	return 0;
}
/* Delete a slot; the slot is cleared either way, the return value only
 * reports whether it was a live member beforehand. */
static int
bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
{
	struct bitmap_ip *map = set->data;
	unsigned long *table = map->members;
	u16 slot = *(u16 *)value;
	int was_member = ip_set_timeout_test(table[slot]);

	table[slot] = IPSET_ELEM_UNSET;
	return was_member ? 0 : -IPSET_ERR_EXIST;
}
/*
 * Netlink dump for the timeout variant: like bitmap_ip_list() but only
 * live (unexpired) slots are reported, each together with its remaining
 * timeout.  cb->args[2] carries the resume position between skbs.
 */
static int
bitmap_ip_tlist(const struct ip_set *set,
		struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct bitmap_ip *map = set->data;
	struct nlattr *adt, *nested;
	u32 id, first = cb->args[2];	/* first id attempted in this skb */
	const unsigned long *members = map->members;

	adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
	if (!adt)
		return -EMSGSIZE;
	for (; cb->args[2] < map->elements; cb->args[2]++) {
		id = cb->args[2];
		if (!ip_set_timeout_test(members[id]))
			continue;
		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested) {
			if (id == first) {
				/* Not even one element fits: give up */
				nla_nest_cancel(skb, adt);
				return -EMSGSIZE;
			} else
				goto nla_put_failure;
		}
		/* NB: the NLA_PUT_* macros jump to nla_put_failure */
		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
				htonl(map->first_ip + id * map->hosts));
		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
			      htonl(ip_set_timeout_get(members[id])));
		ipset_nest_end(skb, nested);
	}
	ipset_nest_end(skb, adt);
	/* Set listing finished */
	cb->args[2] = 0;
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	ipset_nest_end(skb, adt);
	if (unlikely(id == first)) {
		cb->args[2] = 0;
		return -EMSGSIZE;
	}
	return 0;
}
/* Kernel-side add/del/test: extract the IPv4 address from the packet,
 * range-check it and dispatch to the variant's adt handler. */
static int
bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));

	/* Addresses outside the configured range are rejected */
	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	ip = ip_to_id(map, ip);
	return adtfn(set, &ip, map->timeout);
}
/*
 * Userspace add/del/test.  A single IP, an inclusive range
 * (IPSET_ATTR_IP_TO) or a CIDR prefix may be supplied; ranges are
 * walked in map->hosts steps.
 */
static int
bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
	       enum ipset_adt adt, u32 *lineno, u32 flags)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 timeout = map->timeout;	/* default: the set's own timeout */
	u32 ip, ip_to, id;
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
	if (ret)
		return ret;

	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		/* Per-element timeouts require the timeout variant */
		if (!with_timeout(map->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	if (adt == IPSET_TEST) {
		id = ip_to_id(map, ip);
		return adtfn(set, &id, timeout);
	}

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		if (ip > ip_to) {
			swap(ip, ip_to);
			/* Re-check the (new) lower bound after the swap */
			if (ip < map->first_ip)
				return -IPSET_ERR_BITMAP_RANGE;
		}
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		/* Expand the prefix to its first..last address */
		ip &= ip_set_hostmask(cidr);
		ip_to = ip | ~ip_set_hostmask(cidr);
	} else
		ip_to = ip;

	if (ip_to > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	for (; !before(ip_to, ip); ip += map->hosts) {
		id = ip_to_id(map, ip);
		ret = adtfn(set, &id, timeout);

		/* -IPSET_ERR_EXIST may be tolerated depending on flags */
		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}
/*
 * Release everything owned by the set: stop the GC timer first
 * (timeout variant only), then free the member storage and the map.
 */
static void
bitmap_ip_destroy(struct ip_set *set)
{
	struct bitmap_ip *map = set->data;

	if (with_timeout(map->timeout))
		del_timer_sync(&map->gc);

	ip_set_free(map->members);
	kfree(map);

	set->data = NULL;
}
/* Empty the set: zero the entire member area (bits or timeout slots) */
static void
bitmap_ip_flush(struct ip_set *set)
{
	struct bitmap_ip *map = set->data;

	memset(map->members, 0, map->memsize);
}
/*
 * Fill in the header part of a LIST/SAVE netlink message: range,
 * optional netmask, reference count, memory footprint and timeout.
 * The NLA_PUT_* macros jump to nla_put_failure when the skb is full.
 */
static int
bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
{
	const struct bitmap_ip *map = set->data;
	struct nlattr *nested;

	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
	/* Netmask is reported only when it narrows the default /32 */
	if (map->netmask != 32)
		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
	/* NOTE(review): ref - 1 appears to exclude one internal reference
	 * from the count reported to userspace — verify against callers */
	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
		      htonl(sizeof(*map) + map->memsize));
	if (with_timeout(map->timeout))
		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
	ipset_nest_end(skb, nested);
	return 0;
nla_put_failure:
	return -EMSGSIZE;
}
static bool
bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct bitmap_ip *x = a->data;
const struct bitmap_ip *y = b->data;
return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip &&
x->netmask == y->netmask &&
x->timeout == y->timeout;
}
/* Operations for the plain (no per-element timeout) variant */
static const struct ip_set_type_variant bitmap_ip = {
	.kadt = bitmap_ip_kadt,
	.uadt = bitmap_ip_uadt,
	.adt = {
		[IPSET_ADD] = bitmap_ip_add,
		[IPSET_DEL] = bitmap_ip_del,
		[IPSET_TEST] = bitmap_ip_test,
	},
	.destroy = bitmap_ip_destroy,
	.flush = bitmap_ip_flush,
	.head = bitmap_ip_head,
	.list = bitmap_ip_list,
	.same_set = bitmap_ip_same_set,
};
/* Operations for the timeout variant (per-element expiry) */
static const struct ip_set_type_variant bitmap_tip = {
	.kadt = bitmap_ip_kadt,
	.uadt = bitmap_ip_uadt,
	.adt = {
		[IPSET_ADD] = bitmap_ip_tadd,
		[IPSET_DEL] = bitmap_ip_tdel,
		[IPSET_TEST] = bitmap_ip_ttest,
	},
	.destroy = bitmap_ip_destroy,
	.flush = bitmap_ip_flush,
	.head = bitmap_ip_head,
	.list = bitmap_ip_tlist,
	.same_set = bitmap_ip_same_set,
};
/*
 * Garbage collector for the timeout variant: periodically clears
 * expired slots, then re-arms its own timer.
 */
static void
bitmap_ip_gc(unsigned long ul_set)
{
	struct ip_set *set = (struct ip_set *) ul_set;
	struct bitmap_ip *map = set->data;
	unsigned long *table = map->members;
	u32 id;

	/* We run parallel with other readers (test element)
	 * but adding/deleting new entries is locked out */
	read_lock_bh(&set->lock);
	for (id = 0; id < map->elements; id++)
		if (ip_set_timeout_expired(table[id]))
			table[id] = IPSET_ELEM_UNSET;
	read_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
	add_timer(&map->gc);
}
/*
 * Arm the garbage-collection timer for a freshly created timeout set.
 * setup_timer()/mod_timer() replace the open-coded init_timer() plus
 * manual .data/.function/.expires assignments and add_timer() — the
 * standard idiom for initializing and starting a timer in one go.
 */
static void
bitmap_ip_gc_init(struct ip_set *set)
{
	struct bitmap_ip *map = set->data;

	setup_timer(&map->gc, bitmap_ip_gc, (unsigned long) set);
	mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(map->timeout) * HZ);
}
/* Create bitmap:ip type of sets */
/*
 * Common constructor tail: allocate the member storage (map->memsize
 * must already be set by the caller) and fill in the range fields.
 * Returns false when the allocation fails; the caller frees the map.
 */
static bool
init_map_ip(struct ip_set *set, struct bitmap_ip *map,
	    u32 first_ip, u32 last_ip,
	    u32 elements, u32 hosts, u8 netmask)
{
	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;

	map->first_ip = first_ip;
	map->last_ip = last_ip;
	map->elements = elements;
	map->hosts = hosts;
	map->netmask = netmask;
	/* The timeout-variant constructor overrides this afterwards */
	map->timeout = IPSET_NO_TIMEOUT;

	set->data = map;
	set->family = AF_INET;

	return true;
}
/*
 * CREATE handler: parse the range (IP..IP_TO or IP/CIDR), apply the
 * optional netmask, size the bitmap and set up either the plain or the
 * timeout variant.
 */
static int
bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
	struct bitmap_ip *map;
	u32 first_ip, last_ip, hosts, elements;
	u8 netmask = 32;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
		if (ret)
			return ret;
		/* Normalize so that first_ip <= last_ip */
		if (first_ip > last_ip) {
			u32 tmp = first_ip;

			first_ip = last_ip;
			last_ip = tmp;
		}
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr >= 32)
			return -IPSET_ERR_INVALID_CIDR;
		last_ip = first_ip | ~ip_set_hostmask(cidr);
	} else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if (netmask > 32)
			return -IPSET_ERR_INVALID_NETMASK;

		/* Round the range out to whole subnets */
		first_ip &= ip_set_hostmask(netmask);
		last_ip |= ~ip_set_hostmask(netmask);
	}

	if (netmask == 32) {
		hosts = 1;
		elements = last_ip - first_ip + 1;
	} else {
		u8 mask_bits;
		u32 mask;

		mask = range_to_mask(first_ip, last_ip, &mask_bits);
		if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
		    netmask <= mask_bits)
			return -IPSET_ERR_BITMAP_RANGE;

		pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
		hosts = 2 << (32 - netmask - 1);
		elements = 2 << (netmask - mask_bits - 1);
	}
	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
		return -IPSET_ERR_BITMAP_RANGE_SIZE;

	pr_debug("hosts %u, elements %u\n", hosts, elements);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		/* Timeout variant: one unsigned long timeout slot per element */
		map->memsize = elements * sizeof(unsigned long);

		if (!init_map_ip(set, map, first_ip, last_ip,
				 elements, hosts, netmask)) {
			kfree(map);
			return -ENOMEM;
		}

		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		set->variant = &bitmap_tip;

		bitmap_ip_gc_init(set);
	} else {
		/* Plain variant: one bit per element */
		map->memsize = bitmap_bytes(0, elements - 1);

		if (!init_map_ip(set, map, first_ip, last_ip,
				 elements, hosts, netmask)) {
			kfree(map);
			return -ENOMEM;
		}

		set->variant = &bitmap_ip;
	}
	return 0;
}
/* Registration record for the bitmap:ip set type, including the
 * netlink attribute policies for CREATE and ADD/DEL/TEST */
static struct ip_set_type bitmap_ip_type __read_mostly = {
	.name		= "bitmap:ip",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP,
	.dimension	= IPSET_DIM_ONE,
	.family		= AF_INET,
	.revision	= 0,
	.create		= bitmap_ip_create,
	.create_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_NETMASK]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
	},
	.me		= THIS_MODULE,
};
/* Register the bitmap:ip type with the ip_set core on module load */
static int __init
bitmap_ip_init(void)
{
	return ip_set_type_register(&bitmap_ip_type);
}
/* Unregister the bitmap:ip type on module unload */
static void __exit
bitmap_ip_fini(void)
{
	ip_set_type_unregister(&bitmap_ip_type);
}

module_init(bitmap_ip_init);
module_exit(bitmap_ip_fini);
| gpl-2.0 |
rjmccabe3701/LinuxViewPageTables | drivers/staging/dgrp/dgrp_ports_ops.c | 2676 | 3693 | /*
*
* Copyright 1999-2000 Digi International (www.digi.com)
* James Puzzo <jamesp at digi dot com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
/*
*
* Filename:
*
* dgrp_ports_ops.c
*
* Description:
*
* Handle the file operations required for the /proc/dgrp/ports/...
* devices. Basically gathers tty status for the node and returns it.
*
* Author:
*
* James A. Puzzo
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include "dgrp_common.h"
/* File operation declarations */
static int dgrp_ports_open(struct inode *, struct file *);

/* seq_file-backed operations for the /proc/dgrp/ports/... entries */
const struct file_operations dgrp_ports_ops = {
	.owner = THIS_MODULE,
	.open = dgrp_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};
/*
 * seq_file start: emit the column header once (only when iteration
 * begins at position 0) and hand the position back as the iterator.
 */
static void *dgrp_ports_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0)
		seq_puts(seq, "#num tty_open pr_open tot_wait MSTAT IFLAG OFLAG CFLAG BPS DIGIFLAGS\n");

	return pos;
}
/* seq_file next: advance while channels remain, NULL ends the walk. */
static void *dgrp_ports_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct nd_struct *nd = seq->private;

	if (*pos < nd->nd_chan_count) {
		*pos += 1;
		return pos;
	}

	return NULL;
}
/* seq_file stop: nothing to release — the iterator state lives in *pos */
static void dgrp_ports_seq_stop(struct seq_file *seq, void *v)
{
}
/* seq_file show: print one status line for the channel at *pos. */
static int dgrp_ports_seq_show(struct seq_file *seq, void *v)
{
	loff_t *pos = v;
	struct nd_struct *nd = seq->private;
	struct ch_struct *ch;
	struct un_struct *tun, *pun;
	unsigned int waitcnt;
	unsigned int totcnt;

	if (!nd)
		return 0;

	if (*pos >= nd->nd_chan_count)
		return 0;

	ch = &nd->nd_chan[*pos];
	tun = &ch->ch_tun;
	pun = &ch->ch_pun;

	/* Sum the open waiters once; it is needed both for the printed
	 * column and for the total-count test below. */
	waitcnt = ch->ch_wait_count[0] +
		ch->ch_wait_count[1] +
		ch->ch_wait_count[2];

	/*
	 * If port is not open and no one is waiting to
	 * open it, the modem signal values can't be
	 * trusted, and will be zeroed.
	 */
	totcnt = tun->un_open_count + pun->un_open_count + waitcnt;

	seq_printf(seq, "%02d %02d %02d %02d 0x%04X 0x%04X 0x%04X 0x%04X %-6d 0x%04X\n",
		   (int) *pos,
		   tun->un_open_count,
		   pun->un_open_count,
		   waitcnt,
		   (totcnt ? ch->ch_s_mlast : 0),
		   ch->ch_s_iflag,
		   ch->ch_s_oflag,
		   ch->ch_s_cflag,
		   (ch->ch_s_brate ? (1843200 / ch->ch_s_brate) : 0),
		   ch->ch_digi.digi_flags);

	return 0;
}
/* Iterator bundle wired into dgrp_ports_open() via seq_open() */
static const struct seq_operations ports_seq_ops = {
	.start = dgrp_ports_seq_start,
	.next = dgrp_ports_seq_next,
	.stop = dgrp_ports_seq_stop,
	.show = dgrp_ports_seq_show,
};
/**
 * dgrp_ports_open -- open the /proc/dgrp/ports/... device
 * @inode: struct inode *
 * @file: struct file *
 *
 * Opens a seq_file over ports_seq_ops and stashes the per-node data
 * from the proc entry as the sequence's private pointer.
 */
static int dgrp_ports_open(struct inode *inode, struct file *file)
{
	int rtn = seq_open(file, &ports_seq_ops);

	if (rtn == 0) {
		struct seq_file *seq = file->private_data;

		seq->private = PDE_DATA(inode);
	}

	return rtn;
}
| gpl-2.0 |
axxx007xxxz/cm_kernel_motorola_msm8916 | drivers/staging/dgrp/dgrp_common.c | 2676 | 4274 | /*
*
* Copyright 1999 Digi International (www.digi.com)
* James Puzzo <jamesp at digi dot com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
*/
/*
*
* Filename:
*
* dgrp_common.c
*
* Description:
*
* Definitions of global variables and functions which are either
* shared by the tty, mon, and net drivers; or which cross them
* functionally (like the poller).
*
* Author:
*
* James A. Puzzo
*
*/
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include "dgrp_common.h"
/**
 * dgrp_carrier -- check for carrier change state and act
 * @ch: struct ch_struct *
 *
 * Recomputes the physical and virtual senses of carrier for @ch and
 * acts on any transition: wakes threads blocked in open on a virtual
 * carrier rise, hangs up / flushes the tty units on a real physical
 * drop, then refreshes the cached CH_VIRT_CD/CH_PHYS_CD flags.
 */
void dgrp_carrier(struct ch_struct *ch)
{
	struct nd_struct *nd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	/* fix case when the tty has already closed. */

	if (!ch)
		return;
	nd = ch->ch_nd;
	if (!nd)
		return;

	/*
	 * If we are currently waiting to determine the status of the port,
	 * we don't yet know the state of the modem lines.  As a result,
	 * we ignore state changes when we are waiting for the modem lines
	 * to be established.  We know, as a result of code in dgrp_net_ops,
	 * that we will be called again immediately following the reception
	 * of the status message with the true modem status flags in it.
	 */
	if (ch->ch_expect & RR_STATUS)
		return;

	/*
	 * If CH_HANGUP is set, we gotta keep trying to get all the processes
	 * that have the port open to close the port.
	 * So lets just keep sending a hangup every time we get here.
	 */
	if ((ch->ch_flag & CH_HANGUP) &&
	    (ch->ch_tun.un_open_count > 0))
		tty_hangup(ch->ch_tun.un_tty);

	/*
	 * Compute the effective state of both the physical and virtual
	 * senses of carrier.  Virtual carrier is physical carrier OR'd
	 * with the "pretend carrier is there" conditions (forced DCD,
	 * CLOCAL).
	 */

	if (ch->ch_s_mlast & DM_CD)
		phys_carrier = 1;

	if ((ch->ch_s_mlast & DM_CD) ||
	    (ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
	    (ch->ch_flag & CH_CLOCAL))
		virt_carrier = 1;

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 *
	 * The CH_HANGUP condition is intended to prevent any action
	 * except for close.  As a result, we ignore positive carrier
	 * transitions during CH_HANGUP.
	 */
	if (((ch->ch_flag & CH_HANGUP) == 0) &&
	    ((ch->ch_flag & CH_VIRT_CD) == 0) &&
	    (virt_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		nd->nd_tx_work = 1;

		if (waitqueue_active(&ch->ch_flag_wait))
			wake_up_interruptible(&ch->ch_flag_wait);
	}

	/*
	 * Test for a PHYSICAL transition to low, so long as we aren't
	 * currently ignoring physical transitions (which is what "virtual
	 * carrier" indicates).
	 *
	 * The transition of the virtual carrier to low really doesn't
	 * matter... it really only means "ignore carrier state", not
	 * "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) &&
	    ((ch->ch_flag & CH_PHYS_CD) != 0) &&
	    (phys_carrier == 0)) {
		/*
		 * When carrier drops:
		 *
		 * Do a Hard Hangup if that is called for.
		 *
		 * Drop carrier on all open units.
		 *
		 * Flush queues, waking up any task waiting in the
		 * line discipline.
		 *
		 * Send a hangup to the control terminal.
		 *
		 * Enable all select calls.
		 */
		nd->nd_tx_work = 1;

		ch->ch_flag &= ~(CH_LOW | CH_EMPTY | CH_DRAIN | CH_INPUT);

		if (waitqueue_active(&ch->ch_flag_wait))
			wake_up_interruptible(&ch->ch_flag_wait);

		if (ch->ch_tun.un_open_count > 0)
			tty_hangup(ch->ch_tun.un_tty);

		if (ch->ch_pun.un_open_count > 0)
			tty_hangup(ch->ch_pun.un_tty);
	}

	/*
	 * Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flag |= CH_VIRT_CD;
	else
		ch->ch_flag &= ~CH_VIRT_CD;

	if (phys_carrier == 1)
		ch->ch_flag |= CH_PHYS_CD;
	else
		ch->ch_flag &= ~CH_PHYS_CD;
}
| gpl-2.0 |
Gamersab/android_kernel_htc_pico | drivers/edac/edac_device.c | 2932 | 20421 |
/*
* edac_device.c
* (C) 2007 www.douglaskthompson.com
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written by Doug Thompson <norsk5@xmission.com>
*
* edac_device API implementation
* 19 Jan 2007
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include "edac_core.h"
#include "edac_module.h"
/* lock for the list: 'edac_device_list', manipulation of this list
 * is protected by the 'device_ctls_mutex' lock
 */
static DEFINE_MUTEX(device_ctls_mutex);

/* Global list of registered controllers, kept sorted by dev_idx */
static LIST_HEAD(edac_device_list);
#ifdef CONFIG_EDAC_DEBUG
/* Debug-only dump of the interesting fields of an edac device */
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
	debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	debugf3("\tdev = %p\n", edac_dev->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n",
		edac_dev->mod_name, edac_dev->ctl_name);
	debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
/*
 * edac_device_alloc_ctl_info()
 *	Allocate a new edac device control info structure
 *
 *	The control structure is allocated in complete chunk
 *	from the OS.  It is in turn sub allocated to the
 *	various objects that compose the structure
 *
 *	The structure has a 'nr_instance' array within itself.
 *	Each instance represents a major component
 *	Example: L1 cache and L2 cache are 2 instance components
 *
 *	Within each instance is an array of 'nr_blocks' blockoffsets
 *
 *	Returns NULL when the allocation or the sysfs main-kobject
 *	registration fails; on success the returned object must be
 *	released with edac_device_free_ctl_info().
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt;
	int err;

	debugf4("%s() instances=%d blocks=%d\n",
		__func__, nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance,block,attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance,block,attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
	dev_ctl = (struct edac_device_ctl_info *)NULL;

	/* Calc the 'end' offset past end of ONE ctl_info structure
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));

	/* Calc the 'end' offset past the instance array within the ctl_info
	 * which will become the start of the block array
	 */
	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));

	/* Calc the 'end' offset past the dev_blk array
	 * which will become the start of the attrib array, if any.
	 */
	count = nr_instances * nr_blocks;
	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));

	/* Check for case of when an attribute array is specified */
	if (nr_attrib > 0) {
		/* calc how many nr_attrib we need */
		count *= nr_attrib;

		/* Calc the 'end' offset past the attributes array */
		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
	} else {
		/* no attribute array specified */
		pvt = edac_align_ptr(dev_attrib, sz_private);
	}

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);

	debugf4("%s() edac_dev=%p next after end=%p\n",
		__func__, dev_ctl, pvt + sz_private );

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block+offset_value);

			debugf4("%s() instance=%d inst_p=%p block=#%d "
				"block_p=%p name='%s'\n",
				__func__, instance, inst, block,
				blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block */
			blk->nr_attribs = nr_attrib;
			attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
			blk->block_attributes = attrib_p;

			debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
				__func__, blk->block_attributes);

			/* Initialize every user specified attribute in this
			 * block with the data the caller passed in
			 * Each block gets its own copy of pointers,
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per attrib
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				debugf4("%s() alloc-attrib=%p attrib_name='%s' "
					"attrib-spec=%p spec-name=%s\n",
					__func__, attrib, attrib->attr.name,
					&attrib_spec[attr],
					attrib_spec[attr].attr.name
					);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 * edac_device_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */
	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
/*
 * edac_device_free_ctl_info()
 *	frees the memory allocated by the edac_device_alloc_ctl_info()
 *	function
 *
 * The actual kfree() happens later, from the kobject release callback,
 * once the main kobject registered in edac_device_alloc_ctl_info() is
 * unregistered here.
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
/*
* find_edac_device_by_dev
* scans the edac_device list for a specific 'struct device *'
*
* lock to be held prior to call: device_ctls_mutex
*
* Return:
* pointer to control structure managing 'dev'
* NULL if not found on list
*/
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
struct edac_device_ctl_info *edac_dev;
struct list_head *item;
debugf0("%s()\n", __func__);
list_for_each(item, &edac_device_list) {
edac_dev = list_entry(item, struct edac_device_ctl_info, link);
if (edac_dev->dev == dev)
return edac_dev;
}
return NULL;
}
/*
* add_edac_dev_to_global_list
* Before calling this function, caller must
* assign a unique value to edac_dev->dev_idx.
*
* lock to be held prior to call: device_ctls_mutex
*
* Return:
* 0 on success
* 1 on failure.
*/
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	/* default insertion point: tail of the list */
	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);
		if (rover->dev_idx >= edac_dev->dev_idx) {
			/* equal index means a driver bug: duplicate id */
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;
			insert_before = item;
			break;
		}
	}

	/* RCU add so NMI-context readers can traverse concurrently */
	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	/* same struct device already claimed by another instance */
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n",
		dev_name(rover->dev), edac_dev_name(rover),
		rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		" duplicate dev_idx %d in %s()\n", rover->dev_idx,
		__func__);
	return 1;
}
/*
* del_edac_device_from_global_list
*/
/* Unlink @edac_device from the global list and wait out concurrent
 * RCU readers before the caller may reuse or free the structure.
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();

	/* re-init so a stale 'link' can never be followed afterwards */
	INIT_LIST_HEAD(&edac_device->link);
}
/*
* edac_device_workq_function
* performs the operation scheduled by a workq request
*
* this workq is embedded within an edac_device_ctl_info
* structure, that needs to be polled for possible error events.
*
* This operation is to acquire the list mutex lock
* (thus preventing insertation or deletion)
* and then call the device's poll function IFF this device is
* running polled and there is a poll function defined.
*/
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
		(edac_dev->edac_check != NULL)) {
		edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period to start again
	 * if the number of msec is for 1 sec, then adjust to the next
	 * whole one second to save timers firing all over the period
	 * between integral seconds
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}
/*
* edac_device_workq_setup
* initialize a workq item for this edac_device instance
* passing in the new delay period in msec
*/
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* take the arg 'msec' and set it into the control structure
	 * to used in the time period calculation
	 * then calc the number of jiffies that represents
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* optimize here for the 1 second case, which will be normal value, to
	 * fire ON the 1 second time event. This helps reduce all sorts of
	 * timers firing on sub-second basis, while they are happy
	 * to fire together on the 1 second exactly
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}
/*
* edac_device_workq_teardown
* stop the workq processing on this edac_dev
*/
/* Stop polling on @edac_dev.
 *
 * NOTE(review): cancel_delayed_work() does not stop a work item that is
 * already executing; the flush_workqueue() below waits for it, but the
 * handler re-queues itself unless op_state was set to OP_OFFLINE first —
 * callers appear to rely on that ordering (see edac_device_del_device).
 * Confirm before reusing this in a new path.
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	int status;

	status = cancel_delayed_work(&edac_dev->work);
	if (status == 0) {
		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}
/*
* edac_device_reset_delay_period
*
* need to stop any outstanding workq queued up at this time
* because we will be resetting the sleep time.
* Then restart the workq on the new delay
*/
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
					unsigned long value)
{
	/* cancel the current workq request, without the mutex lock
	 * (the workq handler itself takes device_ctls_mutex, so holding
	 * it here while waiting for the handler would deadlock)
	 */
	edac_device_workq_teardown(edac_dev);

	/* acquire the mutex before doing the workq setup */
	mutex_lock(&device_ctls_mutex);

	/* restart the workq request, with new delay value */
	edac_device_workq_setup(edac_dev, value);

	mutex_unlock(&device_ctls_mutex);
}
/*
* edac_device_alloc_index: Allocate a unique device index number
*
* Return:
* allocated index number
*/
/* Hand out monotonically increasing device indexes starting at 0;
 * atomic so concurrent probes never receive the same index.
 */
int edac_device_alloc_index(void)
{
	static atomic_t device_indexes = ATOMIC_INIT(0);

	return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
/**
* edac_device_add_device: Insert the 'edac_dev' structure into the
* edac_device global list and create sysfs entries associated with
* edac_device structure.
* @edac_device: pointer to the edac_device structure to be added to the list
* 'edac_device' structure.
*
* Return:
* 0 Success
* !0 Failure
*/
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/*
		 * enable workq processing on this instance,
		 * default = 1000 msec
		 */
		edac_device_workq_setup(edac_dev, 1000);
	} else {
		/* no check routine: driver delivers errors via interrupt */
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
				"Giving out device to module '%s' controller "
				"'%s': DEV '%s' (%s)\n",
				edac_dev->mod_name,
				edac_dev->ctl_name,
				edac_dev_name(edac_dev),
				edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
/**
* edac_device_del_device:
* Remove sysfs entries for specified edac_device structure and
* then remove edac_device structure from global list
*
* @pdev:
* Pointer to 'struct device' representing edac_device
* structure to remove.
*
* Return:
* Pointer to removed edac_device structure,
* OR NULL if device not found.
*/
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	debugf0("%s()\n", __func__);

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE so the poll handler stops
	 * re-queueing itself before we tear the workq down below */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);
/* Should correctable errors be logged for this controller? */
static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

/* Should uncorrectable errors be logged for this controller? */
static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

/* Should an uncorrectable error panic the machine? */
static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
						*edac_dev)
{
	return edac_dev->panic_on_ue;
}
/*
* edac_device_handle_ce
* perform a common output and handling of an 'edac_dev' CE event
*/
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	/* validate instance index supplied by the low-level driver */
	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	/* validate block index within the selected instance */
	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	/* count at the block level when blocks exist */
	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
			"CE: %s instance: %s block: %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);
/*
* edac_device_handle_ue
* perform a common output and handling of an 'edac_dev' UE event
*/
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	/* validate instance index supplied by the low-level driver */
	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	/* validate block index within the selected instance */
	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	/* count at the block level when blocks exist */
	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
			"UE: %s instance: %s block: %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);

	/* optionally take the whole machine down on an uncorrectable error */
	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
| gpl-2.0 |
Eagerestwolf/kernel_device_lge_connect | drivers/media/video/adv7343.c | 3188 | 11984 | /*
* adv7343 - ADV7343 Video Encoder Driver
*
* The encoder hardware does not support SECAM.
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed .as is. WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <media/adv7343.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
#include "adv7343_regs.h"
MODULE_DESCRIPTION("ADV7343 video encoder driver");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-1");
/* Per-device driver state for one ADV7343 encoder. */
struct adv7343_state {
	struct v4l2_subdev sd;		/* embedded v4l2 sub-device */
	struct v4l2_ctrl_handler hdl;	/* brightness/hue/gain controls */
	/* shadow copies of write-only chip registers */
	u8 reg00;			/* power mode */
	u8 reg01;			/* mode select */
	u8 reg02;			/* mode register 0 */
	u8 reg35;			/* HD mode register 6 */
	u8 reg80;			/* SD mode register 1 */
	u8 reg82;			/* SD mode register 2 */
	u32 output;			/* active ADV7343_*_ID output */
	v4l2_std_id std;		/* active video standard */
};
/* Recover the driver state that embeds a v4l2 sub-device. */
static inline struct adv7343_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct adv7343_state, sd);
}

/* Map a control back to the sub-device owning its control handler. */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct adv7343_state, hdl)->sd;
}

/* Write one 8-bit encoder register over SMBus; returns negative errno
 * from the I2C layer on failure.
 */
static inline int adv7343_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_write_byte_data(client, reg, value);
}
/* Power-up register programming table: flat list of (register, value)
 * pairs consumed two at a time by adv7343_initialize().
 */
static const u8 adv7343_init_reg_val[] = {
	ADV7343_SOFT_RESET, ADV7343_SOFT_RESET_DEFAULT,
	ADV7343_POWER_MODE_REG, ADV7343_POWER_MODE_REG_DEFAULT,

	ADV7343_HD_MODE_REG1, ADV7343_HD_MODE_REG1_DEFAULT,
	ADV7343_HD_MODE_REG2, ADV7343_HD_MODE_REG2_DEFAULT,
	ADV7343_HD_MODE_REG3, ADV7343_HD_MODE_REG3_DEFAULT,
	ADV7343_HD_MODE_REG4, ADV7343_HD_MODE_REG4_DEFAULT,
	ADV7343_HD_MODE_REG5, ADV7343_HD_MODE_REG5_DEFAULT,
	ADV7343_HD_MODE_REG6, ADV7343_HD_MODE_REG6_DEFAULT,
	ADV7343_HD_MODE_REG7, ADV7343_HD_MODE_REG7_DEFAULT,

	ADV7343_SD_MODE_REG1, ADV7343_SD_MODE_REG1_DEFAULT,
	ADV7343_SD_MODE_REG2, ADV7343_SD_MODE_REG2_DEFAULT,
	ADV7343_SD_MODE_REG3, ADV7343_SD_MODE_REG3_DEFAULT,
	ADV7343_SD_MODE_REG4, ADV7343_SD_MODE_REG4_DEFAULT,
	ADV7343_SD_MODE_REG5, ADV7343_SD_MODE_REG5_DEFAULT,
	ADV7343_SD_MODE_REG6, ADV7343_SD_MODE_REG6_DEFAULT,
	ADV7343_SD_MODE_REG7, ADV7343_SD_MODE_REG7_DEFAULT,
	ADV7343_SD_MODE_REG8, ADV7343_SD_MODE_REG8_DEFAULT,

	ADV7343_SD_HUE_REG, ADV7343_SD_HUE_REG_DEFAULT,
	ADV7343_SD_CGMS_WSS0, ADV7343_SD_CGMS_WSS0_DEFAULT,
	ADV7343_SD_BRIGHTNESS_WSS, ADV7343_SD_BRIGHTNESS_WSS_DEFAULT,
};
/*
 * Subcarrier frequency register encoding:
 *
 *                           2^32
 *	FSC(reg) = FSC(Hz) * --------
 *	                     27000000
 *
 * i.e. the colour subcarrier frequency as a 32-bit fixed-point fraction
 * of the 27 MHz master clock.
 */
static const struct adv7343_std_info stdinfo[] = {
	{
		/* FSC(Hz) = 3,579,545.45 Hz */
		SD_STD_NTSC, 569408542, V4L2_STD_NTSC,
	}, {
		/* FSC(Hz) = 3,575,611.00 Hz */
		SD_STD_PAL_M, 568782678, V4L2_STD_PAL_M,
	}, {
		/* FSC(Hz) = 3,582,056.00 */
		SD_STD_PAL_N, 569807903, V4L2_STD_PAL_Nc,
	}, {
		/* FSC(Hz) = 4,433,618.75 Hz */
		SD_STD_PAL_N, 705268427, V4L2_STD_PAL_N,
	}, {
		/* FSC(Hz) = 4,433,618.75 Hz */
		SD_STD_PAL_BDGHI, 705268427, V4L2_STD_PAL,
	}, {
		/* FSC(Hz) = 4,433,618.75 Hz */
		SD_STD_NTSC, 705268427, V4L2_STD_NTSC_443,
	}, {
		/* FSC(Hz) = 4,433,618.75 Hz */
		SD_STD_PAL_M, 705268427, V4L2_STD_PAL_60,
	},
};
/* Program the encoder for the requested SD video standard: select the
 * standard bits, force SD input mode, load the 4-byte subcarrier
 * frequency, and apply standard-specific filter settings.
 * Returns 0 on success or a negative I2C error.
 */
static int adv7343_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct adv7343_state *state = to_state(sd);
	struct adv7343_std_info *std_info;
	int output_idx, num_std;
	char *fsc_ptr;
	u8 reg, val;
	int err = 0;
	int i = 0;

	output_idx = state->output;

	std_info = (struct adv7343_std_info *)stdinfo;
	num_std = ARRAY_SIZE(stdinfo);

	/* find the table entry matching the requested standard */
	for (i = 0; i < num_std; i++) {
		if (std_info[i].stdid & std)
			break;
	}

	if (i == num_std) {
		v4l2_dbg(1, debug, sd,
			"Invalid std or std is not supported: %llx\n",
			(unsigned long long)std);
		return -EINVAL;
	}

	/* Set the standard */
	val = state->reg80 & (~(SD_STD_MASK));
	val |= std_info[i].standard_val3;
	err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
	if (err < 0)
		goto setstd_exit;

	state->reg80 = val;

	/* Configure the input mode register */
	val = state->reg01 & (~((u8) INPUT_MODE_MASK));
	val |= SD_INPUT_MODE;
	err = adv7343_write(sd, ADV7343_MODE_SELECT_REG, val);
	if (err < 0)
		goto setstd_exit;

	state->reg01 = val;

	/* Program the sub carrier frequency registers
	 * (little-endian byte walk over the 32-bit fsc_val;
	 * NOTE: this loop deliberately reuses 'i', clobbering the
	 * standard-table index found above, which is no longer needed)
	 */
	fsc_ptr = (unsigned char *)&std_info[i].fsc_val;
	reg = ADV7343_FSC_REG0;
	for (i = 0; i < 4; i++, reg++, fsc_ptr++) {
		err = adv7343_write(sd, reg, *fsc_ptr);
		if (err < 0)
			goto setstd_exit;
	}

	val = state->reg80;

	/* Filter settings */
	if (std & (V4L2_STD_NTSC | V4L2_STD_NTSC_443))
		val &= 0x03;
	else if (std & ~V4L2_STD_SECAM)
		val |= 0x04;

	err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
	if (err < 0)
		goto setstd_exit;

	state->reg80 = val;

setstd_exit:
	if (err != 0)
		v4l2_err(sd, "Error setting std, write failed\n");

	return err;
}
/* Route the encoder to one of the supported analog outputs (composite,
 * component or S-Video): power the matching DACs, select YUV output and
 * clear the SD/HD DAC routing bits. Returns 0 or a negative I2C error.
 */
static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
{
	struct adv7343_state *state = to_state(sd);
	unsigned char val;
	int err = 0;

	if (output_type > ADV7343_SVIDEO_ID) {
		v4l2_dbg(1, debug, sd,
			"Invalid output type or output type not supported:%d\n",
			output_type);
		return -EINVAL;
	}

	/* Enable Appropriate DAC */
	val = state->reg00 & 0x03;

	if (output_type == ADV7343_COMPOSITE_ID)
		val |= ADV7343_COMPOSITE_POWER_VALUE;
	else if (output_type == ADV7343_COMPONENT_ID)
		val |= ADV7343_COMPONENT_POWER_VALUE;
	else
		val |= ADV7343_SVIDEO_POWER_VALUE;

	err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
	if (err < 0)
		goto setoutput_exit;

	state->reg00 = val;

	/* Enable YUV output */
	val = state->reg02 | YUV_OUTPUT_SELECT;
	err = adv7343_write(sd, ADV7343_MODE_REG0, val);
	if (err < 0)
		goto setoutput_exit;

	state->reg02 = val;

	/* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
	val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
	err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
	if (err < 0)
		goto setoutput_exit;

	state->reg82 = val;

	/* configure ED/HD Color DAC Swap and ED/HD RGB Input Enable bit to
	 * zero */
	val = state->reg35 & (HD_RGB_INPUT_DI & HD_DAC_SWAP_DI);
	err = adv7343_write(sd, ADV7343_HD_MODE_REG6, val);
	if (err < 0)
		goto setoutput_exit;

	state->reg35 = val;

setoutput_exit:
	if (err != 0)
		v4l2_err(sd, "Error setting output, write failed\n");

	return err;
}
/* Report the currently configured standard and output route to the log. */
static int adv7343_log_status(struct v4l2_subdev *sd)
{
	struct adv7343_state *state = to_state(sd);
	const char *output_name;

	if (state->output == 0)
		output_name = "Composite";
	else if (state->output == 1)
		output_name = "Component";
	else
		output_name = "S-Video";

	v4l2_info(sd, "Standard: %llx\n", (unsigned long long)state->std);
	v4l2_info(sd, "Output: %s\n", output_name);

	return 0;
}
/* v4l2_ctrl_ops.s_ctrl: forward a control value change to the matching
 * encoder register. Returns the I2C write result, or -EINVAL for an
 * unknown control id.
 */
static int adv7343_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		return adv7343_write(sd, ADV7343_SD_BRIGHTNESS_WSS,
					ctrl->val);

	case V4L2_CID_HUE:
		return adv7343_write(sd, ADV7343_SD_HUE_REG, ctrl->val);

	case V4L2_CID_GAIN:
		return adv7343_write(sd, ADV7343_DAC2_OUTPUT_LEVEL, ctrl->val);
	}
	return -EINVAL;
}
/* v4l2 chip-ident query: identify this client as an ADV7343. */
static int adv7343_g_chip_ident(struct v4l2_subdev *sd,
				struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7343, 0);
}
/* Control operations: all control writes funnel through adv7343_s_ctrl. */
static const struct v4l2_ctrl_ops adv7343_ctrl_ops = {
	.s_ctrl = adv7343_s_ctrl,
};

/* Core sub-device ops; control plumbing is delegated to the generic
 * v4l2_subdev_* control helpers backed by the driver's ctrl handler.
 */
static const struct v4l2_subdev_core_ops adv7343_core_ops = {
	.log_status = adv7343_log_status,
	.g_chip_ident = adv7343_g_chip_ident,
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
};
/* v4l2 video op: switch the output video standard, caching the new
 * value only after the hardware accepted it. No-op when unchanged.
 */
static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct adv7343_state *state = to_state(sd);

	if (state->std != std) {
		int ret = adv7343_setstd(sd, std);

		if (ret)
			return ret;
		state->std = std;
	}

	return 0;
}
/* v4l2 video op: select the analog output route, caching the new value
 * only after the hardware accepted it. No-op when unchanged.
 */
static int adv7343_s_routing(struct v4l2_subdev *sd,
		u32 input, u32 output, u32 config)
{
	struct adv7343_state *state = to_state(sd);

	if (state->output != output) {
		int ret = adv7343_setoutput(sd, output);

		if (ret)
			return ret;
		state->output = output;
	}

	return 0;
}
/* Video operations exposed by this encoder sub-device. */
static const struct v4l2_subdev_video_ops adv7343_video_ops = {
	.s_std_output	= adv7343_s_std_output,
	.s_routing	= adv7343_s_routing,
};

/* Top-level sub-device operations table. */
static const struct v4l2_subdev_ops adv7343_ops = {
	.core	= &adv7343_core_ops,
	.video	= &adv7343_video_ops,
};
/*
 * adv7343_initialize - program the power-up register defaults and then
 * apply the output route and video standard cached in the driver state.
 *
 * Returns 0 on success or the negative error code of the failing step.
 * (Previously the setoutput/setstd failures were masked as -EINVAL,
 * hiding the real I2C error code from the caller.)
 */
static int adv7343_initialize(struct v4l2_subdev *sd)
{
	struct adv7343_state *state = to_state(sd);
	int err = 0;
	int i;

	/* Walk the (register, value) pairs of the power-up default table */
	for (i = 0; i < ARRAY_SIZE(adv7343_init_reg_val); i += 2) {

		err = adv7343_write(sd, adv7343_init_reg_val[i],
					adv7343_init_reg_val[i+1]);
		if (err) {
			v4l2_err(sd, "Error initializing\n");
			return err;
		}
	}

	/* Configure for default video standard */
	err = adv7343_setoutput(sd, state->output);
	if (err < 0) {
		v4l2_err(sd, "Error setting output during init\n");
		return err;	/* propagate the real error code */
	}

	err = adv7343_setstd(sd, state->std);
	if (err < 0) {
		v4l2_err(sd, "Error setting std during init\n");
		return err;	/* propagate the real error code */
	}

	return err;
}
/*
 * adv7343_probe - i2c probe: allocate the driver state, register the
 * v4l2 sub-device and its controls, and program the chip defaults.
 *
 * Fix: the control-handler error path declared a second 'int err' that
 * shadowed the function-scope one; reuse the outer variable instead.
 *
 * Returns 0 on success or a negative errno.
 */
static int adv7343_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct adv7343_state *state;
	int err;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	state = kzalloc(sizeof(struct adv7343_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;

	/* Power-on shadow values for the write-only chip registers */
	state->reg00	= 0x80;
	state->reg01	= 0x00;
	state->reg02	= 0x20;
	state->reg35	= 0x00;
	state->reg80	= ADV7343_SD_MODE_REG1_DEFAULT;
	state->reg82	= ADV7343_SD_MODE_REG2_DEFAULT;

	state->output = ADV7343_COMPOSITE_ID;
	state->std = V4L2_STD_NTSC;

	v4l2_i2c_subdev_init(&state->sd, client, &adv7343_ops);

	/* the '2' is only a hash-size hint; three controls follow */
	v4l2_ctrl_handler_init(&state->hdl, 2);
	v4l2_ctrl_new_std(&state->hdl, &adv7343_ctrl_ops,
			V4L2_CID_BRIGHTNESS, ADV7343_BRIGHTNESS_MIN,
					     ADV7343_BRIGHTNESS_MAX, 1,
					     ADV7343_BRIGHTNESS_DEF);
	v4l2_ctrl_new_std(&state->hdl, &adv7343_ctrl_ops,
			V4L2_CID_HUE, ADV7343_HUE_MIN,
				      ADV7343_HUE_MAX, 1,
				      ADV7343_HUE_DEF);
	v4l2_ctrl_new_std(&state->hdl, &adv7343_ctrl_ops,
			V4L2_CID_GAIN, ADV7343_GAIN_MIN,
				       ADV7343_GAIN_MAX, 1,
				       ADV7343_GAIN_DEF);
	state->sd.ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		err = state->hdl.error;	/* was a shadowing 'int err' */

		v4l2_ctrl_handler_free(&state->hdl);
		kfree(state);
		return err;
	}
	v4l2_ctrl_handler_setup(&state->hdl);

	err = adv7343_initialize(&state->sd);
	if (err) {
		v4l2_ctrl_handler_free(&state->hdl);
		kfree(state);
	}
	return err;
}
/* i2c remove: unregister the sub-device and free controls and state. */
static int adv7343_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct adv7343_state *state = to_state(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&state->hdl);
	kfree(state);

	return 0;
}
/* I2C ids this driver binds to. */
static const struct i2c_device_id adv7343_id[] = {
	{"adv7343", 0},
	{},
};

MODULE_DEVICE_TABLE(i2c, adv7343_id);

static struct i2c_driver adv7343_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "adv7343",
	},
	.probe		= adv7343_probe,
	.remove		= adv7343_remove,
	.id_table	= adv7343_id,
};

/* Module entry/exit: plain i2c driver registration. */
static __init int init_adv7343(void)
{
	return i2c_add_driver(&adv7343_driver);
}

static __exit void exit_adv7343(void)
{
	i2c_del_driver(&adv7343_driver);
}

module_init(init_adv7343);
module_exit(exit_adv7343);
| gpl-2.0 |
Evervolv/android_kernel_htc_msm8660 | drivers/pcmcia/bfin_cf_pcmcia.c | 3188 | 7775 | /*
* file: drivers/pcmcia/bfin_cf.c
*
* based on: drivers/pcmcia/omap_cf.c
* omap_cf.c -- OMAP 16xx CompactFlash controller driver
*
* Copyright (c) 2005 David Brownell
* Copyright (c) 2006-2008 Michael Hennerich Analog Devices Inc.
*
* bugs: enter bugs at http://blackfin.uclinux.org/
*
* this program is free software; you can redistribute it and/or modify
* it under the terms of the gnu general public license as published by
* the free software foundation; either version 2, or (at your option)
* any later version.
*
* this program is distributed in the hope that it will be useful,
* but without any warranty; without even the implied warranty of
* merchantability or fitness for a particular purpose. see the
* gnu general public license for more details.
*
* you should have received a copy of the gnu general public license
* along with this program; see the file copying.
* if not, write to the free software foundation,
* 59 temple place - suite 330, boston, ma 02111-1307, usa.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <pcmcia/ss.h>
#include <pcmcia/cisreg.h>
#include <asm/gpio.h>
/* Size constants (bytes) used for the CF memory windows */
#define SZ_1K 0x00000400
#define SZ_8K 0x00002000
#define SZ_2K (2 * SZ_1K)

/* card-detect poll period */
#define POLL_INTERVAL (2 * HZ)

/* Blackfin async-bank addresses of the ATASEL latch; a write to the
 * ENA address asserts (inverted) RESET, DIS deasserts it */
#define CF_ATASEL_ENA 0x20311802 /* Inverts RESET */
#define CF_ATASEL_DIS 0x20311800

/* card present when the card-detect GPIO reads high */
#define bfin_cf_present(pfx) (gpio_get_value(pfx))
/*--------------------------------------------------------------------------*/
static const char driver_name[] = "bfin_cf_pcmcia";

/* Per-socket driver state. */
struct bfin_cf_socket {
	struct pcmcia_socket socket;	/* embedded pcmcia core socket */

	struct timer_list timer;	/* card-detect polling timer */
	unsigned present:1;		/* last observed card-present state */
	unsigned active:1;		/* keep re-arming the poll timer */

	struct platform_device *pdev;
	unsigned long phys_cf_io;	/* physical base of I/O window */
	unsigned long phys_cf_attr;	/* physical base of attribute window */
	u_int irq;
	u_short cd_pfx;			/* card-detect GPIO number */
};
/*--------------------------------------------------------------------------*/
/* Pulse the card's RESET line via the ATASEL latch: assert (ENA address
 * inverts RESET), hold 200 ms, then deassert. Always returns 0.
 */
static int bfin_cf_reset(void)
{
	outw(0, CF_ATASEL_ENA);
	mdelay(200);
	outw(0, CF_ATASEL_DIS);

	return 0;
}
/* Socket-services init hook: nothing to do for this controller. */
static int bfin_cf_ss_init(struct pcmcia_socket *s)
{
	return 0;
}

/* the timer is primarily to kick this socket's pccardd */
static void bfin_cf_timer(unsigned long _cf)
{
	struct bfin_cf_socket *cf = (void *)_cf;
	unsigned short present = bfin_cf_present(cf->cd_pfx);

	/* report insert/eject transitions to the pcmcia core */
	if (present != cf->present) {
		cf->present = present;
		dev_dbg(&cf->pdev->dev, ": card %s\n",
			 present ? "present" : "gone");
		pcmcia_parse_events(&cf->socket, SS_DETECT);
	}

	/* keep polling while the socket is active */
	if (cf->active)
		mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
}
/* Socket-services get_status: report card presence; a present card is
 * always reported ready and powered at 3.3V, and routes the socket's
 * interrupt to the pci_irq slot.
 */
static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
{
	struct bfin_cf_socket *cf;

	if (!sp)
		return -EINVAL;

	cf = container_of(s, struct bfin_cf_socket, socket);

	if (bfin_cf_present(cf->cd_pfx)) {
		*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
		s->pcmcia_irq = 0;
		s->pci_irq = cf->irq;
	} else
		*sp = 0;
	return 0;
}
/* Socket-services set_socket: validate the requested Vcc (off/3.3V/5V
 * only) and perform a card reset if SS_RESET is requested, with the
 * socket irq masked around the reset pulse.
 */
static int
bfin_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
{

	struct bfin_cf_socket *cf;
	cf = container_of(sock, struct bfin_cf_socket, socket);

	switch (s->Vcc) {
	case 0:
	case 33:
		break;
	case 50:
		break;
	default:
		return -EINVAL;
	}

	if (s->flags & SS_RESET) {
		disable_irq(cf->irq);
		bfin_cf_reset();
		enable_irq(cf->irq);
	}

	dev_dbg(&cf->pdev->dev, ": Vcc %d, io_irq %d, flags %04x csc %04x\n",
		 s->Vcc, s->io_irq, s->flags, s->csc_mask);

	return 0;
}

/* Suspend: drive the socket into the 'dead' (powered-off) state. */
static int bfin_cf_ss_suspend(struct pcmcia_socket *s)
{
	return bfin_cf_set_socket(s, &dead_socket);
}
/* regions are 2K each: mem, attrib, io (and reserved-for-ide) */
/* regions are 2K each: mem, attrib, io (and reserved-for-ide) */
static int bfin_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
{
	struct bfin_cf_socket *cf;
	cf = container_of(s, struct bfin_cf_socket, socket);

	/* static mapping: always report the fixed 2K I/O window */
	io->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
	io->start = cf->phys_cf_io;
	io->stop = io->start + SZ_2K - 1;
	return 0;
}
/* Socket-services set_mem_map: static mapping only, so reject any
 * non-zero card offset and point the window at the fixed attribute or
 * common-memory base depending on MAP_ATTRIB.
 */
static int
bfin_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
{
	struct bfin_cf_socket *cf = container_of(s, struct bfin_cf_socket,
						 socket);

	if (map->card_start)
		return -EINVAL;

	map->flags &= MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT;
	map->static_start = (map->flags & MAP_ATTRIB) ?
				cf->phys_cf_attr : cf->phys_cf_io;

	return 0;
}
/* Socket-services callbacks handed to the pcmcia core. */
static struct pccard_operations bfin_cf_ops = {
	.init			= bfin_cf_ss_init,
	.suspend		= bfin_cf_ss_suspend,
	.get_status		= bfin_cf_get_status,
	.set_socket		= bfin_cf_set_socket,
	.set_io_map		= bfin_cf_set_io_map,
	.set_mem_map		= bfin_cf_set_mem_map,
};
/*--------------------------------------------------------------------------*/
/*
 * bfin_cf_probe - platform probe: claim the card-detect GPIO, map the
 * static I/O window, register the pcmcia socket and start the
 * card-detect poll timer.
 *
 * Fix: corrected the user-visible error message typo
 * "Failed ro request" -> "Failed to request".
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit bfin_cf_probe(struct platform_device *pdev)
{
	struct bfin_cf_socket *cf;
	struct resource *io_mem, *attr_mem;
	int irq;
	unsigned short cd_pfx;
	int status = 0;

	dev_info(&pdev->dev, "Blackfin CompactFlash/PCMCIA Socket Driver\n");

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EINVAL;

	cd_pfx = platform_get_irq(pdev, 1);	/*Card Detect GPIO PIN */

	if (gpio_request(cd_pfx, "pcmcia: CD")) {
		dev_err(&pdev->dev,
			"Failed to request Card Detect GPIO_%d\n",
			cd_pfx);
		return -EBUSY;
	}
	gpio_direction_input(cd_pfx);

	cf = kzalloc(sizeof *cf, GFP_KERNEL);
	if (!cf) {
		gpio_free(cd_pfx);
		return -ENOMEM;
	}

	cf->cd_pfx = cd_pfx;

	setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf);

	cf->pdev = pdev;
	platform_set_drvdata(pdev, cf);

	cf->irq = irq;
	cf->socket.pci_irq = irq;

	irq_set_irq_type(irq, IRQF_TRIGGER_LOW);

	io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!io_mem || !attr_mem)
		goto fail0;

	cf->phys_cf_io = io_mem->start;
	cf->phys_cf_attr = attr_mem->start;

	/* pcmcia layer only remaps "real" memory */
	cf->socket.io_offset = (unsigned long)
	    ioremap(cf->phys_cf_io, SZ_2K);

	if (!cf->socket.io_offset)
		goto fail0;

	/* NOTE(review): logged at error level although this is a success
	 * path; presumably should be dev_info — confirm before changing */
	dev_err(&pdev->dev, ": on irq %d\n", irq);

	dev_dbg(&pdev->dev, ": %s\n",
		 bfin_cf_present(cf->cd_pfx) ? "present" : "(not present)");

	cf->socket.owner = THIS_MODULE;
	cf->socket.dev.parent = &pdev->dev;
	cf->socket.ops = &bfin_cf_ops;
	cf->socket.resource_ops = &pccard_static_ops;
	cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
	    | SS_CAP_MEM_ALIGN;
	cf->socket.map_size = SZ_2K;

	status = pcmcia_register_socket(&cf->socket);
	if (status < 0)
		goto fail2;

	cf->active = 1;
	mod_timer(&cf->timer, jiffies + POLL_INTERVAL);
	return 0;

fail2:
	iounmap((void __iomem *)cf->socket.io_offset);
	/* NOTE(review): no matching request_mem_region() is made in this
	 * probe; verify this release is actually wanted here */
	release_mem_region(cf->phys_cf_io, SZ_8K);

fail0:
	gpio_free(cd_pfx);
	kfree(cf);
	platform_set_drvdata(pdev, NULL);

	return status;
}
/* Platform remove: stop polling, unregister the socket and release the
 * GPIO, mapping and state in reverse order of probe.
 */
static int __devexit bfin_cf_remove(struct platform_device *pdev)
{
	struct bfin_cf_socket *cf = platform_get_drvdata(pdev);

	gpio_free(cf->cd_pfx);
	cf->active = 0;		/* keep the timer from re-arming itself */
	pcmcia_unregister_socket(&cf->socket);
	del_timer_sync(&cf->timer);
	iounmap((void __iomem *)cf->socket.io_offset);
	release_mem_region(cf->phys_cf_io, SZ_8K);
	platform_set_drvdata(pdev, NULL);
	kfree(cf);
	return 0;
}
static struct platform_driver bfin_cf_driver = {
	.driver = {
		   .name = (char *)driver_name,
		   .owner = THIS_MODULE,
		   },
	.probe = bfin_cf_probe,
	.remove = __devexit_p(bfin_cf_remove),
};

/* Module entry/exit: plain platform driver registration. */
static int __init bfin_cf_init(void)
{
	return platform_driver_register(&bfin_cf_driver);
}

static void __exit bfin_cf_exit(void)
{
	platform_driver_unregister(&bfin_cf_driver);
}

module_init(bfin_cf_init);
module_exit(bfin_cf_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Zaphod-Beeblebrox/kernel_rockchip_rk3188 | drivers/scsi/aacraid/comminit.c | 3188 | 14467 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* comminit.c
*
* Abstract: This supports the initialization of the host adapter commuication interface.
* This is a platform dependent module for the pci cyclone board.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
/* Global driver configuration; interrupt moderation enabled by default. */
struct aac_common aac_config = {
	.irq_mod = 1
};
/*
 * aac_alloc_comm - allocate and lay out the host/adapter communication area
 * @dev:	adapter being initialised
 * @commaddr:	out: kernel virtual address of the comm-queue region
 * @commsize:	bytes requested for the comm-queue headers + entries
 * @commalign:	required alignment of the comm-queue region
 *
 * A single physically contiguous DMA region holds, in order:
 * adapter FIBs (page aligned), the optional type-1 host RRQ, the
 * struct aac_init the firmware reads at start-up, the (aligned)
 * comm-queue region and finally the adapter printf buffer.
 *
 * Returns 1 on success, 0 if the DMA allocation failed.
 */
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	const unsigned long fibsize = 4096;
	const unsigned long printfbufsiz = 256;
	unsigned long host_rrq_size = 0;
	struct aac_init *init;
	dma_addr_t phys;
	unsigned long aac_max_hostphysmempages;
	/* Type-1 comm needs one 32-bit response-queue slot per possible FIB */
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1)
		host_rrq_size = (dev->scsi_host_ptr->can_queue
			+ AAC_NUM_MGT_FIB) * sizeof(u32);
	size = fibsize + sizeof(struct aac_init) + commsize +
			commalign + printfbufsiz + host_rrq_size;
	base = pci_alloc_consistent(dev->pdev, size, &phys);
	if(base == NULL)
	{
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}
	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;
	/* Host RRQ sits immediately after the adapter FIB area */
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
		dev->host_rrq = (u32 *)(base + fibsize);
		dev->host_rrq_pa = phys + fibsize;
		memset(dev->host_rrq, 0, host_rrq_size);
	}
	dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
	dev->init_pa = phys + fibsize + host_rrq_size;
	init = dev->init;
	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
	/* A non-default FIB size implies the rev-4 init structure */
	if (dev->max_fib_size != sizeof(struct hw_fib))
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
	init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
	init->fsrev = cpu_to_le32(dev->fsrev);
	/*
	 * Adapter Fibs are the first thing allocated so that they
	 * start page aligned
	 */
	dev->aif_base_va = (struct hw_fib *)base;
	init->AdapterFibsVirtualAddress = 0;
	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
	init->AdapterFibsSize = cpu_to_le32(fibsize);
	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
	/*
	 * number of 4k pages of host physical memory. The aacraid fw needs
	 * this number to be less than 4gb worth of pages. New firmware doesn't
	 * have any issues with the mapping system, but older Firmware did, and
	 * had *troubles* dealing with the math overloading past 32 bits, thus
	 * we must limit this field.
	 */
	aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
	if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
		init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
	else
		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
	init->InitFlags = 0;
	if (dev->comm_interface == AAC_COMM_MESSAGE) {
		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED);
		dprintk((KERN_WARNING
			"aacraid: New Comm Interface type1 enabled\n"));
	}
	init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
		INITFLAGS_DRIVER_SUPPORTS_PM);
	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
	init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
	/*
	 * NOTE(review): unlike every other field above, the two RRQ address
	 * halves are stored without cpu_to_le32().  Looks like a latent
	 * big-endian issue — confirm against the firmware interface spec.
	 */
	init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32);
	init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff);
	/*
	 * Increment the base address by the amount already used
	 */
	base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
		sizeof(struct aac_init));
	/*
	 * Align the beginning of Headers to commalign
	 */
	align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;
	/*
	 * Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	init->CommHeaderAddress = cpu_to_le32((u32)phys);
	/*
	 * Increment the base address by the size of the CommArea
	 */
	base = base + commsize;
	phys = phys + commsize;
	/*
	 * Place the Printf buffer area after the Fast I/O comm area.
	 */
	dev->printfbuf = (void *)base;
	init->printfbuf = cpu_to_le32(phys);
	init->printfbufsiz = cpu_to_le32(printfbufsiz);
	memset(base, 0, printfbufsiz);
	return 1;
}
/*
 * aac_queue_init - initialise one host<->adapter command/response queue
 * @dev:	owning adapter
 * @q:		queue to initialise
 * @mem:	two consecutive u32 slots in the shared comm area used as
 *		the producer and consumer indexes for this queue
 * @qsize:	number of entries in the queue
 *
 * Both shared indexes start at qsize (little-endian on the wire).
 */
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
	q->dev = dev;
	q->numpending = 0;
	q->entries = qsize;

	INIT_LIST_HEAD(&q->cmdq);
	init_waitqueue_head(&q->cmdready);
	init_waitqueue_head(&q->qfull);

	spin_lock_init(&q->lockdata);
	q->lock = &q->lockdata;

	/* producer/consumer indexes live in the DMA-shared comm area */
	q->headers.producer = (__le32 *)&mem[0];
	q->headers.consumer = (__le32 *)&mem[1];
	*q->headers.producer = cpu_to_le32(qsize);
	*q->headers.consumer = cpu_to_le32(qsize);
}
/**
 * aac_send_shutdown - shutdown an adapter
 * @dev: Adapter to shutdown
 *
 * Sends a VM_CloseAll (shutdown) request to the adapter as a
 * ContainerCommand FIB and waits for completion with a silent timeout.
 *
 * Returns the aac_fib_send() status (>= 0 on success, negative errno
 * on failure).
 */
int aac_send_shutdown(struct aac_dev * dev)
{
	struct fib * fibctx;
	struct aac_close *cmd;
	int status;
	fibctx = aac_fib_alloc(dev);
	if (!fibctx)
		return -ENOMEM;
	aac_fib_init(fibctx);
	/* cid 0xffffffff addresses all containers */
	cmd = (struct aac_close *) fib_data(fibctx);
	cmd->command = cpu_to_le32(VM_CloseAll);
	cmd->cid = cpu_to_le32(0xffffffff);
	status = aac_fib_send(ContainerCommand,
		fibctx,
		sizeof(struct aac_close),
		FsaNormal,
		-2 /* Timeout silently */, 1,
		NULL, NULL);
	if (status >= 0)
		aac_fib_complete(fibctx);
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibctx);
	return status;
}
/**
* aac_comm_init - Initialise FSA data structures
* @dev: Adapter to initialise
*
* Initializes the data structures that are required for the FSA commuication
* interface to operate.
* Returns
* 1 - if we were able to init the commuication interface.
* 0 - If there were errors initing. This is a fatal error.
*/
static int aac_comm_init(struct aac_dev * dev)
{
unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
u32 *headers;
struct aac_entry * queues;
unsigned long size;
struct aac_queue_block * comm = dev->queues;
/*
* Now allocate and initialize the zone structures used as our
* pool of FIB context records. The size of the zone is based
* on the system memory size. We also initialize the mutex used
* to protect the zone.
*/
spin_lock_init(&dev->fib_lock);
/*
* Allocate the physically contiguous space for the commuication
* queue headers.
*/
size = hdrsize + queuesize;
if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
return -ENOMEM;
queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
/* Adapter to Host normal priority Command queue */
comm->queue[HostNormCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
queues += HOST_NORM_CMD_ENTRIES;
headers += 2;
/* Adapter to Host high priority command queue */
comm->queue[HostHighCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
queues += HOST_HIGH_CMD_ENTRIES;
headers +=2;
/* Host to adapter normal priority command queue */
comm->queue[AdapNormCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
queues += ADAP_NORM_CMD_ENTRIES;
headers += 2;
/* host to adapter high priority command queue */
comm->queue[AdapHighCmdQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
queues += ADAP_HIGH_CMD_ENTRIES;
headers += 2;
/* adapter to host normal priority response queue */
comm->queue[HostNormRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
queues += HOST_NORM_RESP_ENTRIES;
headers += 2;
/* adapter to host high priority response queue */
comm->queue[HostHighRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
queues += HOST_HIGH_RESP_ENTRIES;
headers += 2;
/* host to adapter normal priority response queue */
comm->queue[AdapNormRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
queues += ADAP_NORM_RESP_ENTRIES;
headers += 2;
/* host to adapter high priority response queue */
comm->queue[AdapHighRespQueue].base = queues;
aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
return 0;
}
/*
 * aac_init_adapter - negotiate comm settings and bring up the FSA interface
 * @dev: adapter to initialise
 *
 * Queries the adapter for its supported communication interface and
 * preferred sizes, applies any module-parameter overrides (acbsize,
 * numacb — declared elsewhere in the driver), then allocates the queue
 * block, the shared comm area and the FIB pool.
 *
 * Returns @dev on success, NULL on any failure.
 */
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;
	/*
	 * Check the preferred comm settings, defaults from template.
	 */
	dev->management_fib_count = 0;
	spin_lock_init(&dev->manage_lock);
	dev->max_fib_size = sizeof(struct hw_fib);
	/* sg_tablesize = payload room in a FIB divided by one sg entry */
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgentry))
			/ sizeof(struct sgentry);
	dev->comm_interface = AAC_COMM_PRODUCER;
	dev->raw_io_interface = dev->raw_io_64 = 0;
	/* Ask the firmware which comm interfaces it supports */
	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
	 		(status[0] == 0x00000001)) {
		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
			dev->raw_io_64 = 1;
		if (dev->a_ops.adapter_comm) {
			/* prefer type-1 message interface over plain message */
			if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
				dev->raw_io_interface = 1;
			} else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
				dev->comm_interface = AAC_COMM_MESSAGE;
				dev->raw_io_interface = 1;
			}
		}
		/* new comm may need a larger BAR mapping (status[2] = size) */
		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
		    (status[2] > dev->base_size)) {
			aac_adapter_ioremap(dev, 0);
			dev->base_size = status[2];
			if (aac_adapter_ioremap(dev, status[2])) {
				/* remap failed, go back ... */
				dev->comm_interface = AAC_COMM_PRODUCER;
				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
					printk(KERN_WARNING
					  "aacraid: unable to map adapter.\n");
					return NULL;
				}
			}
		}
	}
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
	  0, 0, 0, 0, 0, 0,
	  status+0, status+1, status+2, status+3, status+4))
	 && (status[0] == 0x00000001)) {
		/*
		 *	status[1] >> 16		maximum command size in KB
		 *	status[1] & 0xFFFF	maximum FIB size
		 *	status[2] >> 16		maximum SG elements to driver
		 *	status[2] & 0xFFFF	maximum SG elements from driver
		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		/* Multiple of 32 for PMC */
		dev->max_fib_size = status[1] & 0xFFE0;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
		dev->max_num_aif = status[4] & 0xFFFF;
		/*
		 * NOTE:
		 * All these overrides are based on a fixed internal
		 * knowledge and understanding of existing adapters,
		 * acbsize should be set with caution.
		 */
		if (acbsize == 512) {
			host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
			dev->max_fib_size = 512;
			dev->sg_tablesize = host->sg_tablesize
			  = (512 - sizeof(struct aac_fibhdr)
			    - sizeof(struct aac_write) + sizeof(struct sgentry))
			      / sizeof(struct sgentry);
			host->can_queue = AAC_NUM_IO_FIB;
		} else if (acbsize == 2048) {
			host->max_sectors = 512;
			dev->max_fib_size = 2048;
			host->sg_tablesize = 65;
			dev->sg_tablesize = 81;
			host->can_queue = 512 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 4096) {
			host->max_sectors = 1024;
			dev->max_fib_size = 4096;
			host->sg_tablesize = 129;
			dev->sg_tablesize = 166;
			host->can_queue = 256 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 8192) {
			host->max_sectors = 2048;
			dev->max_fib_size = 8192;
			host->sg_tablesize = 257;
			dev->sg_tablesize = 337;
			host->can_queue = 128 - AAC_NUM_MGT_FIB;
		} else if (acbsize > 0) {
			printk("Illegal acbsize=%d ignored\n", acbsize);
		}
	}
	{
		/* user-requested queue depth may only shrink can_queue */
		if (numacb > 0) {
			if (numacb < host->can_queue)
				host->can_queue = numacb;
			else
				printk("numacb=%d ignored\n", numacb);
		}
	}
	/*
	 *	Ok now init the communication subsystem
	 */
	dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
	if (dev->queues == NULL) {
		printk(KERN_ERR "Error could not allocate comm region.\n");
		return NULL;
	}
	if (aac_comm_init(dev)<0){
		kfree(dev->queues);
		return NULL;
	}
	/*
	 *	Initialize the list of fibs
	 */
	if (aac_fib_setup(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}
	INIT_LIST_HEAD(&dev->fib_list);
	return dev;
}
| gpl-2.0 |
engine95/navel-855 | arch/s390/kernel/vdso.c | 4468 | 8391 | /*
* vdso setup for s390
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif
#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */
/*
* Should the kernel map a VDSO page into processes and pass its
* address down to glibc upon exec()?
*/
unsigned int __read_mostly vdso_enabled = 1;
/*
 * Parse the "vdso=" kernel command line option: accepts "on", "off"
 * or a number (0 disables, non-zero enables).  An unparsable value
 * disables the vDSO and rejects the option.
 */
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (!strncmp(s, "on", 3))
		vdso_enabled = 1;
	else if (!strncmp(s, "off", 4))
		vdso_enabled = 0;
	else {
		rc = strict_strtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return rc == 0;
}
__setup("vdso=", vdso_setup);
/*
 * The vdso data page: one page-aligned, page-sized union so the
 * kernel-maintained vdso_data can be mapped into user space as the
 * last page of the vDSO.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
/*
 * Setup vdso data page.
 *
 * ECTG (store-clock-extended helper) is advertised only when the
 * kernel does not run in home-space mode and facility bit 31 is set.
 */
static void vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
}
#ifdef CONFIG_64BIT
/*
* Allocate/free per cpu vdso data.
*/
#define SEGMENT_ORDER 2
/*
 * vdso_alloc_per_cpu - build the per-cpu vDSO data mapping for one CPU
 * @lowcore: lowcore of the CPU being set up
 *
 * Allocates a private segment table, page table and data page-frame,
 * wires the frame in read-only, and fills the PSAL/ASTE entries in
 * the lowcore paste array so the per-cpu page can be reached via
 * access registers (NOTE(review): exact PSAL/ASTE bit meanings taken
 * from the architecture — verify against the s390 PoP if modifying).
 *
 * Returns 0 on success (or when the per-cpu area is not needed),
 * -ENOMEM if any allocation failed.
 */
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;
	/* nothing to do when the vDSO is off or we run in home space */
	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return 0;
	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	/* initialise the tables to "empty", then map the single frame RO */
	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
		    256*sizeof(unsigned long));
	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_RO + page_frame;
	/* PSAL/ASTE live in the upper half of the page-table page */
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;
	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;
	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x20000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;
	return 0;

out:
	/* free_page(0)/free_pages(0, ...) are no-ops, so partial failure is safe */
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
/*
 * vdso_free_per_cpu - tear down the per-cpu vDSO mapping built by
 * vdso_alloc_per_cpu().
 *
 * Walks back from the lowcore PSAL entry to recover the segment
 * table, page table and data frame, then frees all three.
 */
void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;
	/* reverse the pointer chain set up in vdso_alloc_per_cpu() */
	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}
/*
 * Point control register 5 at the lowcore paste array so the CPU can
 * resolve per-cpu vDSO accesses.  Skipped when the vDSO is disabled
 * or the kernel runs in home-space mode.
 */
static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */
/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 *
 * Picks the 31-bit or 64-bit vDSO image (compat tasks get the 32-bit
 * one), finds a free area in the process address space, records the
 * base in mm->context and installs the special mapping.  Returns 0 on
 * success or when the vDSO is disabled/not applicable.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	/* choose the image matching the task's personality */
#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		/* mapping failed: forget the base so arch_vma_name misses */
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
/*
 * Report "[vdso]" for the VMA whose start matches the per-mm vDSO
 * base recorded at exec time; every other VMA is unnamed here.
 */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (!vma->vm_mm)
		return NULL;
	if (vma->vm_start != vma->vm_mm->context.vdso_base)
		return NULL;
	return "[vdso]";
}
/*
 * vdso_init - build the page lists for the vDSO images at boot
 *
 * For each built-in image (31-bit and/or 64-bit) the code pages are
 * collected into a NULL-terminated pagelist whose final code slot is
 * the shared vdso_data page.  On 64-bit kernels the per-cpu data area
 * for the boot CPU and control register 5 are set up as well.
 */
static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	/* last page is the shared data page; list is NULL terminated */
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif
#ifdef CONFIG_64BIT
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();
#endif /* CONFIG_64BIT */

	/* pin the shared data page for the lifetime of the kernel */
	get_page(virt_to_page(vdso_data));

	smp_wmb();

	return 0;
}
early_initcall(vdso_init);
/*
 * s390 maps the vDSO as an ordinary VMA rather than a fixed "gate"
 * area, so the generic gate-area queries all report that none exists.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
| gpl-2.0 |
AOKP/kernel_samsung_exynos5410 | drivers/usb/storage/ene_ub6250.c | 4724 | 68064 | /*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <linux/firmware.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
MODULE_DESCRIPTION("Driver for ENE UB6250 reader");
MODULE_LICENSE("GPL");
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
static struct usb_device_id ene_ub6250_usb_ids[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ene_ub6250_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/* ENE bin code len */
#define ENE_BIN_CODE_LEN 0x800
/* EnE HW Register */
#define REG_CARD_STATUS 0xFF83
#define REG_HW_TRAP1 0xFF89
/* SRB Status */
#define SS_SUCCESS 0x00 /* No Sense */
#define SS_NOT_READY 0x02
#define SS_MEDIUM_ERR 0x03
#define SS_HW_ERR 0x04
#define SS_ILLEGAL_REQUEST 0x05
#define SS_UNIT_ATTENTION 0x06
/* ENE Load FW Pattern */
#define SD_INIT1_PATTERN 1
#define SD_INIT2_PATTERN 2
#define SD_RW_PATTERN 3
#define MS_INIT_PATTERN 4
#define MSP_RW_PATTERN 5
#define MS_RW_PATTERN 6
#define SM_INIT_PATTERN 7
#define SM_RW_PATTERN 8
#define FDIR_WRITE 0
#define FDIR_READ 1
/* For MS Card */
/* Status Register 1 */
#define MS_REG_ST1_MB 0x80 /* media busy */
#define MS_REG_ST1_FB1 0x40 /* flush busy 1 */
#define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */
#define MS_REG_ST1_UCDT 0x10 /* unable to correct data */
#define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */
#define MS_REG_ST1_UCEX 0x04 /* unable to correct extra */
#define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */
#define MS_REG_ST1_UCFG 0x01 /* unable to correct overwrite flag */
#define MS_REG_ST1_DEFAULT (MS_REG_ST1_MB | MS_REG_ST1_FB1 | MS_REG_ST1_DTER | MS_REG_ST1_UCDT | MS_REG_ST1_EXER | MS_REG_ST1_UCEX | MS_REG_ST1_FGER | MS_REG_ST1_UCFG)
/* Overwrite Area */
#define MS_REG_OVR_BKST 0x80 /* block status */
#define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */
#define MS_REG_OVR_BKST_NG 0x00 /* NG */
#define MS_REG_OVR_PGST0 0x40 /* page status */
#define MS_REG_OVR_PGST1 0x20
#define MS_REG_OVR_PGST_MASK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1)
#define MS_REG_OVR_PGST_OK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1) /* OK */
#define MS_REG_OVR_PGST_NG MS_REG_OVR_PGST1 /* NG */
#define MS_REG_OVR_PGST_DATA_ERROR 0x00 /* data error */
#define MS_REG_OVR_UDST 0x10 /* update status */
#define MS_REG_OVR_UDST_UPDATING 0x00 /* updating */
#define MS_REG_OVR_UDST_NO_UPDATE MS_REG_OVR_UDST
#define MS_REG_OVR_RESERVED 0x08
#define MS_REG_OVR_DEFAULT (MS_REG_OVR_BKST_OK | MS_REG_OVR_PGST_OK | MS_REG_OVR_UDST_NO_UPDATE | MS_REG_OVR_RESERVED)
/* Management Flag */
#define MS_REG_MNG_SCMS0 0x20 /* serial copy management system */
#define MS_REG_MNG_SCMS1 0x10
#define MS_REG_MNG_SCMS_MASK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
#define MS_REG_MNG_SCMS_COPY_OK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
#define MS_REG_MNG_SCMS_ONE_COPY MS_REG_MNG_SCMS1
#define MS_REG_MNG_SCMS_NO_COPY 0x00
#define MS_REG_MNG_ATFLG 0x08 /* address transfer table flag */
#define MS_REG_MNG_ATFLG_OTHER MS_REG_MNG_ATFLG /* other */
#define MS_REG_MNG_ATFLG_ATTBL 0x00 /* address transfer table */
#define MS_REG_MNG_SYSFLG 0x04 /* system flag */
#define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */
#define MS_REG_MNG_SYSFLG_BOOT 0x00 /* system block */
#define MS_REG_MNG_RESERVED 0xc3
#define MS_REG_MNG_DEFAULT (MS_REG_MNG_SCMS_COPY_OK | MS_REG_MNG_ATFLG_OTHER | MS_REG_MNG_SYSFLG_USER | MS_REG_MNG_RESERVED)
#define MS_MAX_PAGES_PER_BLOCK 32
#define MS_MAX_INITIAL_ERROR_BLOCKS 10
#define MS_LIB_BITS_PER_BYTE 8
#define MS_SYSINF_FORMAT_FAT 1
#define MS_SYSINF_USAGE_GENERAL 0
#define MS_SYSINF_MSCLASS_TYPE_1 1
#define MS_SYSINF_PAGE_SIZE MS_BYTES_PER_PAGE /* fixed */
#define MS_SYSINF_CARDTYPE_RDONLY 1
#define MS_SYSINF_CARDTYPE_RDWR 2
#define MS_SYSINF_CARDTYPE_HYBRID 3
#define MS_SYSINF_SECURITY 0x01
#define MS_SYSINF_SECURITY_NO_SUPPORT MS_SYSINF_SECURITY
#define MS_SYSINF_SECURITY_SUPPORT 0
#define MS_SYSINF_RESERVED1 1
#define MS_SYSINF_RESERVED2 1
#define MS_SYSENT_TYPE_INVALID_BLOCK 0x01
#define MS_SYSENT_TYPE_CIS_IDI 0x0a /* CIS/IDI */
#define SIZE_OF_KIRO 1024
#define BYTE_MASK 0xff
/* ms error code */
#define MS_STATUS_WRITE_PROTECT 0x0106
#define MS_STATUS_SUCCESS 0x0000
#define MS_ERROR_FLASH_READ 0x8003
#define MS_ERROR_FLASH_ERASE 0x8005
#define MS_LB_ERROR 0xfff0
#define MS_LB_BOOT_BLOCK 0xfff1
#define MS_LB_INITIAL_ERROR 0xfff2
#define MS_STATUS_SUCCESS_WITH_ECC 0xfff3
#define MS_LB_ACQUIRED_ERROR 0xfff4
#define MS_LB_NOT_USED_ERASED 0xfff5
#define MS_NOCARD_ERROR 0xfff8
#define MS_NO_MEMORY_ERROR 0xfff9
#define MS_STATUS_INT_ERROR 0xfffa
#define MS_STATUS_ERROR 0xfffe
#define MS_LB_NOT_USED 0xffff
#define MS_REG_MNG_SYSFLG 0x04 /* system flag */
#define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */
#define MS_BOOT_BLOCK_ID 0x0001
#define MS_BOOT_BLOCK_FORMAT_VERSION 0x0100
#define MS_BOOT_BLOCK_DATA_ENTRIES 2
#define MS_NUMBER_OF_SYSTEM_ENTRY 4
#define MS_NUMBER_OF_BOOT_BLOCK 2
#define MS_BYTES_PER_PAGE 512
#define MS_LOGICAL_BLOCKS_PER_SEGMENT 496
#define MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT 494
#define MS_PHYSICAL_BLOCKS_PER_SEGMENT 0x200 /* 512 */
#define MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK 0x1ff
/* overwrite area */
#define MS_REG_OVR_BKST 0x80 /* block status */
#define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */
#define MS_REG_OVR_BKST_NG 0x00 /* NG */
/* Status Register 1 */
#define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */
#define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */
#define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */
/* MemoryStick Register */
/* Status Register 0 */
#define MS_REG_ST0_WP 0x01 /* write protected */
#define MS_REG_ST0_WP_ON MS_REG_ST0_WP
#define MS_LIB_CTRL_RDONLY 0
#define MS_LIB_CTRL_WRPROTECT 1
/*dphy->log table */
#define ms_libconv_to_logical(pdx, PhyBlock) (((PhyBlock) >= (pdx)->MS_Lib.NumberOfPhyBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Phy2LogMap[PhyBlock])
#define ms_libconv_to_physical(pdx, LogBlock) (((LogBlock) >= (pdx)->MS_Lib.NumberOfLogBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Log2PhyMap[LogBlock])
#define ms_lib_ctrl_set(pdx, Flag) ((pdx)->MS_Lib.flags |= (1 << (Flag)))
#define ms_lib_ctrl_reset(pdx, Flag) ((pdx)->MS_Lib.flags &= ~(1 << (Flag)))
#define ms_lib_ctrl_check(pdx, Flag) ((pdx)->MS_Lib.flags & (1 << (Flag)))
#define ms_lib_iswritable(pdx) ((ms_lib_ctrl_check((pdx), MS_LIB_CTRL_RDONLY) == 0) && (ms_lib_ctrl_check(pdx, MS_LIB_CTRL_WRPROTECT) == 0))
#define ms_lib_clear_pagemap(pdx) memset((pdx)->MS_Lib.pagemap, 0, sizeof((pdx)->MS_Lib.pagemap))
#define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0))
/* Per-media status bitfields mirrored from the reader's firmware. */

/* SD/MMC card status flags */
struct SD_STATUS {
	u8    Insert:1;		/* card present */
	u8    Ready:1;		/* card initialised and usable */
	u8    MediaChange:1;	/* media changed since last check */
	u8    IsMMC:1;		/* MMC rather than SD */
	u8    HiCapacity:1;	/* high-capacity (SDHC/large MMC) */
	u8    HiSpeed:1;	/* high-speed mode negotiated */
	u8    WtP:1;		/* write protected */
	u8    Reserved:1;
};

/* MemoryStick card status flags */
struct MS_STATUS {
	u8    Insert:1;
	u8    Ready:1;
	u8    MediaChange:1;
	u8    IsMSPro:1;	/* MemoryStick Pro variant */
	u8    IsMSPHG:1;	/* MemoryStick PRO-HG variant */
	u8    Reserved1:1;
	u8    WtP:1;		/* write protected */
	u8    Reserved2:1;
};

/* SmartMedia card status flags */
struct SM_STATUS {
	u8    Insert:1;
	u8    Ready:1;
	u8    MediaChange:1;
	u8    Reserved:3;
	u8    WtP:1;		/* write protected */
	u8    IsMS:1;		/* slot actually holds a MemoryStick */
};
struct ms_bootblock_cis {
u8 bCistplDEVICE[6]; /* 0 */
u8 bCistplDEVICE0C[6]; /* 6 */
u8 bCistplJEDECC[4]; /* 12 */
u8 bCistplMANFID[6]; /* 16 */
u8 bCistplVER1[32]; /* 22 */
u8 bCistplFUNCID[4]; /* 54 */
u8 bCistplFUNCE0[4]; /* 58 */
u8 bCistplFUNCE1[5]; /* 62 */
u8 bCistplCONF[7]; /* 67 */
u8 bCistplCFTBLENT0[10];/* 74 */
u8 bCistplCFTBLENT1[8]; /* 84 */
u8 bCistplCFTBLENT2[12];/* 92 */
u8 bCistplCFTBLENT3[8]; /* 104 */
u8 bCistplCFTBLENT4[17];/* 112 */
u8 bCistplCFTBLENT5[8]; /* 129 */
u8 bCistplCFTBLENT6[17];/* 137 */
u8 bCistplCFTBLENT7[8]; /* 154 */
u8 bCistplNOLINK[3]; /* 162 */
} ;
struct ms_bootblock_idi {
#define MS_IDI_GENERAL_CONF 0x848A
u16 wIDIgeneralConfiguration; /* 0 */
u16 wIDInumberOfCylinder; /* 1 */
u16 wIDIreserved0; /* 2 */
u16 wIDInumberOfHead; /* 3 */
u16 wIDIbytesPerTrack; /* 4 */
u16 wIDIbytesPerSector; /* 5 */
u16 wIDIsectorsPerTrack; /* 6 */
u16 wIDItotalSectors[2]; /* 7-8 high,low */
u16 wIDIreserved1[11]; /* 9-19 */
u16 wIDIbufferType; /* 20 */
u16 wIDIbufferSize; /* 21 */
u16 wIDIlongCmdECC; /* 22 */
u16 wIDIfirmVersion[4]; /* 23-26 */
u16 wIDImodelName[20]; /* 27-46 */
u16 wIDIreserved2; /* 47 */
u16 wIDIlongWordSupported; /* 48 */
u16 wIDIdmaSupported; /* 49 */
u16 wIDIreserved3; /* 50 */
u16 wIDIpioTiming; /* 51 */
u16 wIDIdmaTiming; /* 52 */
u16 wIDItransferParameter; /* 53 */
u16 wIDIformattedCylinder; /* 54 */
u16 wIDIformattedHead; /* 55 */
u16 wIDIformattedSectorsPerTrack;/* 56 */
u16 wIDIformattedTotalSectors[2];/* 57-58 */
u16 wIDImultiSector; /* 59 */
u16 wIDIlbaSectors[2]; /* 60-61 */
u16 wIDIsingleWordDMA; /* 62 */
u16 wIDImultiWordDMA; /* 63 */
u16 wIDIreserved4[192]; /* 64-255 */
};
struct ms_bootblock_sysent_rec {
u32 dwStart;
u32 dwSize;
u8 bType;
u8 bReserved[3];
};
struct ms_bootblock_sysent {
struct ms_bootblock_sysent_rec entry[MS_NUMBER_OF_SYSTEM_ENTRY];
};
struct ms_bootblock_sysinf {
u8 bMsClass; /* must be 1 */
u8 bCardType; /* see below */
u16 wBlockSize; /* n KB */
u16 wBlockNumber; /* number of physical block */
u16 wTotalBlockNumber; /* number of logical block */
u16 wPageSize; /* must be 0x200 */
u8 bExtraSize; /* 0x10 */
u8 bSecuritySupport;
u8 bAssemblyDate[8];
u8 bFactoryArea[4];
u8 bAssemblyMakerCode;
u8 bAssemblyMachineCode[3];
u16 wMemoryMakerCode;
u16 wMemoryDeviceCode;
u16 wMemorySize;
u8 bReserved1;
u8 bReserved2;
u8 bVCC;
u8 bVPP;
u16 wControllerChipNumber;
u16 wControllerFunction; /* New MS */
u8 bReserved3[9]; /* New MS */
u8 bParallelSupport; /* New MS */
u16 wFormatValue; /* New MS */
u8 bFormatType;
u8 bUsage;
u8 bDeviceType;
u8 bReserved4[22];
u8 bFUValue3;
u8 bFUValue4;
u8 bReserved5[15];
};
struct ms_bootblock_header {
u16 wBlockID;
u16 wFormatVersion;
u8 bReserved1[184];
u8 bNumberOfDataEntry;
u8 bReserved2[179];
};
struct ms_bootblock_page0 {
struct ms_bootblock_header header;
struct ms_bootblock_sysent sysent;
struct ms_bootblock_sysinf sysinf;
};
struct ms_bootblock_cis_idi {
union {
struct ms_bootblock_cis cis;
u8 dmy[256];
} cis;
union {
struct ms_bootblock_idi idi;
u8 dmy[256];
} idi;
};
/* ENE MS Lib struct */
struct ms_lib_type_extdat {
u8 reserved;
u8 intr;
u8 status0;
u8 status1;
u8 ovrflg;
u8 mngflg;
u16 logadr;
};
/*
 * In-memory state of the MemoryStick translation library: card
 * geometry read from the boot block plus the physical<->logical block
 * maps and the write-buffer bookkeeping.
 */
struct ms_lib_ctrl {
u32 flags;                /* MS_LIB_CTRL_* bits (e.g. RDONLY) */
u32 BytesPerSector;
u32 NumberOfCylinder;
u32 SectorsPerCylinder;
u16 cardType; /* R/W, RO, Hybrid */
u16 blockSize;            /* physical block size in KiB */
u16 PagesPerBlock;
u16 NumberOfPhyBlock;
u16 NumberOfLogBlock;
u16 NumberOfSegment;
u16 *Phy2LogMap; /* phy2log table */
u16 *Log2PhyMap; /* log2phy table */
u16 wrtblk;               /* block currently being written, (u16)-1 if none */
unsigned char *pagemap[(MS_MAX_PAGES_PER_BLOCK + (MS_LIB_BITS_PER_BYTE-1)) / MS_LIB_BITS_PER_BYTE];
unsigned char *blkpag;    /* page-data staging buffer for one block */
struct ms_lib_type_extdat *blkext; /* per-page extra data for blkpag */
unsigned char copybuf[512];
};
/* SD Block Length */
/* 2^9 = 512 Bytes, The HW maximum read/write data length */
#define SD_BLOCK_LEN 9
/*
 * Per-device state for the ENE UB6250 card reader, kept in
 * us_data->extra.  Groups the status words and media parameters for
 * the SD/MMC, MemoryStick and SmartMedia slots.
 */
struct ene_ub6250_info {
/* for 6250 code */
struct SD_STATUS SD_Status;
struct MS_STATUS MS_Status;
struct SM_STATUS SM_Status;
/* ----- SD Control Data ---------------- */
/*SD_REGISTER SD_Regs; */
u16 SD_Block_Mult;
u8 SD_READ_BL_LEN;
u16 SD_C_SIZE;
u8 SD_C_SIZE_MULT;
/* SD/MMC New spec. */
u8 SD_SPEC_VER;
u8 SD_CSD_VER;
u8 SD20_HIGH_CAPACITY;
u32 HC_C_SIZE;
u8 MMC_SPEC_VER;
u8 MMC_BusWidth;
u8 MMC_HIGH_CAPACITY;
/*----- MS Control Data ---------------- */
bool MS_SWWP;
u32 MSP_TotalBlock;
struct ms_lib_ctrl MS_Lib;     /* MemoryStick translation-library state */
bool MS_IsRWPage;
u16 MS_Model;
/*----- SM Control Data ---------------- */
u8 SM_DeviceID;
u8 SM_CardID;
unsigned char *testbuf;
u8 BIN_FLAG;                   /* which firmware binary is currently loaded */
u32 bl_num;                    /* last block number of the medium (capacity - 1) */
int SrbStatus;
/*------ Power Management ---------------*/
bool Power_IsResum;
};
static int ene_sd_init(struct us_data *us);
static int ene_ms_init(struct us_data *us);
static int ene_load_bincode(struct us_data *us, unsigned char flag);
static void ene_ub6250_info_destructor(void *extra)
{
if (!extra)
return;
}
/*
 * ene_send_scsi_cmd - run one vendor bulk-only transaction
 * @us:     USB storage device state; us->iobuf holds the prepared CBW
 * @fDir:   FDIR_READ or FDIR_WRITE, selects the bulk pipe for data
 * @buf:    data buffer, or NULL for a command with no data stage
 * @use_sg: non-zero to transfer via the srb scatter-gather list
 *
 * Sends the command block wrapper already built by the caller in
 * us->iobuf, performs the optional data stage, then collects the
 * command status wrapper (retrying once on a 0-length or stalled CSW).
 * Returns USB_STOR_TRANSPORT_GOOD/ERROR.
 *
 * Note: bcb and bcs alias the same us->iobuf; that is safe because the
 * CBW has been fully transmitted before the CSW is read into it.
 */
static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
int result;
unsigned int residue;
unsigned int cswlen = 0, partial = 0;
unsigned int transfer_length = bcb->DataTransferLength;
/* US_DEBUGP("transport --- ene_send_scsi_cmd\n"); */
/* send cmd to out endpoint */
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
bcb, US_BULK_CB_WRAP_LEN, NULL);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("send cmd to out endpoint fail ---\n");
return USB_STOR_TRANSPORT_ERROR;
}
if (buf) {
unsigned int pipe = fDir;
if (fDir == FDIR_READ)
pipe = us->recv_bulk_pipe;
else
pipe = us->send_bulk_pipe;
/* Bulk */
if (use_sg) {
result = usb_stor_bulk_srb(us, pipe, us->srb);
} else {
result = usb_stor_bulk_transfer_sg(us, pipe, buf,
transfer_length, 0, &partial);
}
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("data transfer fail ---\n");
return USB_STOR_TRANSPORT_ERROR;
}
}
/* Get CSW for device status */
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
US_BULK_CS_WRAP_LEN, &cswlen);
if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
US_DEBUGP("Received 0-length CSW; retrying...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
}
if (result == USB_STOR_XFER_STALLED) {
/* get the status again */
US_DEBUGP("Attempting to get CSW (2nd try)...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, NULL);
}
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* check bulk status */
residue = le32_to_cpu(bcs->Residue);
/* try to compute the actual residue, based on how much data
 * was really transferred and what the device tells us */
if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
residue = min(residue, transfer_length);
if (us->srb != NULL)
scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
(int)residue));
}
if (bcs->Status != US_BULK_STAT_OK)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * sd_scsi_test_unit_ready - handle TEST UNIT READY for the SD slot
 * @us:  USB storage device state
 * @srb: the SCSI command (unused beyond being the trigger)
 *
 * If the SD card is not both inserted and initialised, kick off
 * (re-)initialisation.  Always reports success, matching the original
 * behaviour of this handler.
 */
static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (!(info->SD_Status.Insert && info->SD_Status.Ready))
		ene_sd_init(us);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * sd_scsi_inquiry - answer SCSI INQUIRY for the SD slot
 *
 * Returns a canned 36-byte standard INQUIRY response: removable
 * direct-access device, vendor "USB2.0  ", product "CardReader      ",
 * revision "0100".
 */
static int sd_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
	unsigned char inquiry_response[36] = {
		/* header: direct-access, removable, SCSI-2, 31 extra bytes */
		0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
		/* "USB2.0  " */
		0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20,
		/* "CardReader      " */
		0x43, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64,
		0x65, 0x72, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		/* "0100" */
		0x30, 0x31, 0x30, 0x30 };

	usb_stor_set_xfer_buf(inquiry_response, 36, srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * sd_scsi_mode_sense - answer MODE SENSE(6) for the SD slot
 *
 * Returns a fixed 12-byte mode parameter header/page; byte 2 bit 7
 * (the device-specific WP flag) is set when the card reports
 * write protection.
 */
static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	unsigned char mode_page[12] = {
		0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
		0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };

	if (info->SD_Status.WtP)
		mode_page[2] = 0x80;	/* write-protected medium */

	usb_stor_set_xfer_buf(mode_page, 12, srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * sd_scsi_read_capacity - answer READ CAPACITY(10) for the SD slot
 *
 * Computes the address of the last block (bl_num) and the block length
 * (bl_len) from the card's CSD-derived fields, caches bl_num for later
 * range checks in sd_scsi_read/write, and returns both values
 * big-endian in the 8-byte response.
 *
 * SDHC/SDXC cards always use 512-byte blocks; for MMC the device
 * reports the total sector count directly, while for SD 2.0 high
 * capacity (HC_C_SIZE + 1) * 512 KiB gives the size (hence * 1024
 * sectors).  Standard-capacity cards use the classic CSD formula
 * BLOCKNR = C_SIZE_MULT-derived multiplier * (C_SIZE + 1).
 */
static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
u32 bl_num;
u32 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
US_DEBUGP("sd_scsi_read_capacity\n");
if (info->SD_Status.HiCapacity) {
bl_len = 0x200;
if (info->SD_Status.IsMMC)
bl_num = info->HC_C_SIZE-1;
else
bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
} else {
bl_len = 1 << (info->SD_READ_BL_LEN);
bl_num = info->SD_Block_Mult * (info->SD_C_SIZE + 1)
* (1 << (info->SD_C_SIZE_MULT + 2)) - 1;
}
info->bl_num = bl_num;
US_DEBUGP("bl_len = %x\n", bl_len);
US_DEBUGP("bl_num = %x\n", bl_num);
/*srb->request_bufflen = 8; */
/* READ CAPACITY response: last LBA then block length, both big-endian */
buf[0] = (bl_num >> 24) & 0xff;
buf[1] = (bl_num >> 16) & 0xff;
buf[2] = (bl_num >> 8) & 0xff;
buf[3] = (bl_num >> 0) & 0xff;
buf[4] = (bl_len >> 24) & 0xff;
buf[5] = (bl_len >> 16) & 0xff;
buf[6] = (bl_len >> 8) & 0xff;
buf[7] = (bl_len >> 0) & 0xff;
usb_stor_access_xfer_buf(buf, 8, srb, &sg, &offset, TO_XFER_BUF);
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * sd_scsi_read - handle READ(10) for the SD slot
 *
 * Decodes the big-endian LBA and transfer length from the CDB, loads
 * the SD read/write firmware if needed, and issues the vendor 0xF1
 * read command with the (byte or sector) address big-endian in
 * CDB[2..5].  High-capacity cards are addressed by sector number,
 * standard-capacity cards by byte offset (bn * 512).
 * Data is transferred through the srb scatter-gather list.
 */
static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
unsigned char *cdb = srb->cmnd;
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
/* READ(10): bytes 2-5 = LBA (big-endian), bytes 7-8 = length in blocks */
u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
u32 bnByte = bn * 0x200;
u32 blenByte = blen * 0x200;
if (bn > info->bl_num)
return USB_STOR_TRANSPORT_ERROR;
result = ene_load_bincode(us, SD_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Load SD RW pattern Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* high-capacity cards are sector-addressed, not byte-addressed */
if (info->SD_Status.HiCapacity)
bnByte = bn;
/* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = blenByte;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[5] = (unsigned char)(bnByte);
bcb->CDB[4] = (unsigned char)(bnByte>>8);
bcb->CDB[3] = (unsigned char)(bnByte>>16);
bcb->CDB[2] = (unsigned char)(bnByte>>24);
result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1);
return result;
}
/*
 * sd_scsi_write - handle WRITE(10) for the SD slot
 *
 * Mirror image of sd_scsi_read(): decodes LBA/length from the CDB,
 * loads the SD read/write firmware, and issues the vendor 0xF0 write
 * command with the address big-endian in CDB[2..5].  High-capacity
 * cards are sector-addressed, standard cards byte-addressed.
 */
static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
unsigned char *cdb = srb->cmnd;
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
/* WRITE(10): bytes 2-5 = LBA (big-endian), bytes 7-8 = length in blocks */
u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
u32 bnByte = bn * 0x200;
u32 blenByte = blen * 0x200;
if (bn > info->bl_num)
return USB_STOR_TRANSPORT_ERROR;
result = ene_load_bincode(us, SD_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Load SD RW pattern Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* high-capacity cards are sector-addressed, not byte-addressed */
if (info->SD_Status.HiCapacity)
bnByte = bn;
/* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = blenByte;
bcb->Flags = 0x00;
bcb->CDB[0] = 0xF0;
bcb->CDB[5] = (unsigned char)(bnByte);
bcb->CDB[4] = (unsigned char)(bnByte>>8);
bcb->CDB[3] = (unsigned char)(bnByte>>16);
bcb->CDB[2] = (unsigned char)(bnByte>>24);
result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
return result;
}
/*
* ENE MS Card
*/
/*
 * ms_lib_set_logicalpair - record a logical<->physical block mapping
 * @us:     USB storage device state
 * @logblk: logical block number
 * @phyblk: physical block number
 *
 * Updates both translation tables.  Returns 0 on success or -1 when
 * either index lies outside the tables built from the boot block.
 */
static int ms_lib_set_logicalpair(struct us_data *us, u16 logblk, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (logblk >= info->MS_Lib.NumberOfLogBlock)
		return (u32)-1;
	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return (u32)-1;

	info->MS_Lib.Log2PhyMap[logblk] = phyblk;
	info->MS_Lib.Phy2LogMap[phyblk] = logblk;
	return 0;
}
/*
 * ms_lib_set_logicalblockmark - tag a physical block in the phy->log map
 * @us:     USB storage device state
 * @phyblk: physical block number
 * @mark:   logical address or MS_LB_* marker value to store
 *
 * Returns 0 on success, -1 if phyblk is out of range.
 */
static int ms_lib_set_logicalblockmark(struct us_data *us, u16 phyblk, u16 mark)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	int ret = 0;

	if (phyblk < info->MS_Lib.NumberOfPhyBlock)
		info->MS_Lib.Phy2LogMap[phyblk] = mark;
	else
		ret = (u32)-1;

	return ret;
}
/* Mark @phyblk as a factory-marked (initial) defective block. */
static int ms_lib_set_initialerrorblock(struct us_data *us, u16 phyblk)
{
return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_INITIAL_ERROR);
}
/* Mark @phyblk as a boot block so it is never remapped for data. */
static int ms_lib_set_bootblockmark(struct us_data *us, u16 phyblk)
{
return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_BOOT_BLOCK);
}
/*
 * ms_lib_free_logicalmap - release both block-translation tables
 *
 * kfree(NULL) is a no-op, so the tables may be freed unconditionally;
 * the pointers are cleared afterwards to guard against double free.
 * Always returns 0.
 */
static int ms_lib_free_logicalmap(struct us_data *us)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	kfree(info->MS_Lib.Phy2LogMap);
	kfree(info->MS_Lib.Log2PhyMap);
	info->MS_Lib.Phy2LogMap = NULL;
	info->MS_Lib.Log2PhyMap = NULL;

	return 0;
}
/*
 * ms_lib_alloc_logicalmap - allocate and reset the translation tables
 *
 * Allocates the phy->log and log->phy maps sized from the boot-block
 * geometry and initialises every entry to MS_LB_NOT_USED.  On any
 * allocation failure both tables are freed and -1 is returned;
 * returns 0 on success.
 */
static int ms_lib_alloc_logicalmap(struct us_data *us)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u32 blk;

	info->MS_Lib.Phy2LogMap = kmalloc(info->MS_Lib.NumberOfPhyBlock * sizeof(u16), GFP_KERNEL);
	info->MS_Lib.Log2PhyMap = kmalloc(info->MS_Lib.NumberOfLogBlock * sizeof(u16), GFP_KERNEL);

	if (!info->MS_Lib.Phy2LogMap || !info->MS_Lib.Log2PhyMap) {
		ms_lib_free_logicalmap(us);
		return (u32)-1;
	}

	/* every block starts out unmapped in both directions */
	for (blk = 0; blk < info->MS_Lib.NumberOfPhyBlock; blk++)
		info->MS_Lib.Phy2LogMap[blk] = MS_LB_NOT_USED;
	for (blk = 0; blk < info->MS_Lib.NumberOfLogBlock; blk++)
		info->MS_Lib.Log2PhyMap[blk] = MS_LB_NOT_USED;

	return 0;
}
/*
 * ms_lib_clear_writebuf - reset the one-block write staging area
 *
 * Marks no block as being written, clears the page bitmap, fills the
 * page-data buffer with 0xff (the erased-flash value) and restores the
 * per-page extra data to its register defaults with an unused logical
 * address.  Both buffer pointers are checked since allocation may have
 * failed or not happened yet.
 */
static void ms_lib_clear_writebuf(struct us_data *us)
{
int i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
info->MS_Lib.wrtblk = (u16)-1;
ms_lib_clear_pagemap(info);
if (info->MS_Lib.blkpag)
memset(info->MS_Lib.blkpag, 0xff, info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector);
if (info->MS_Lib.blkext) {
for (i = 0; i < info->MS_Lib.PagesPerBlock; i++) {
info->MS_Lib.blkext[i].status1 = MS_REG_ST1_DEFAULT;
info->MS_Lib.blkext[i].ovrflg = MS_REG_OVR_DEFAULT;
info->MS_Lib.blkext[i].mngflg = MS_REG_MNG_DEFAULT;
info->MS_Lib.blkext[i].logadr = MS_LB_NOT_USED;
}
}
}
/*
 * ms_count_freeblock - count free blocks in one segment
 * @us:       USB storage device state
 * @PhyBlock: first physical block of the segment
 *
 * Scans one segment's worth of entries in the phy->log map and returns
 * how many blocks are free, i.e. unmapped (erased or not).
 */
static int ms_count_freeblock(struct us_data *us, u16 PhyBlock)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u32 end = PhyBlock + MS_PHYSICAL_BLOCKS_PER_SEGMENT;
	u32 count = 0;

	for (; PhyBlock < end; PhyBlock++) {
		u16 state = info->MS_Lib.Phy2LogMap[PhyBlock];

		if (state == MS_LB_NOT_USED || state == MS_LB_NOT_USED_ERASED)
			count++;
	}

	return count;
}
/*
 * ms_read_readpage - read one MemoryStick page plus its extra data
 * @us:           USB storage device state
 * @PhyBlockAddr: physical block number
 * @PageNum:      page within the block
 * @PageBuf:      512-byte destination for the page data
 * @ExtraDat:     decoded spare-area data for the page
 *
 * Loads the MS read/write firmware if needed, issues a vendor 0xF1/0x02
 * command for the 512-byte page (block*32 + page, big-endian in
 * CDB[2..5]), then a 0xF1/0x03 command for the 4-byte extra data.
 * The intr/status fields of @ExtraDat are synthesised (the firmware
 * does not report them yet); ovrflg/mngflg/logadr come from the card.
 */
static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
u8 ExtBuf[4];
u32 bn = PhyBlockAddr * 0x20 + PageNum;
/* printk(KERN_INFO "MS --- MS_ReaderReadPage,
PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* Read Page Data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02; /* in init.c ENE_MSInit() is 0x01 */
bcb->CDB[5] = (unsigned char)(bn);
bcb->CDB[4] = (unsigned char)(bn>>8);
bcb->CDB[3] = (unsigned char)(bn>>16);
bcb->CDB[2] = (unsigned char)(bn>>24);
result = ene_send_scsi_cmd(us, FDIR_READ, PageBuf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* Read Extra Data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x4;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
bcb->CDB[5] = (unsigned char)(PageNum);
bcb->CDB[4] = (unsigned char)(PhyBlockAddr);
bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8);
bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
bcb->CDB[6] = 0x01;
result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
ExtraDat->reserved = 0;
ExtraDat->intr = 0x80; /* not reported by firmware yet */
ExtraDat->status0 = 0x10; /* not reported by firmware yet */
ExtraDat->status1 = 0x00; /* not reported by firmware yet */
ExtraDat->ovrflg = ExtBuf[0];
ExtraDat->mngflg = ExtBuf[1];
ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_lib_process_bootblock - parse a MemoryStick boot block
 * @us:       USB storage device state
 * @PhyBlock: physical block the boot block was read from
 * @PageData: page 0 of the boot block (header + sysent + sysinf)
 *
 * Validates the system-information area, captures the card geometry
 * into MS_Lib, allocates the translation tables, marks the boot block,
 * and walks the system-entry table: entry 0 is the factory
 * disabled-block list (each listed block is marked as an initial
 * error), entry 1 is the CIS/IDI area (checked for the expected
 * configuration and sector size).
 *
 * Returns 0 on success or -1 on failure; on failure any partially
 * built translation tables are freed again.
 *
 * Bug fix: the original code reset 'result' to 0 immediately before
 * returning, so callers were told the boot block parsed successfully
 * even when validation or page reads had failed.  That clobber has
 * been removed so the error is actually propagated.
 */
static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageData)
{
	struct ms_bootblock_sysent *SysEntry;
	struct ms_bootblock_sysinf *SysInfo;
	u32 i, result;
	u8 PageNumber;
	u8 *PageBuffer;
	struct ms_lib_type_extdat ExtraData;
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
	if (PageBuffer == NULL)
		return (u32)-1;

	result = (u32)-1;

	/* Validate the system-information area; 16-bit on-card fields
	 * are big-endian. */
	SysInfo = &(((struct ms_bootblock_page0 *)PageData)->sysinf);
	if ((SysInfo->bMsClass != MS_SYSINF_MSCLASS_TYPE_1) ||
		(be16_to_cpu(SysInfo->wPageSize) != MS_SYSINF_PAGE_SIZE) ||
		((SysInfo->bSecuritySupport & MS_SYSINF_SECURITY) == MS_SYSINF_SECURITY_SUPPORT) ||
		(SysInfo->bReserved1 != MS_SYSINF_RESERVED1) ||
		(SysInfo->bReserved2 != MS_SYSINF_RESERVED2) ||
		(SysInfo->bFormatType != MS_SYSINF_FORMAT_FAT) ||
		(SysInfo->bUsage != MS_SYSINF_USAGE_GENERAL))
		goto exit;

	/* Only plain read-only or read/write cards are supported. */
	switch (info->MS_Lib.cardType = SysInfo->bCardType) {
	case MS_SYSINF_CARDTYPE_RDONLY:
		ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY);
		break;
	case MS_SYSINF_CARDTYPE_RDWR:
		ms_lib_ctrl_reset(info, MS_LIB_CTRL_RDONLY);
		break;
	case MS_SYSINF_CARDTYPE_HYBRID:
	default:
		goto exit;
	}

	/* Capture the card geometry. */
	info->MS_Lib.blockSize = be16_to_cpu(SysInfo->wBlockSize);
	info->MS_Lib.NumberOfPhyBlock = be16_to_cpu(SysInfo->wBlockNumber);
	info->MS_Lib.NumberOfLogBlock = be16_to_cpu(SysInfo->wTotalBlockNumber)-2;
	info->MS_Lib.PagesPerBlock = info->MS_Lib.blockSize * SIZE_OF_KIRO / MS_BYTES_PER_PAGE;
	info->MS_Lib.NumberOfSegment = info->MS_Lib.NumberOfPhyBlock / MS_PHYSICAL_BLOCKS_PER_SEGMENT;
	info->MS_Model = be16_to_cpu(SysInfo->wMemorySize);

	/* Allocate the logical<->physical translation tables. */
	if (ms_lib_alloc_logicalmap(us))
		goto exit;

	/* Mark the boot block so it is never used for data. */
	ms_lib_set_bootblockmark(us, PhyBlock);

	SysEntry = &(((struct ms_bootblock_page0 *)PageData)->sysent);
	for (i = 0; i < MS_NUMBER_OF_SYSTEM_ENTRY; i++) {
		u32 EntryOffset, EntrySize;

		EntryOffset = be32_to_cpu(SysEntry->entry[i].dwStart);
		if (EntryOffset == 0xffffff)
			continue;

		EntrySize = be32_to_cpu(SysEntry->entry[i].dwSize);
		if (EntrySize == 0)
			continue;

		/* Skip entries that would run past the boot block. */
		if (EntryOffset + MS_BYTES_PER_PAGE + EntrySize > info->MS_Lib.blockSize * (u32)SIZE_OF_KIRO)
			continue;

		if (i == 0) {
			/* Entry 0: factory disabled-block list. */
			u8 PrevPageNumber = 0;
			u16 phyblk;

			if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_INVALID_BLOCK)
				goto exit;

			while (EntrySize > 0) {
				/* +1: the list starts on the page after page 0 */
				PageNumber = (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1);
				if (PageNumber != PrevPageNumber) {
					switch (ms_read_readpage(us, PhyBlock, PageNumber, (u32 *)PageBuffer, &ExtraData)) {
					case MS_STATUS_SUCCESS:
						break;
					case MS_STATUS_WRITE_PROTECT:
					case MS_ERROR_FLASH_READ:
					case MS_STATUS_ERROR:
					default:
						goto exit;
					}
					PrevPageNumber = PageNumber;
				}

				/* Each entry is a big-endian physical block number. */
				phyblk = be16_to_cpu(*(u16 *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)));
				if (phyblk < 0x0fff)
					ms_lib_set_initialerrorblock(us, phyblk);
				EntryOffset += 2;
				EntrySize -= 2;
			}
		} else if (i == 1) { /* CIS/IDI */
			struct ms_bootblock_idi *idi;

			if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_CIS_IDI)
				goto exit;

			switch (ms_read_readpage(us, PhyBlock, (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1), (u32 *)PageBuffer, &ExtraData)) {
			case MS_STATUS_SUCCESS:
				break;
			case MS_STATUS_WRITE_PROTECT:
			case MS_ERROR_FLASH_READ:
			case MS_STATUS_ERROR:
			default:
				goto exit;
			}

			idi = &((struct ms_bootblock_cis_idi *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)))->idi.idi;
			if (le16_to_cpu(idi->wIDIgeneralConfiguration) != MS_IDI_GENERAL_CONF)
				goto exit;

			info->MS_Lib.BytesPerSector = le16_to_cpu(idi->wIDIbytesPerSector);
			if (info->MS_Lib.BytesPerSector != MS_BYTES_PER_PAGE)
				goto exit;
		}
	} /* End for .. */

	result = 0;

exit:
	if (result)
		ms_lib_free_logicalmap(us);

	kfree(PageBuffer);
	return result;
}
/*
 * ms_lib_free_writebuf - tear down the one-block write staging area
 *
 * Marks no block as being written, clears the page bitmap and frees
 * the page-data and extra-data buffers.  kfree(NULL) is a no-op, so
 * the redundant NULL guards and the pointless (u8 *) casts of the
 * original have been dropped; the pointers are still cleared to guard
 * against double free.
 */
static void ms_lib_free_writebuf(struct us_data *us)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	info->MS_Lib.wrtblk = (u16)-1; /* no block being written */

	ms_lib_clear_pagemap(info);

	kfree(info->MS_Lib.blkpag);
	info->MS_Lib.blkpag = NULL;

	kfree(info->MS_Lib.blkext);
	info->MS_Lib.blkext = NULL;
}
/*
 * ms_lib_free_allocatedarea - release all MS library state
 *
 * Frees the write staging buffers and the translation tables, then
 * resets the geometry fields so a later re-initialisation starts from
 * a clean slate.
 */
static void ms_lib_free_allocatedarea(struct us_data *us)
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
ms_lib_free_writebuf(us); /* Free MS_Lib.pagemap */
ms_lib_free_logicalmap(us); /* kfree MS_Lib.Phy2LogMap and MS_Lib.Log2PhyMap */
/* set struct us point flag to 0 */
info->MS_Lib.flags = 0;
info->MS_Lib.BytesPerSector = 0;
info->MS_Lib.SectorsPerCylinder = 0;
info->MS_Lib.cardType = 0;
info->MS_Lib.blockSize = 0;
info->MS_Lib.PagesPerBlock = 0;
info->MS_Lib.NumberOfPhyBlock = 0;
info->MS_Lib.NumberOfLogBlock = 0;
}
/*
 * ms_lib_alloc_writebuf - allocate the one-block write staging area
 *
 * Allocates the page-data buffer (one full block) and the matching
 * per-page extra-data array, then resets them via
 * ms_lib_clear_writebuf().  On any allocation failure both buffers are
 * freed and -1 is returned; returns 0 on success.
 */
static int ms_lib_alloc_writebuf(struct us_data *us)
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
info->MS_Lib.wrtblk = (u16)-1;
info->MS_Lib.blkpag = kmalloc(info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector, GFP_KERNEL);
info->MS_Lib.blkext = kmalloc(info->MS_Lib.PagesPerBlock * sizeof(struct ms_lib_type_extdat), GFP_KERNEL);
if ((info->MS_Lib.blkpag == NULL) || (info->MS_Lib.blkext == NULL)) {
ms_lib_free_writebuf(us);
return (u32)-1;
}
ms_lib_clear_writebuf(us);
return 0;
}
/*
 * ms_lib_force_setlogical_pair - record a mapping, tolerating "unused"
 * @us:     USB storage device state
 * @logblk: logical block number, or MS_LB_NOT_USED to do nothing
 * @phyblk: physical block number
 *
 * Like ms_lib_set_logicalpair() but silently succeeds when the logical
 * address is the "not used" marker.  Returns 0 on success, -1 when
 * either index is out of range.
 */
static int ms_lib_force_setlogical_pair(struct us_data *us, u16 logblk, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (logblk == MS_LB_NOT_USED)
		return 0;

	if (logblk >= info->MS_Lib.NumberOfLogBlock)
		return (u32)-1;
	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return (u32)-1;

	info->MS_Lib.Log2PhyMap[logblk] = phyblk;
	info->MS_Lib.Phy2LogMap[phyblk] = logblk;
	return 0;
}
/*
 * ms_read_copyblock - firmware-assisted block copy with page update
 * @us:           USB storage device state
 * @oldphy:       source physical block
 * @newphy:       destination physical block
 * @PhyBlockAddr: block address recorded in CDB[8..9]
 * @PageNum:      starting page
 * @buf:          page data supplied for the updated pages
 * @len:          number of 512-byte pages transferred
 *
 * Loads the MS read/write firmware if needed and issues the vendor
 * 0xF0/0x08 copy command; the device copies @oldphy to @newphy,
 * merging in the pages written through @buf.  All block numbers are
 * sent little-end-first (low byte in the higher CDB index).
 */
static int ms_read_copyblock(struct us_data *us, u16 oldphy, u16 newphy,
u16 PhyBlockAddr, u8 PageNum, unsigned char *buf, u16 len)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
/* printk(KERN_INFO "MS_ReaderCopyBlock --- PhyBlockAddr = %x,
PageNum = %x\n", PhyBlockAddr, PageNum); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200*len;
bcb->Flags = 0x00;
bcb->CDB[0] = 0xF0;
bcb->CDB[1] = 0x08;
bcb->CDB[4] = (unsigned char)(oldphy);
bcb->CDB[3] = (unsigned char)(oldphy>>8);
bcb->CDB[2] = 0; /* (BYTE)(oldphy>>16) */
bcb->CDB[7] = (unsigned char)(newphy);
bcb->CDB[6] = (unsigned char)(newphy>>8);
bcb->CDB[5] = 0; /* (BYTE)(newphy>>16) */
bcb->CDB[9] = (unsigned char)(PhyBlockAddr);
bcb->CDB[8] = (unsigned char)(PhyBlockAddr>>8);
bcb->CDB[10] = PageNum;
result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_read_eraseblock - erase one physical MemoryStick block
 * @us:           USB storage device state
 * @PhyBlockAddr: physical block to erase
 *
 * Loads the MS read/write firmware if needed and issues the vendor
 * 0xF2/0x06 erase command with the block number in CDB[2..4]
 * (big-endian).  Returns USB_STOR_TRANSPORT_GOOD/ERROR.
 */
static int ms_read_eraseblock(struct us_data *us, u32 PhyBlockAddr)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
u32 bn = PhyBlockAddr;
/* printk(KERN_INFO "MS --- ms_read_eraseblock,
PhyBlockAddr = %x\n", PhyBlockAddr); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x06;
bcb->CDB[4] = (unsigned char)(bn);
bcb->CDB[3] = (unsigned char)(bn>>8);
bcb->CDB[2] = (unsigned char)(bn>>16);
result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_lib_check_disableblock - check whether logical block 0's physical
 * block appears in a boot block's disabled-block list
 * @us:       USB storage device state
 * @PhyBlock: boot block whose page 1 holds the disabled-block list
 *
 * Reads page 1 of @PhyBlock and scans the block-number list until the
 * MS_LB_NOT_USED terminator; if the block currently mapped to logical
 * block 0 is listed, MS_ERROR_FLASH_READ is returned, otherwise
 * MS_STATUS_SUCCESS (or MS_NO_MEMORY_ERROR on allocation failure).
 *
 * NOTE(review): the return value of ms_read_readpage() is ignored, so
 * a failed page read scans stale/uninitialised data — confirm intended.
 * NOTE(review): PageBuf is unsigned char*, so be16_to_cpu(PageBuf[index])
 * byte-swaps a single byte and steps one byte at a time; the on-card
 * list holds 16-bit entries, so this looks like it was meant to read
 * through a u16 pointer — verify against the hardware format.
 * NOTE(review): the loop has no bound besides the terminator, so a
 * list without MS_LB_NOT_USED would run past the page — confirm the
 * firmware guarantees termination.
 */
static int ms_lib_check_disableblock(struct us_data *us, u16 PhyBlock)
{
unsigned char *PageBuf = NULL;
u16 result = MS_STATUS_SUCCESS;
u16 blk, index = 0;
struct ms_lib_type_extdat extdat;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
PageBuf = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
if (PageBuf == NULL) {
result = MS_NO_MEMORY_ERROR;
goto exit;
}
ms_read_readpage(us, PhyBlock, 1, (u32 *)PageBuf, &extdat);
do {
blk = be16_to_cpu(PageBuf[index]);
if (blk == MS_LB_NOT_USED)
break;
if (blk == info->MS_Lib.Log2PhyMap[0]) {
result = MS_ERROR_FLASH_READ;
break;
}
index++;
} while (1);
exit:
kfree(PageBuf);
return result;
}
/*
 * ms_lib_setacquired_errorblock - mark a block as failed at runtime
 * @us:     USB storage device state
 * @phyblk: physical block that produced an error
 *
 * Drops any logical mapping that pointed at @phyblk and tags the block
 * as an acquired error, unless it already carries the stronger
 * factory (initial) error marking.  Returns 0 on success, -1 if
 * @phyblk is out of range.
 */
static int ms_lib_setacquired_errorblock(struct us_data *us, u16 phyblk)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	u16 logical;

	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return (u32)-1;

	logical = info->MS_Lib.Phy2LogMap[phyblk];
	if (logical < info->MS_Lib.NumberOfLogBlock)
		info->MS_Lib.Log2PhyMap[logical] = MS_LB_NOT_USED;

	if (info->MS_Lib.Phy2LogMap[phyblk] != MS_LB_INITIAL_ERROR)
		info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_ACQUIRED_ERROR;

	return 0;
}
/*
 * ms_lib_overwrite_extra - rewrite a page's overwrite flag on the card
 * @us:            USB storage device state
 * @PhyBlockAddr:  physical block
 * @PageNum:       page within the block
 * @OverwriteFlag: new overwrite-flag byte (MS_REG_OVR_*)
 *
 * Loads the MS read/write firmware if needed and issues the vendor
 * 0xF2/0x05 command.  Only the overwrite flag is changed; the other
 * three extra-data bytes are sent as 0xFF ("leave as-is" on flash).
 */
static int ms_lib_overwrite_extra(struct us_data *us, u32 PhyBlockAddr,
u8 PageNum, u8 OverwriteFlag)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
/* printk("MS --- MS_LibOverwriteExtra,
PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x4;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF2;
bcb->CDB[1] = 0x05;
bcb->CDB[5] = (unsigned char)(PageNum);
bcb->CDB[4] = (unsigned char)(PhyBlockAddr);
bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8);
bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
bcb->CDB[6] = OverwriteFlag;
bcb->CDB[7] = 0xFF;
bcb->CDB[8] = 0xFF;
bcb->CDB[9] = 0xFF;
result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_lib_error_phyblock - retire a physical block after an error
 *
 * Marks @phyblk as an acquired error in the in-memory maps; when the
 * card is writable the bad-block status is also written to the card by
 * clearing the BKST bit in page 0's overwrite flag.  Returns
 * MS_STATUS_ERROR for an out-of-range block, otherwise the overwrite
 * result or MS_STATUS_SUCCESS.
 */
static int ms_lib_error_phyblock(struct us_data *us, u16 phyblk)
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
return MS_STATUS_ERROR;
ms_lib_setacquired_errorblock(us, phyblk);
if (ms_lib_iswritable(info))
return ms_lib_overwrite_extra(us, phyblk, 0, (u8)(~MS_REG_OVR_BKST & BYTE_MASK));
return MS_STATUS_SUCCESS;
}
/*
 * ms_lib_erase_phyblock - erase a block and update the maps
 *
 * Removes any logical mapping of @phyblk, then erases it on the card
 * when writable.  On successful erase the block becomes
 * MS_LB_NOT_USED_ERASED; erase failures retire the block (acquired
 * error) and unknown failures additionally force the card read-only.
 * On a read-only card the block is marked as an acquired error
 * instead of being erased.  Returns an MS_STATUS_*/MS_ERROR_* code.
 */
static int ms_lib_erase_phyblock(struct us_data *us, u16 phyblk)
{
u16 log;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
return MS_STATUS_ERROR;
/* drop the logical mapping before touching the card */
log = info->MS_Lib.Phy2LogMap[phyblk];
if (log < info->MS_Lib.NumberOfLogBlock)
info->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED;
info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED;
if (ms_lib_iswritable(info)) {
switch (ms_read_eraseblock(us, phyblk)) {
case MS_STATUS_SUCCESS:
info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED_ERASED;
return MS_STATUS_SUCCESS;
case MS_ERROR_FLASH_ERASE:
case MS_STATUS_INT_ERROR:
ms_lib_error_phyblock(us, phyblk);
return MS_ERROR_FLASH_ERASE;
case MS_STATUS_ERROR:
default:
ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY); /* MS_LibCtrlSet will used by ENE_MSInit ,need check, and why us to info*/
ms_lib_setacquired_errorblock(us, phyblk);
return MS_STATUS_ERROR;
}
}
ms_lib_setacquired_errorblock(us, phyblk);
return MS_STATUS_SUCCESS;
}
/*
 * ms_lib_read_extra - read the 4-byte extra (spare) data of one page
 * @us:       USB storage device state
 * @PhyBlock: physical block
 * @PageNum:  page within the block
 * @ExtraDat: decoded result
 *
 * Issues the vendor 0xF1/0x03 command for a single extra-data record.
 * Unlike ms_read_readpage() this does not (re)load the firmware, so
 * callers must ensure the MS read/write pattern is already active.
 * The intr/status fields are synthesised; ovrflg/mngflg/logadr come
 * from the card.
 */
static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
u8 ExtBuf[4];
/* printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x4;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
bcb->CDB[5] = (unsigned char)(PageNum);
bcb->CDB[4] = (unsigned char)(PhyBlock);
bcb->CDB[3] = (unsigned char)(PhyBlock>>8);
bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
bcb->CDB[6] = 0x01;
result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
ExtraDat->reserved = 0;
ExtraDat->intr = 0x80; /* not reported by firmware yet */
ExtraDat->status0 = 0x10; /* not reported by firmware yet */
ExtraDat->status1 = 0x00; /* not reported by firmware yet */
ExtraDat->ovrflg = ExtBuf[0];
ExtraDat->mngflg = ExtBuf[1];
ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_libsearch_block_from_physical - find a usable free block near one
 * @us:     USB storage device state
 * @phyblk: physical block to start the search from
 *
 * Walks the blocks of @phyblk's segment circularly, starting just
 * after @phyblk (crossing a segment boundary wraps back to the
 * segment's first block).  An already-erased unmapped block is
 * returned directly; an unmapped block is first validated via its
 * extra data (bad-block status retires it) and erased before being
 * returned.  Returns a block number, MS_NOCARD_ERROR, or MS_LB_ERROR
 * when the segment holds no usable block.
 *
 * Cleanup: the unused local 'Newblk' (assigned from the phy->log map
 * but never read) has been removed.
 */
static int ms_libsearch_block_from_physical(struct us_data *us, u16 phyblk)
{
	u16 blk;
	struct ms_lib_type_extdat extdat; /* need check */
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;

	if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
		return MS_LB_ERROR;

	for (blk = phyblk + 1; blk != phyblk; blk++) {
		/* wrap around within the segment */
		if ((blk & MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK) == 0)
			blk -= MS_PHYSICAL_BLOCKS_PER_SEGMENT;

		if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED_ERASED) {
			return blk;
		} else if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED) {
			switch (ms_lib_read_extra(us, blk, 0, &extdat)) {
			case MS_STATUS_SUCCESS:
			case MS_STATUS_SUCCESS_WITH_ECC:
				break;
			case MS_NOCARD_ERROR:
				return MS_NOCARD_ERROR;
			case MS_STATUS_INT_ERROR:
				return MS_LB_ERROR;
			case MS_ERROR_FLASH_READ:
			default:
				ms_lib_setacquired_errorblock(us, blk);
				continue;
			} /* End switch */

			if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
				/* block is marked bad on the card */
				ms_lib_setacquired_errorblock(us, blk);
				continue;
			}

			switch (ms_lib_erase_phyblock(us, blk)) {
			case MS_STATUS_SUCCESS:
				return blk;
			case MS_STATUS_ERROR:
				return MS_LB_ERROR;
			case MS_ERROR_FLASH_ERASE:
			default:
				ms_lib_error_phyblock(us, blk);
				break;
			}
		}
	} /* End for */

	return MS_LB_ERROR;
}
/*
 * ms_libsearch_block_from_logical - find a free block for a logical one
 * @us:     USB storage device state
 * @logblk: logical block needing a (new) physical block
 *
 * Uses the current mapping of @logblk as the search anchor; when the
 * logical block is unmapped (lookup yields an MS_LB_ERROR-range value),
 * the anchor is instead the last physical block of the segment that
 * logically contains @logblk (boot blocks shift the segment math by
 * MS_NUMBER_OF_BOOT_BLOCK).  Delegates the actual search to
 * ms_libsearch_block_from_physical().
 */
static int ms_libsearch_block_from_logical(struct us_data *us, u16 logblk)
{
u16 phyblk;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
phyblk = ms_libconv_to_physical(info, logblk);
if (phyblk >= MS_LB_ERROR) {
if (logblk >= info->MS_Lib.NumberOfLogBlock)
return MS_LB_ERROR;
phyblk = (logblk + MS_NUMBER_OF_BOOT_BLOCK) / MS_LOGICAL_BLOCKS_PER_SEGMENT;
phyblk *= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
phyblk += MS_PHYSICAL_BLOCKS_PER_SEGMENT - 1;
}
return ms_libsearch_block_from_physical(us, phyblk);
}
/*
 * ms_scsi_test_unit_ready - handle TEST UNIT READY for the MS slot
 * @us:  USB storage device state
 * @srb: the SCSI command (unused beyond being the trigger)
 *
 * If the MemoryStick is not both inserted and initialised, kick off
 * (re-)initialisation.  Always reports success, matching the original
 * behaviour of this handler.
 */
static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);

	/* pr_info("MS_SCSI_Test_Unit_Ready\n"); */
	if (!(info->MS_Status.Insert && info->MS_Status.Ready))
		ene_ms_init(us);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_scsi_inquiry - answer SCSI INQUIRY for the MS slot
 *
 * Returns the same canned 36-byte standard INQUIRY response as the SD
 * slot: removable direct-access device, vendor "USB2.0  ", product
 * "CardReader      ", revision "0100".
 */
static int ms_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
	unsigned char inquiry_response[36] = {
		/* header: direct-access, removable, SCSI-2, 31 extra bytes */
		0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
		/* "USB2.0  " */
		0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20,
		/* "CardReader      " */
		0x43, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64,
		0x65, 0x72, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		/* "0100" */
		0x30, 0x31, 0x30, 0x30};

	usb_stor_set_xfer_buf(inquiry_response, 36, srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_scsi_mode_sense - answer MODE SENSE(6) for the MS slot
 *
 * Returns a fixed 12-byte mode parameter header/page; byte 2 bit 7
 * (the device-specific WP flag) is set when the MemoryStick reports
 * write protection.
 */
static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
	unsigned char mode_page[12] = {
		0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
		0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };

	if (info->MS_Status.WtP)
		mode_page[2] = 0x80;	/* write-protected medium */

	usb_stor_set_xfer_buf(mode_page, 12, srb);
	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_scsi_read_capacity - answer READ CAPACITY(10) for the MS slot
 *
 * Block length is always 512 bytes.  For MS Pro the device-reported
 * total block count is used directly; for classic MemoryStick the
 * capacity is derived from the translation-library geometry
 * (logical blocks * blockSize KiB => * 2 sectors of 512 bytes).
 * Caches bl_num for later range checks and returns last-LBA and block
 * length big-endian in the 8-byte response.
 */
static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
u32 bl_num;
u16 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
US_DEBUGP("ms_scsi_read_capacity\n");
bl_len = 0x200;
if (info->MS_Status.IsMSPro)
bl_num = info->MSP_TotalBlock - 1;
else
bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1;
info->bl_num = bl_num;
US_DEBUGP("bl_len = %x\n", bl_len);
US_DEBUGP("bl_num = %x\n", bl_num);
/*srb->request_bufflen = 8; */
/* READ CAPACITY response: last LBA then block length, big-endian
 * (bl_len is u16, so the two high shifts yield 0 as intended) */
buf[0] = (bl_num >> 24) & 0xff;
buf[1] = (bl_num >> 16) & 0xff;
buf[2] = (bl_num >> 8) & 0xff;
buf[3] = (bl_num >> 0) & 0xff;
buf[4] = (bl_len >> 24) & 0xff;
buf[5] = (bl_len >> 16) & 0xff;
buf[6] = (bl_len >> 8) & 0xff;
buf[7] = (bl_len >> 0) & 0xff;
usb_stor_access_xfer_buf(buf, 8, srb, &sg, &offset, TO_XFER_BUF);
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_lib_phy_to_log_range - logical block range of a block's segment
 * @PhyBlock: any physical block number
 * @LogStart: out, first logical block mapped by the segment
 * @LogEnde:  out, one past the last logical block of the segment
 *
 * Segment 0 contains the boot blocks and therefore maps fewer logical
 * blocks (494) than each later segment (496).
 */
static void ms_lib_phy_to_log_range(u16 PhyBlock, u16 *LogStart, u16 *LogEnde)
{
	u16 segment = PhyBlock / MS_PHYSICAL_BLOCKS_PER_SEGMENT;

	if (segment == 0) {
		*LogStart = 0;
		*LogEnde = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT; /* 494 */
	} else {
		*LogStart = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT +
			(segment - 1) * MS_LOGICAL_BLOCKS_PER_SEGMENT; /* 496 */
		*LogEnde = *LogStart + MS_LOGICAL_BLOCKS_PER_SEGMENT; /* 496 */
	}
}
/*
 * ms_lib_read_extrablock - bulk-read extra data of consecutive pages
 * @us:       USB storage device state
 * @PhyBlock: starting physical block
 * @PageNum:  starting page
 * @blen:     number of 4-byte extra-data records to read
 * @buf:      destination, at least 4 * @blen bytes
 *
 * Issues the vendor 0xF1/0x03 command with a record count in CDB[6],
 * reading the raw (undecoded) extra data for @blen pages in one
 * transfer.  Does not reload firmware; callers must have the MS
 * read/write pattern active.
 */
static int ms_lib_read_extrablock(struct us_data *us, u32 PhyBlock,
u8 PageNum, u8 blen, void *buf)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
/* printk("MS_LibReadExtraBlock --- PhyBlock = %x,
PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); */
/* Read Extra Data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x4 * blen;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x03;
bcb->CDB[5] = (unsigned char)(PageNum);
bcb->CDB[4] = (unsigned char)(PhyBlock);
bcb->CDB[3] = (unsigned char)(PhyBlock>>8);
bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
bcb->CDB[6] = blen;
result = ene_send_scsi_cmd(us, FDIR_READ, buf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * ms_lib_scan_logicalblocknumber - rebuild the logical->physical map
 * @us:       USB storage device state
 * @btBlk1st: the active boot block (used for the disabled-block check)
 *
 * Scans every physical block's extra data (fetched 0x80 records at a
 * time into a 512-byte buffer) and reconstructs the translation maps:
 * blocks with a bad-block status are retired, attribute-table blocks
 * and out-of-segment mappings are erased, and duplicate logical
 * addresses are resolved by erasing whichever copy carries the
 * "updating" flag (or, failing that, the previously recorded block).
 * Logical block 0 additionally consults the boot block's
 * disabled-block list before the old mapping is discarded.
 * Always returns MS_STATUS_SUCCESS.
 */
static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
{
u16 PhyBlock, newblk, i;
u16 LogStart, LogEnde;
struct ms_lib_type_extdat extdat;
u8 buf[0x200];
u32 count = 0, index = 0;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
for (i = 0; i < MS_PHYSICAL_BLOCKS_PER_SEGMENT; i++, PhyBlock++) {
/* skip blocks already marked unusable in the map */
switch (ms_libconv_to_logical(info, PhyBlock)) {
case MS_STATUS_ERROR:
continue;
default:
break;
}
/* refill the extra-data window every 0x80 blocks */
if (count == PhyBlock) {
ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
count += 0x80;
}
index = (PhyBlock % 0x80) * 4;
extdat.ovrflg = buf[index];
extdat.mngflg = buf[index+1];
extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
ms_lib_setacquired_errorblock(us, PhyBlock);
continue;
}
if ((extdat.mngflg & MS_REG_MNG_ATFLG) == MS_REG_MNG_ATFLG_ATTBL) {
ms_lib_erase_phyblock(us, PhyBlock);
continue;
}
if (extdat.logadr != MS_LB_NOT_USED) {
if ((extdat.logadr < LogStart) || (LogEnde <= extdat.logadr)) {
/* logical address outside this segment's range */
ms_lib_erase_phyblock(us, PhyBlock);
continue;
}
newblk = ms_libconv_to_physical(info, extdat.logadr);
if (newblk != MS_LB_NOT_USED) {
/* duplicate mapping: decide which copy to keep */
if (extdat.logadr == 0) {
ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock);
if (ms_lib_check_disableblock(us, btBlk1st)) {
ms_lib_set_logicalpair(us, extdat.logadr, newblk);
continue;
}
}
ms_lib_read_extra(us, newblk, 0, &extdat);
if ((extdat.ovrflg & MS_REG_OVR_UDST) == MS_REG_OVR_UDST_UPDATING) {
ms_lib_erase_phyblock(us, PhyBlock);
continue;
} else {
ms_lib_erase_phyblock(us, newblk);
}
}
ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock);
}
}
} /* End for ... */
return MS_STATUS_SUCCESS;
}
/*
 * Handle a SCSI READ_10 aimed at the MemoryStick slot.
 *
 * The start sector (bn) and sector count (blen) are decoded from the CDB
 * big-endian fields.  MS-Pro cards are read with a single vendor 0xF1/0x02
 * command straight into the SCSI scatter-gather list; plain MS cards are
 * read block-by-block through the logical->physical translation table into
 * a bounce buffer which is then copied to the srb.
 *
 * Returns a USB_STOR_TRANSPORT_* / USB_STOR_XFER_* style status
 * (GOOD == 0 in both schemes).
 */
static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
unsigned char *cdb = srb->cmnd;
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
/* READ_10: bytes 2..5 = big-endian LBA, bytes 7..8 = sector count. */
u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
u32 blenByte = blen * 0x200;	/* total transfer size, 512 B sectors */
if (bn > info->bl_num)
return USB_STOR_TRANSPORT_ERROR;
if (info->MS_Status.IsMSPro) {
result = ene_load_bincode(us, MSP_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Load MPS RW pattern Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = blenByte;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;	/* vendor opcode */
bcb->CDB[1] = 0x02;	/* sub-op: MS-Pro read */
/* Sector number, big-endian in CDB[2..5]. */
bcb->CDB[5] = (unsigned char)(bn);
bcb->CDB[4] = (unsigned char)(bn>>8);
bcb->CDB[3] = (unsigned char)(bn>>16);
bcb->CDB[2] = (unsigned char)(bn>>24);
/* Last arg 1: transfer directly via the srb scatter-gather list. */
result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1);
} else {
void *buf;
int offset = 0;
u16 phyblk, logblk;
u8 PageNum;
u16 len;
u32 blkno;
buf = kmalloc(blenByte, GFP_KERNEL);
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
pr_info("Load MS RW pattern Fail !!\n");
result = USB_STOR_TRANSPORT_ERROR;
goto exit;
}
/* Split the request into per-logical-block page runs. */
logblk = (u16)(bn / info->MS_Lib.PagesPerBlock);
PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock);
while (1) {
/* Read at most to the end of the current block. */
if (blen > (info->MS_Lib.PagesPerBlock-PageNum))
len = info->MS_Lib.PagesPerBlock-PageNum;
else
len = blen;
phyblk = ms_libconv_to_physical(info, logblk);
/* NOTE(review): 0x20 assumes 32 pages per physical block;
 * presumably matches MS_Lib.PagesPerBlock — confirm. */
blkno = phyblk * 0x20 + PageNum;
/* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200 * len;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x02;
bcb->CDB[5] = (unsigned char)(blkno);
bcb->CDB[4] = (unsigned char)(blkno>>8);
bcb->CDB[3] = (unsigned char)(blkno>>16);
bcb->CDB[2] = (unsigned char)(blkno>>24);
result = ene_send_scsi_cmd(us, FDIR_READ, buf+offset, 0);
if (result != USB_STOR_XFER_GOOD) {
pr_info("MS_SCSI_Read --- result = %x\n", result);
result = USB_STOR_TRANSPORT_ERROR;
goto exit;
}
blen -= len;
if (blen <= 0)
break;
/* Continue from the start of the next logical block. */
logblk++;
PageNum = 0;
offset += MS_BYTES_PER_PAGE*len;
}
/* Copy the assembled bounce buffer into the SCSI buffer. */
usb_stor_set_xfer_buf(buf, blenByte, srb);
exit:
kfree(buf);
}
return result;
}
/*
 * Handle a SCSI WRITE_10 aimed at the MemoryStick slot.
 *
 * Mirror image of ms_scsi_read(): MS-Pro cards take one vendor 0xF0/0x04
 * command from the scatter-gather list; plain MS cards go through a
 * copy-on-write cycle per logical block (copy old physical block + new
 * data into a freshly allocated physical block, then remap).
 *
 * Returns a USB_STOR_TRANSPORT_* / USB_STOR_XFER_* style status.
 */
static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
unsigned char *cdb = srb->cmnd;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
/* WRITE_10: bytes 2..5 = big-endian LBA, bytes 7..8 = sector count. */
u32 bn = ((cdb[2] << 24) & 0xff000000) |
((cdb[3] << 16) & 0x00ff0000) |
((cdb[4] << 8) & 0x0000ff00) |
((cdb[5] << 0) & 0x000000ff);
u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
u32 blenByte = blen * 0x200;	/* total transfer size, 512 B sectors */
if (bn > info->bl_num)
return USB_STOR_TRANSPORT_ERROR;
if (info->MS_Status.IsMSPro) {
result = ene_load_bincode(us, MSP_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
pr_info("Load MSP RW pattern Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = blenByte;
bcb->Flags = 0x00;	/* OUT direction */
bcb->CDB[0] = 0xF0;	/* vendor opcode */
bcb->CDB[1] = 0x04;	/* sub-op: MS-Pro write */
bcb->CDB[5] = (unsigned char)(bn);
bcb->CDB[4] = (unsigned char)(bn>>8);
bcb->CDB[3] = (unsigned char)(bn>>16);
bcb->CDB[2] = (unsigned char)(bn>>24);
result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
} else {
void *buf;
int offset = 0;
u16 PhyBlockAddr;	/* actually the LOGICAL block number (see below) */
u8 PageNum;
u16 len, oldphy, newphy;
buf = kmalloc(blenByte, GFP_KERNEL);
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
/* Pull the outgoing data out of the SCSI buffer first. */
usb_stor_set_xfer_buf(buf, blenByte, srb);
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
pr_info("Load MS RW pattern Fail !!\n");
result = USB_STOR_TRANSPORT_ERROR;
goto exit;
}
/* NOTE(review): despite its name, PhyBlockAddr is used as a logical
 * block number throughout (it is fed to ms_libconv_to_physical). */
PhyBlockAddr = (u16)(bn / info->MS_Lib.PagesPerBlock);
PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock);
while (1) {
if (blen > (info->MS_Lib.PagesPerBlock-PageNum))
len = info->MS_Lib.PagesPerBlock-PageNum;
else
len = blen;
/* Copy-on-write: merge old block + new pages into a free block. */
oldphy = ms_libconv_to_physical(info, PhyBlockAddr); /* need check us <-> info */
newphy = ms_libsearch_block_from_logical(us, PhyBlockAddr);
result = ms_read_copyblock(us, oldphy, newphy, PhyBlockAddr, PageNum, buf+offset, len);
if (result != USB_STOR_XFER_GOOD) {
pr_info("MS_SCSI_Write --- result = %x\n", result);
result = USB_STOR_TRANSPORT_ERROR;
goto exit;
}
/* Retire the old physical block and remap the logical block. */
info->MS_Lib.Phy2LogMap[oldphy] = MS_LB_NOT_USED_ERASED;
ms_lib_force_setlogical_pair(us, PhyBlockAddr, newphy);
blen -= len;
if (blen <= 0)
break;
PhyBlockAddr++;
PageNum = 0;
offset += MS_BYTES_PER_PAGE*len;
}
exit:
kfree(buf);
}
return result;
}
/*
* ENE MS Card
*/
/*
 * Read a single controller register byte.
 *
 * @us:    device state (us->iobuf holds the command wrapper)
 * @index: 16-bit register index, split big-endian into CDB[2..3]
 * @buf:   destination for the one returned byte
 *
 * Issues vendor command 0xED over the bulk pipe and returns the status
 * from ene_send_scsi_cmd() unchanged.
 */
static int ene_get_card_type(struct us_data *us, u16 index, void *buf)
{
	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;

	memset(bcb, 0, sizeof(*bcb));
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = 0x01;		/* exactly one byte back */
	bcb->Flags = US_BULK_FLAG_IN;
	bcb->CDB[0] = 0xED;			/* vendor opcode: register read */
	bcb->CDB[2] = (unsigned char)(index >> 8);
	bcb->CDB[3] = (unsigned char)(index & 0xff);

	return ene_send_scsi_cmd(us, FDIR_READ, buf, 0);
}
/*
 * Parse the SD/MMC CSD-style register dump in @buf (returned by the
 * card-init vendor command) into the capacity fields of ene_ub6250_info:
 * READ_BL_LEN, C_SIZE, C_SIZE_MULT and, for high-capacity cards, HC_C_SIZE.
 *
 * NOTE(review): the raw *(u32 *)&buf[...] loads assume the buffer layout
 * is already in host byte order and suitably aligned — confirm against
 * the firmware's reply format; no byte-swapping is performed here.
 *
 * Always returns USB_STOR_TRANSPORT_GOOD.
 */
static int ene_get_card_status(struct us_data *us, u8 *buf)
{
u16 tmpreg;
u32 reg4b;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
/*US_DEBUGP("transport --- ENE_ReadSDReg\n");*/
reg4b = *(u32 *)&buf[0x18];
info->SD_READ_BL_LEN = (u8)((reg4b >> 8) & 0x0f);
tmpreg = (u16) reg4b;
reg4b = *(u32 *)(&buf[0x14]);
/* SDHC/SDXC (not MMC): 22-bit C_SIZE field. */
if (info->SD_Status.HiCapacity && !info->SD_Status.IsMMC)
info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff;
/* Standard-capacity 12-bit C_SIZE split across the two words. */
info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22);
info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07;
/* High-capacity MMC keeps its sector count elsewhere in the dump. */
if (info->SD_Status.HiCapacity && info->SD_Status.IsMMC)
info->HC_C_SIZE = *(u32 *)(&buf[0x100]);
/* Normalise block length to SD_BLOCK_LEN, folding the excess into a
 * block multiplier. */
if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) {
info->SD_Block_Mult = 1 << (info->SD_READ_BL_LEN-SD_BLOCK_LEN);
info->SD_READ_BL_LEN = SD_BLOCK_LEN;
} else {
info->SD_Block_Mult = 1;
}
return USB_STOR_TRANSPORT_GOOD;
}
static int ene_load_bincode(struct us_data *us, unsigned char flag)
{
int err;
char *fw_name = NULL;
unsigned char *buf = NULL;
const struct firmware *sd_fw = NULL;
int result = USB_STOR_TRANSPORT_ERROR;
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
if (info->BIN_FLAG == flag)
return USB_STOR_TRANSPORT_GOOD;
switch (flag) {
/* For SD */
case SD_INIT1_PATTERN:
US_DEBUGP("SD_INIT1_PATTERN\n");
fw_name = "ene-ub6250/sd_init1.bin";
break;
case SD_INIT2_PATTERN:
US_DEBUGP("SD_INIT2_PATTERN\n");
fw_name = "ene-ub6250/sd_init2.bin";
break;
case SD_RW_PATTERN:
US_DEBUGP("SD_RDWR_PATTERN\n");
fw_name = "ene-ub6250/sd_rdwr.bin";
break;
/* For MS */
case MS_INIT_PATTERN:
US_DEBUGP("MS_INIT_PATTERN\n");
fw_name = "ene-ub6250/ms_init.bin";
break;
case MSP_RW_PATTERN:
US_DEBUGP("MSP_RW_PATTERN\n");
fw_name = "ene-ub6250/msp_rdwr.bin";
break;
case MS_RW_PATTERN:
US_DEBUGP("MS_RW_PATTERN\n");
fw_name = "ene-ub6250/ms_rdwr.bin";
break;
default:
US_DEBUGP("----------- Unknown PATTERN ----------\n");
goto nofw;
}
err = request_firmware(&sd_fw, fw_name, &us->pusb_dev->dev);
if (err) {
US_DEBUGP("load firmware %s failed\n", fw_name);
goto nofw;
}
buf = kmalloc(sd_fw->size, GFP_KERNEL);
if (buf == NULL) {
US_DEBUGP("Malloc memory for fireware failed!\n");
goto nofw;
}
memcpy(buf, sd_fw->data, sd_fw->size);
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = sd_fw->size;
bcb->Flags = 0x00;
bcb->CDB[0] = 0xEF;
result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
info->BIN_FLAG = flag;
kfree(buf);
nofw:
if (sd_fw != NULL) {
release_firmware(sd_fw);
sd_fw = NULL;
}
return result;
}
/*
 * Initialise a plain (non-Pro) MemoryStick: locate the two boot blocks,
 * parse one of them, mark the leading physical blocks, build the
 * logical->physical map and the write buffer.
 *
 * Returns MS_STATUS_SUCCESS on success, MS_STATUS_ERROR on any
 * parse/scan failure, or MS_NO_MEMORY_ERROR on allocation failure.
 */
static int ms_card_init(struct us_data *us)
{
u32 result;
u16 TmpBlock;
unsigned char *PageBuffer0 = NULL, *PageBuffer1 = NULL;
struct ms_lib_type_extdat extdat;
u16 btBlk1st, btBlk2nd;	/* physical numbers of the two boot blocks */
u32 btBlk1stErred;	/* non-zero if the 1st boot block read had errors */
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
printk(KERN_INFO "MS_CardInit start\n");
ms_lib_free_allocatedarea(us); /* Clean buffer and set struct us_data flag to 0 */
/* get two PageBuffer */
PageBuffer0 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
PageBuffer1 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
if ((PageBuffer0 == NULL) || (PageBuffer1 == NULL)) {
result = MS_NO_MEMORY_ERROR;
goto exit;
}
btBlk1st = btBlk2nd = MS_LB_NOT_USED;
btBlk1stErred = 0;
/* Boot blocks live among the first few physical blocks; scan them. */
for (TmpBlock = 0; TmpBlock < MS_MAX_INITIAL_ERROR_BLOCKS+2; TmpBlock++) {
switch (ms_read_readpage(us, TmpBlock, 0, (u32 *)PageBuffer0, &extdat)) {
case MS_STATUS_SUCCESS:
break;
case MS_STATUS_INT_ERROR:
break;
case MS_STATUS_ERROR:
default:
continue;
}
if ((extdat.ovrflg & MS_REG_OVR_BKST) == MS_REG_OVR_BKST_NG)
continue;
/* Only pages carrying a valid boot-block header qualify. */
if (((extdat.mngflg & MS_REG_MNG_SYSFLG) == MS_REG_MNG_SYSFLG_USER) ||
(be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wBlockID) != MS_BOOT_BLOCK_ID) ||
(be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wFormatVersion) != MS_BOOT_BLOCK_FORMAT_VERSION) ||
(((struct ms_bootblock_page0 *)PageBuffer0)->header.bNumberOfDataEntry != MS_BOOT_BLOCK_DATA_ENTRIES))
continue;
/* Second valid boot block found - done scanning. */
if (btBlk1st != MS_LB_NOT_USED) {
btBlk2nd = TmpBlock;
break;
}
btBlk1st = TmpBlock;
/* Keep a copy: PageBuffer0 will be reused for the 2nd block. */
memcpy(PageBuffer1, PageBuffer0, MS_BYTES_PER_PAGE);
if (extdat.status1 & (MS_REG_ST1_DTER | MS_REG_ST1_EXER | MS_REG_ST1_FGER))
btBlk1stErred = 1;
}
if (btBlk1st == MS_LB_NOT_USED) {
result = MS_STATUS_ERROR;
goto exit;
}
/* write protect */
if ((extdat.status0 & MS_REG_ST0_WP) == MS_REG_ST0_WP_ON)
ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT);
result = MS_STATUS_ERROR;
/* 1st Boot Block */
if (btBlk1stErred == 0)
result = ms_lib_process_bootblock(us, btBlk1st, PageBuffer1);
/* 1st */
/* 2nd Boot Block (fallback if the 1st failed or was unreliable) */
if (result && (btBlk2nd != MS_LB_NOT_USED))
result = ms_lib_process_bootblock(us, btBlk2nd, PageBuffer0);
if (result) {
result = MS_STATUS_ERROR;
goto exit;
}
/* Everything before the 1st boot block is unusable by definition. */
for (TmpBlock = 0; TmpBlock < btBlk1st; TmpBlock++)
info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;
info->MS_Lib.Phy2LogMap[btBlk1st] = MS_LB_BOOT_BLOCK;
if (btBlk2nd != MS_LB_NOT_USED) {
for (TmpBlock = btBlk1st + 1; TmpBlock < btBlk2nd; TmpBlock++)
info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;
info->MS_Lib.Phy2LogMap[btBlk2nd] = MS_LB_BOOT_BLOCK;
}
result = ms_lib_scan_logicalblocknumber(us, btBlk1st);
if (result)
goto exit;
/* A segment with no free blocks at all forces write protection. */
for (TmpBlock = MS_PHYSICAL_BLOCKS_PER_SEGMENT;
TmpBlock < info->MS_Lib.NumberOfPhyBlock;
TmpBlock += MS_PHYSICAL_BLOCKS_PER_SEGMENT) {
if (ms_count_freeblock(us, TmpBlock) == 0) {
ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT);
break;
}
}
/* write */
if (ms_lib_alloc_writebuf(us)) {
result = MS_NO_MEMORY_ERROR;
goto exit;
}
result = MS_STATUS_SUCCESS;
exit:
kfree(PageBuffer1);
kfree(PageBuffer0);
printk(KERN_INFO "MS_CardInit end\n");
return result;
}
/*
 * Probe and initialise the MemoryStick slot: load the MS init firmware,
 * run vendor command 0xF1/0x01 to fetch the card status block, then either
 * compute the capacity (MS-Pro) or run the full plain-MS initialisation.
 *
 * Returns USB_STOR_TRANSPORT_GOOD when a ready card was found,
 * USB_STOR_TRANSPORT_ERROR otherwise.
 */
static int ene_ms_init(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
u8 buf[0x200];
u16 MSP_BlockSize, MSP_UserAreaBlocks;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
printk(KERN_INFO "transport --- ENE_MSInit\n");
/* the same part to test ENE */
result = ene_load_bincode(us, MS_INIT_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
printk(KERN_ERR "Load MS Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* Vendor 0xF1/0x01: read the 0x200-byte MS status/parameter block. */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x01;
result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
if (result != USB_STOR_XFER_GOOD) {
printk(KERN_ERR "Execution MS Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* the same part to test ENE */
/* Byte 0 of the reply is the MS_STATUS bitfield. */
info->MS_Status = *(struct MS_STATUS *)&buf[0];
if (info->MS_Status.Insert && info->MS_Status.Ready) {
printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready);
printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro);
printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
if (info->MS_Status.IsMSPro) {
/* MS-Pro: capacity comes straight from the reply fields. */
MSP_BlockSize = (buf[6] << 8) | buf[7];
MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
} else {
/* NOTE(review): ms_card_init()'s result is ignored here. */
ms_card_init(us); /* Card is MS (to ms.c)*/
}
US_DEBUGP("MS Init Code OK !!\n");
} else {
US_DEBUGP("MS Card Not Ready --- %x\n", buf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Probe and initialise the SD slot in two firmware stages: load/run the
 * init-1 pattern (vendor 0xF2, no data), then the init-2 pattern (vendor
 * 0xF1) which returns a 0x200-byte status block.  On success the capacity
 * registers are parsed via ene_get_card_status().
 *
 * Returns USB_STOR_TRANSPORT_GOOD when a ready card was found,
 * USB_STOR_TRANSPORT_ERROR otherwise.
 */
static int ene_sd_init(struct us_data *us)
{
int result;
u8 buf[0x200];
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
US_DEBUGP("transport --- ENE_SDInit\n");
/* SD Init Part-1 */
result = ene_load_bincode(us, SD_INIT1_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Load SD Init Code Part-1 Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* Stage 1 carries no data phase (DataTransferLength stays 0). */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF2;
result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Execution SD Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* SD Init Part-2 */
result = ene_load_bincode(us, SD_INIT2_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Load SD Init Code Part-2 Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* Stage 2 returns the 0x200-byte SD status/register dump. */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x200;
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
if (result != USB_STOR_XFER_GOOD) {
US_DEBUGP("Execution SD Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* Byte 0 of the reply is the SD_STATUS bitfield. */
info->SD_Status = *(struct SD_STATUS *)&buf[0];
if (info->SD_Status.Insert && info->SD_Status.Ready) {
ene_get_card_status(us, (unsigned char *)&buf);
US_DEBUGP("Insert = %x\n", info->SD_Status.Insert);
US_DEBUGP("Ready = %x\n", info->SD_Status.Ready);
US_DEBUGP("IsMMC = %x\n", info->SD_Status.IsMMC);
US_DEBUGP("HiCapacity = %x\n", info->SD_Status.HiCapacity);
US_DEBUGP("HiSpeed = %x\n", info->SD_Status.HiSpeed);
US_DEBUGP("WtP = %x\n", info->SD_Status.WtP);
} else {
US_DEBUGP("SD Card Not Ready --- %x\n", buf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Top-level (re)initialisation: read the card-presence register and run
 * the SD and/or MS init paths for whichever slots report a card that is
 * not already marked Ready.
 *
 * NOTE(review): ene_sd_init()/ene_ms_init() return USB_STOR_TRANSPORT_*
 * codes but are compared against USB_STOR_XFER_GOOD; this works because
 * both "good" values are 0 — worth confirming against the definitions.
 */
static int ene_init(struct us_data *us)
{
int result;
u8 misc_reg03 = 0;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* Bit 0: SD card present. */
if (misc_reg03 & 0x01) {
if (!info->SD_Status.Ready) {
result = ene_sd_init(us);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
}
}
/* Bit 1: MemoryStick present. */
if (misc_reg03 & 0x02) {
if (!info->MS_Status.Ready) {
result = ene_ms_init(us);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
}
}
return result;
}
/*
 * Dispatch one SCSI command block to the SD-card handlers.
 *
 * SrbStatus is pre-set to SS_SUCCESS and downgraded to
 * SS_ILLEGAL_REQUEST for opcodes the driver does not implement.
 * Returns the handler's USB_STOR_TRANSPORT_* status.
 */
static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
	unsigned char opcode = srb->cmnd[0];
	int result;

	info->SrbStatus = SS_SUCCESS;

	if (opcode == TEST_UNIT_READY)			/* 0x00 */
		result = sd_scsi_test_unit_ready(us, srb);
	else if (opcode == INQUIRY)			/* 0x12 */
		result = sd_scsi_inquiry(us, srb);
	else if (opcode == MODE_SENSE)			/* 0x1A */
		result = sd_scsi_mode_sense(us, srb);
	else if (opcode == READ_CAPACITY)		/* 0x25 */
		result = sd_scsi_read_capacity(us, srb);
	else if (opcode == READ_10)			/* 0x28 */
		result = sd_scsi_read(us, srb);
	else if (opcode == WRITE_10)			/* 0x2A */
		result = sd_scsi_write(us, srb);
	else {
		info->SrbStatus = SS_ILLEGAL_REQUEST;
		result = USB_STOR_TRANSPORT_FAILED;
	}

	return result;
}
/*
 * Dispatch one SCSI command block to the MemoryStick handlers.
 *
 * Mirror of sd_scsi_irp(): SrbStatus starts at SS_SUCCESS and is set to
 * SS_ILLEGAL_REQUEST for unsupported opcodes.  Returns the handler's
 * USB_STOR_TRANSPORT_* status.
 */
static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
	struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
	unsigned char opcode = srb->cmnd[0];
	int result;

	info->SrbStatus = SS_SUCCESS;

	if (opcode == TEST_UNIT_READY)			/* 0x00 */
		result = ms_scsi_test_unit_ready(us, srb);
	else if (opcode == INQUIRY)			/* 0x12 */
		result = ms_scsi_inquiry(us, srb);
	else if (opcode == MODE_SENSE)			/* 0x1A */
		result = ms_scsi_mode_sense(us, srb);
	else if (opcode == READ_CAPACITY)		/* 0x25 */
		result = ms_scsi_read_capacity(us, srb);
	else if (opcode == READ_10)			/* 0x28 */
		result = ms_scsi_read(us, srb);
	else if (opcode == WRITE_10)			/* 0x2A */
		result = ms_scsi_write(us, srb);
	else {
		info->SrbStatus = SS_ILLEGAL_REQUEST;
		result = USB_STOR_TRANSPORT_FAILED;
	}

	return result;
}
/*
 * usb-storage transport entry point: lazily initialise the device on the
 * first command, then route subsequent commands to the SD or MS dispatcher
 * depending on which slot is ready.
 *
 * NOTE(review): the handler's result is computed but discarded — this
 * function always returns 0, so failures are only reflected through
 * info->SrbStatus / sense data, not the transport return value. Confirm
 * this is intentional before changing it.
 */
static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int result = 0;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
/*US_DEBUG(usb_stor_show_command(srb)); */
scsi_set_resid(srb, 0);
if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
result = ene_init(us);
} else {
/* Both slots could in principle be ready; MS dispatch wins last. */
if (info->SD_Status.Ready)
result = sd_scsi_irp(us, srb);
if (info->MS_Status.Ready)
result = ms_scsi_irp(us, srb);
}
return 0;
}
/*
 * USB probe: register with the usb-storage core, allocate the per-device
 * ene_ub6250_info state, install the transport, and verify the attached
 * reader actually has an SD/MS slot (SM-only readers are not supported).
 *
 * Returns 0 on success or a negative errno / USB_STOR error code.
 */
static int ene_ub6250_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int result;
u8 misc_reg03 = 0;
struct us_data *us;
result = usb_stor_probe1(&us, intf, id,
(id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list);
if (result)
return result;
/* FIXME: where should the code alloc extra buf ? */
if (!us->extra) {
us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
if (!us->extra)
return -ENOMEM;
/* Core will free us->extra via this destructor on disconnect. */
us->extra_destructor = ene_ub6250_info_destructor;
}
us->transport_name = "ene_ub6250";
us->transport = ene_transport;
us->max_lun = 0;	/* single LUN device */
result = usb_stor_probe2(us);
if (result)
return result;
/* probe card type */
result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
if (result != USB_STOR_XFER_GOOD) {
usb_stor_disconnect(intf);
return USB_STOR_TRANSPORT_ERROR;
}
/* Bit 0 clear means no SD/MS slot: warn, but keep the device bound. */
if (!(misc_reg03 & 0x01)) {
pr_info("ums_eneub6250: The driver only supports SD/MS card. "
"To use SM card, please build driver/staging/keucr\n");
}
return result;
}
#ifdef CONFIG_PM
/*
 * PM resume: let the usb-storage core resume the device, then invalidate
 * all cached card-status bitfields (by overlaying a zero byte) so the next
 * command re-runs ene_init() and re-probes the cards.
 */
static int ene_ub6250_resume(struct usb_interface *iface)
{
u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
mutex_lock(&us->dev_mutex);
US_DEBUGP("%s\n", __func__);
if (us->suspend_resume_hook)
(us->suspend_resume_hook)(us, US_RESUME);
mutex_unlock(&us->dev_mutex);
info->Power_IsResum = true;
/*info->SD_Status.Ready = 0; */
/* Zero every status bitfield in one shot via the tmp byte overlay. */
info->SD_Status = *(struct SD_STATUS *)&tmp;
info->MS_Status = *(struct MS_STATUS *)&tmp;
info->SM_Status = *(struct SM_STATUS *)&tmp;
return 0;
}
/*
 * PM reset-resume: report the reset to the SCSI core and clear the cached
 * card status (same overlay trick as ene_ub6250_resume()) so the firmware
 * and cards are reinitialised on the next command.
 */
static int ene_ub6250_reset_resume(struct usb_interface *iface)
{
u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
US_DEBUGP("%s\n", __func__);
/* Report the reset to the SCSI core */
usb_stor_reset_resume(iface);
/* FIXME: Notify the subdrivers that they need to reinitialize
 * the device */
info->Power_IsResum = true;
/*info->SD_Status.Ready = 0; */
/* Zero every status bitfield in one shot via the tmp byte overlay. */
info->SD_Status = *(struct SD_STATUS *)&tmp;
info->MS_Status = *(struct MS_STATUS *)&tmp;
info->SM_Status = *(struct SM_STATUS *)&tmp;
return 0;
}
#else
#define ene_ub6250_resume NULL
#define ene_ub6250_reset_resume NULL
#endif
/* USB driver glue: custom probe/resume, usb-storage defaults elsewhere. */
static struct usb_driver ene_ub6250_driver = {
.name = "ums_eneub6250",
.probe = ene_ub6250_probe,
.disconnect = usb_stor_disconnect,
.suspend = usb_stor_suspend,
.resume = ene_ub6250_resume,
.reset_resume = ene_ub6250_reset_resume,
.pre_reset = usb_stor_pre_reset,
.post_reset = usb_stor_post_reset,
.id_table = ene_ub6250_usb_ids,
.soft_unbind = 1,	/* allow unbind while device is open */
.no_dynamic_id = 1,
};
module_usb_driver(ene_ub6250_driver);
| gpl-2.0 |
aatjitra/OnePlus | drivers/watchdog/cpu5wdt.c | 4980 | 6857 | /*
* sma cpu5 watchdog driver
*
* Copyright (C) 2003 Heiko Ronsdorf <hero@ihg.uni-duisburg.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
/* adjustable parameters */
static int verbose;
static int port = 0x91;
static int ticks = 10000;
static DEFINE_SPINLOCK(cpu5wdt_lock);
#define PFX "cpu5wdt: "
#define CPU5WDT_EXTENT 0x0A
#define CPU5WDT_STATUS_REG 0x00
#define CPU5WDT_TIME_A_REG 0x02
#define CPU5WDT_TIME_B_REG 0x03
#define CPU5WDT_MODE_REG 0x04
#define CPU5WDT_TRIGGER_REG 0x07
#define CPU5WDT_ENABLE_REG 0x08
#define CPU5WDT_RESET_REG 0x09
#define CPU5WDT_INTERVAL (HZ/10+1)
/* some device data */
static struct {
struct completion stop;		/* signalled once the timer stops requeueing */
int running;			/* userspace armed: countdown is active */
struct timer_list timer;	/* periodic hardware-trigger timer */
int queue;			/* timer is (re)scheduled while non-zero */
int default_ticks;		/* countdown restart value (module param) */
unsigned long inuse;		/* bit 0: /dev/watchdog is open */
} cpu5wdt_device;
/* generic helper functions */
/*
 * Timer callback: pet the hardware watchdog and, while the countdown has
 * not expired, reschedule itself.  When the countdown hits zero (or the
 * queue flag was cleared by cpu5wdt_exit) it stops and signals 'stop'.
 *
 * NOTE(review): 'running' is read and 'ticks' decremented before the
 * spinlock is taken — small race window against cpu5wdt_stop/reset.
 */
static void cpu5wdt_trigger(unsigned long unused)
{
if (verbose > 2)
pr_debug("trigger at %i ticks\n", ticks);
if (cpu5wdt_device.running)
ticks--;
spin_lock(&cpu5wdt_lock);
/* keep watchdog alive */
outb(1, port + CPU5WDT_TRIGGER_REG);
/* requeue?? */
if (cpu5wdt_device.queue && ticks)
mod_timer(&cpu5wdt_device.timer, jiffies + CPU5WDT_INTERVAL);
else {
/* ticks doesn't matter anyway */
complete(&cpu5wdt_device.stop);
}
spin_unlock(&cpu5wdt_lock);
}
/* Re-arm the software countdown to its configured starting value. */
static void cpu5wdt_reset(void)
{
	int restart_ticks = cpu5wdt_device.default_ticks;

	ticks = restart_ticks;
	if (verbose)
		pr_debug("reset (%i ticks)\n", restart_ticks);
}
/*
 * Start (or re-reference) the watchdog: program the hardware registers
 * once and kick off the periodic trigger timer.  'running' is a counter
 * so it survives an opener dying without the hardware being disarmed.
 */
static void cpu5wdt_start(void)
{
unsigned long flags;
spin_lock_irqsave(&cpu5wdt_lock, flags);
if (!cpu5wdt_device.queue) {
cpu5wdt_device.queue = 1;
/* Program timeout registers and enable the hardware watchdog. */
outb(0, port + CPU5WDT_TIME_A_REG);
outb(0, port + CPU5WDT_TIME_B_REG);
outb(1, port + CPU5WDT_MODE_REG);
outb(0, port + CPU5WDT_RESET_REG);
outb(0, port + CPU5WDT_ENABLE_REG);
mod_timer(&cpu5wdt_device.timer, jiffies + CPU5WDT_INTERVAL);
}
/* if process dies, counter is not decremented */
cpu5wdt_device.running++;
spin_unlock_irqrestore(&cpu5wdt_lock, flags);
}
/*
 * "Stop" the watchdog.  The CPU5 hardware cannot actually be disarmed, so
 * this only clears the software countdown state and re-arms the tick
 * budget; it deliberately reports -EIO to tell userspace the stop did not
 * really take effect (the trigger timer keeps petting the hardware).
 */
static int cpu5wdt_stop(void)
{
unsigned long flags;
spin_lock_irqsave(&cpu5wdt_lock, flags);
if (cpu5wdt_device.running)
cpu5wdt_device.running = 0;
ticks = cpu5wdt_device.default_ticks;
spin_unlock_irqrestore(&cpu5wdt_lock, flags);
if (verbose)
pr_crit("stop not possible\n");
return -EIO;
}
/* filesystem operations */
/* Single-opener policy: bit 0 of ->inuse is the busy flag. */
static int cpu5wdt_open(struct inode *inode, struct file *file)
{
	int already_open = test_and_set_bit(0, &cpu5wdt_device.inuse);

	return already_open ? -EBUSY : nonseekable_open(inode, file);
}
/* Release the single-opener slot taken in cpu5wdt_open(). */
static int cpu5wdt_release(struct inode *inode, struct file *file)
{
clear_bit(0, &cpu5wdt_device.inuse);
return 0;
}
/*
 * Watchdog chardev ioctl handler implementing the standard WDIOC_* set:
 * capability query, status read (bit 2 of the status register), option
 * setting (enable/disable) and keepalive.  Unknown commands -> -ENOTTY.
 */
static long cpu5wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
unsigned int value;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET,
.identity = "CPU5 WDT",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
/* Bit 2 of the status register: last reset caused by watchdog. */
value = inb(port + CPU5WDT_STATUS_REG);
value = (value >> 2) & 1;
return put_user(value, p);
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
if (get_user(value, p))
return -EFAULT;
if (value & WDIOS_ENABLECARD)
cpu5wdt_start();
/* Note: disable reports -EIO internally but we still return 0. */
if (value & WDIOS_DISABLECARD)
cpu5wdt_stop();
break;
case WDIOC_KEEPALIVE:
cpu5wdt_reset();
break;
default:
return -ENOTTY;
}
return 0;
}
/* Any non-empty write to /dev/watchdog counts as a keepalive ping. */
static ssize_t cpu5wdt_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	if (count == 0)
		return -EIO;

	cpu5wdt_reset();
	return count;
}
/* File operations for the watchdog misc device. */
static const struct file_operations cpu5wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = cpu5wdt_ioctl,
.open = cpu5wdt_open,
.write = cpu5wdt_write,
.release = cpu5wdt_release,
};
/* Registers as the standard /dev/watchdog misc device. */
static struct miscdevice cpu5wdt_misc = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &cpu5wdt_fops,
};
/* init/exit function */
/*
 * One-time setup: initialise the software state and trigger timer, claim
 * the I/O port range, report whether the last reboot was watchdog-caused
 * (status bit 2), and register the misc device.
 *
 * Returns 0 on success or a negative errno; unwinds the port reservation
 * on misc_register failure.
 */
static int __devinit cpu5wdt_init(void)
{
unsigned int val;
int err;
if (verbose)
pr_debug("port=0x%x, verbose=%i\n", port, verbose);
init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
pr_err("request_region failed\n");
err = -EBUSY;
goto no_port;
}
/* watchdog reboot? */
val = inb(port + CPU5WDT_STATUS_REG);
val = (val >> 2) & 1;
if (!val)
pr_info("sorry, was my fault\n");
err = misc_register(&cpu5wdt_misc);
if (err < 0) {
pr_err("misc_register failed\n");
goto no_misc;
}
pr_info("init success\n");
return 0;
no_misc:
release_region(port, CPU5WDT_EXTENT);
no_port:
return err;
}
/* Module entry point: all real work happens in cpu5wdt_init(). */
static int __devinit cpu5wdt_init_module(void)
{
	int rc = cpu5wdt_init();

	return rc;
}
/*
 * Teardown: clear the queue flag so the trigger callback stops
 * rescheduling itself, wait for it to signal completion, then unregister
 * the misc device and release the I/O ports.
 */
static void __devexit cpu5wdt_exit(void)
{
if (cpu5wdt_device.queue) {
cpu5wdt_device.queue = 0;
/* Trigger callback completes 'stop' once it sees queue == 0. */
wait_for_completion(&cpu5wdt_device.stop);
}
misc_deregister(&cpu5wdt_misc);
release_region(port, CPU5WDT_EXTENT);
}
/* Module exit point: all real work happens in cpu5wdt_exit(). */
static void __devexit cpu5wdt_exit_module(void)
{
	cpu5wdt_exit();	/* stop timer, deregister device, release ports */
}
/* module entry points */
module_init(cpu5wdt_init_module);
module_exit(cpu5wdt_exit_module);
MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>");
MODULE_DESCRIPTION("sma cpu5 watchdog driver");
MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* Tunables exposed as module parameters (read-only from sysfs, perm 0). */
module_param(port, int, 0);
MODULE_PARM_DESC(port, "base address of watchdog card, default is 0x91");
module_param(verbose, int, 0);
MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)");
module_param(ticks, int, 0);
MODULE_PARM_DESC(ticks, "count down ticks, default is 10000");
| gpl-2.0 |
bigbiff/android_device_samsung_mondrianwfiue | arch/arm/mach-msm/board-mahimahi-flashlight.c | 4980 | 6403 | /*
* arch/arm/mach-msm/flashlight.c - flashlight driver
*
* Copyright (C) 2009 zion huang <zion_huang@htc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*/
#define DEBUG
#include <linux/delay.h>
#include <linux/earlysuspend.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/wakelock.h>
#include <linux/hrtimer.h>
#include <mach/msm_iomap.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include "board-mahimahi-flashlight.h"
/* All driver state, one instance per system (single flashlight). */
struct flashlight_struct {
struct led_classdev fl_lcdev;		/* LED class interface */
struct early_suspend early_suspend_flashlight;	/* turn off on suspend */
spinlock_t spin_lock;			/* guards brightness + gpio writes */
struct hrtimer timer;			/* auto-off timer for flash mode */
int brightness;				/* current mode (FLASHLIGHT_*) */
int gpio_torch;				/* gpio driving the torch line */
int gpio_flash;				/* gpio driving the flash line */
int flash_duration_ms;			/* flash auto-off period */
};
static struct flashlight_struct the_fl;
/*
 * One low->high pulse on the torch GPIO with 2 us settle times.
 * Timing-critical bit-bang used by flashlight_hw_command() to clock
 * commands into the flash controller — do not reorder or coalesce.
 */
static inline void toggle(void)
{
gpio_direction_output(the_fl.gpio_torch, 0);
udelay(2);
gpio_direction_output(the_fl.gpio_torch, 1);
udelay(2);
}
/*
 * Clock an (addr, data) pair into the flash controller as two pulse
 * trains separated by 500 us: first "addr + 17" pulses select the
 * register, then "data" pulses set its value.  The +17 offset is the
 * controller's address base per the bit-bang protocol.
 */
static void flashlight_hw_command(uint8_t addr, uint8_t data)
{
int i;
for (i = 0; i < addr + 17; i++)
toggle();
udelay(500);
for (i = 0; i < data; i++)
toggle();
udelay(500);
}
/*
 * hrtimer callback: the flash period expired, so drop the flash GPIO and
 * record the light as off.  One-shot (never restarts itself).
 */
static enum hrtimer_restart flashlight_timeout(struct hrtimer *timer)
{
unsigned long flags;
pr_debug("%s\n", __func__);
spin_lock_irqsave(&the_fl.spin_lock, flags);
gpio_direction_output(the_fl.gpio_flash, 0);
the_fl.brightness = LED_OFF;
spin_unlock_irqrestore(&the_fl.spin_lock, flags);
return HRTIMER_NORESTART;
}
/*
 * Switch the flashlight into @mode (FLASHLIGHT_OFF/TORCH/FLASH).
 *
 * Torch is set via the bit-bang command interface; flash raises the flash
 * GPIO and arms an hrtimer that turns it off after flash_duration_ms.
 * Returns 0 on success, -EINVAL for an unknown mode.  Exported for use
 * by the camera driver.
 */
int flashlight_control(int mode)
{
int ret = 0;
unsigned long flags;
pr_debug("%s: mode %d -> %d\n", __func__,
the_fl.brightness, mode);
spin_lock_irqsave(&the_fl.spin_lock, flags);
the_fl.brightness = mode;
switch (mode) {
case FLASHLIGHT_TORCH:
pr_info("%s: half\n", __func__);
/* If we are transitioning from flash to torch, make sure to
 * cancel the flash timeout timer, otherwise when it expires,
 * the torch will go off as well.
 */
hrtimer_cancel(&the_fl.timer);
flashlight_hw_command(2, 4);
break;
case FLASHLIGHT_FLASH:
pr_info("%s: full\n", __func__);
hrtimer_cancel(&the_fl.timer);
/* Pulse the flash line low first so the controller re-latches. */
gpio_direction_output(the_fl.gpio_flash, 0);
udelay(40);
gpio_direction_output(the_fl.gpio_flash, 1);
hrtimer_start(&the_fl.timer,
ktime_set(the_fl.flash_duration_ms / 1000,
(the_fl.flash_duration_ms % 1000) *
NSEC_PER_MSEC),
HRTIMER_MODE_REL);
/* Flash overrides torch mode, and after the flash period, the
 * flash LED will turn off.
 */
/* NOTE(review): this assignment is dead — 'mode' is not read
 * again; the off-state is recorded by flashlight_timeout(). */
mode = LED_OFF;
break;
case FLASHLIGHT_OFF:
pr_info("%s: off\n", __func__);
gpio_direction_output(the_fl.gpio_flash, 0);
gpio_direction_output(the_fl.gpio_torch, 0);
break;
default:
pr_err("%s: unknown flash_light flags: %d\n", __func__, mode);
ret = -EINVAL;
goto done;
}
done:
spin_unlock_irqrestore(&the_fl.spin_lock, flags);
return ret;
}
EXPORT_SYMBOL(flashlight_control);
/*
 * LED class callback: map generic LED levels onto flashlight modes.
 * LED_HALF -> torch, LED_FULL -> flash, LED_OFF (or anything else) -> off.
 */
static void fl_lcdev_brightness_set(struct led_classdev *led_cdev,
					enum led_brightness brightness)
{
	int mode;

	if (brightness == LED_HALF)
		mode = FLASHLIGHT_TORCH;
	else if (brightness == LED_FULL)
		mode = FLASHLIGHT_FLASH;
	else
		mode = FLASHLIGHT_OFF;	/* LED_OFF and unknown values */

	flashlight_control(mode);
}
/* Early-suspend hook: never leave the flashlight on while suspending. */
static void flashlight_early_suspend(struct early_suspend *handler)
{
flashlight_control(FLASHLIGHT_OFF);
}
/*
 * Claim the torch/flash GPIOs described by the platform data and stash
 * them (plus the flash duration) in the_fl.  A zero GPIO number means
 * "line not wired" and is skipped.  On failure of the flash request the
 * already-claimed torch GPIO is released.
 *
 * Returns 0 on success or the negative errno from gpio_init/gpio_request.
 */
static int flashlight_setup_gpio(struct flashlight_platform_data *fl_pdata)
{
int ret;
pr_debug("%s\n", __func__);
/* Optional board-specific mux/initialisation hook. */
if (fl_pdata->gpio_init) {
ret = fl_pdata->gpio_init();
if (ret < 0) {
pr_err("%s: gpio init failed: %d\n", __func__,
ret);
return ret;
}
}
if (fl_pdata->torch) {
ret = gpio_request(fl_pdata->torch, "flashlight_torch");
if (ret < 0) {
pr_err("%s: gpio_request failed\n", __func__);
return ret;
}
}
if (fl_pdata->flash) {
ret = gpio_request(fl_pdata->flash, "flashlight_flash");
if (ret < 0) {
pr_err("%s: gpio_request failed\n", __func__);
/* Undo the torch claim made above. */
gpio_free(fl_pdata->torch);
return ret;
}
}
the_fl.gpio_torch = fl_pdata->torch;
the_fl.gpio_flash = fl_pdata->flash;
the_fl.flash_duration_ms = fl_pdata->flash_duration_ms;
return 0;
}
/*
 * Platform probe: claim GPIOs, register the LED class device, set up the
 * flash auto-off hrtimer and the early-suspend hook.
 *
 * NOTE(review): the hrtimer is initialised AFTER led_classdev_register();
 * a brightness_set call arriving in that window would hit an
 * uninitialised timer — confirm whether this ordering is safe.
 *
 * Returns 0 on success; unwinds GPIO claims on registration failure.
 */
static int flashlight_probe(struct platform_device *pdev)
{
struct flashlight_platform_data *fl_pdata = pdev->dev.platform_data;
int err = 0;
pr_debug("%s\n", __func__);
err = flashlight_setup_gpio(fl_pdata);
if (err < 0) {
pr_err("%s: setup GPIO failed\n", __func__);
goto fail_free_mem;
}
spin_lock_init(&the_fl.spin_lock);
the_fl.fl_lcdev.name = pdev->name;
the_fl.fl_lcdev.brightness_set = fl_lcdev_brightness_set;
the_fl.fl_lcdev.brightness = LED_OFF;
err = led_classdev_register(&pdev->dev, &the_fl.fl_lcdev);
if (err < 0) {
pr_err("failed on led_classdev_register\n");
goto fail_free_gpio;
}
hrtimer_init(&the_fl.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
the_fl.timer.function = flashlight_timeout;
#ifdef CONFIG_HAS_EARLYSUSPEND
the_fl.early_suspend_flashlight.suspend = flashlight_early_suspend;
the_fl.early_suspend_flashlight.resume = NULL;
register_early_suspend(&the_fl.early_suspend_flashlight);
#endif
return 0;
fail_free_gpio:
if (fl_pdata->torch)
gpio_free(fl_pdata->torch);
if (fl_pdata->flash)
gpio_free(fl_pdata->flash);
fail_free_mem:
return err;
}
/*
 * Platform remove: reverse of probe — cancel the auto-off timer, drop the
 * early-suspend hook, force the light off, then release the LED class
 * device and GPIOs.
 */
static int flashlight_remove(struct platform_device *pdev)
{
struct flashlight_platform_data *fl_pdata = pdev->dev.platform_data;
pr_debug("%s\n", __func__);
hrtimer_cancel(&the_fl.timer);
unregister_early_suspend(&the_fl.early_suspend_flashlight);
flashlight_control(FLASHLIGHT_OFF);
led_classdev_unregister(&the_fl.fl_lcdev);
if (fl_pdata->torch)
gpio_free(fl_pdata->torch);
if (fl_pdata->flash)
gpio_free(fl_pdata->flash);
return 0;
}
/* Platform driver glue; matched against the board device by name. */
static struct platform_driver flashlight_driver = {
	.probe = flashlight_probe,
	.remove = flashlight_remove,
	.driver = {
		.name = FLASHLIGHT_NAME,
		.owner = THIS_MODULE,
	},
};

/* Module init: just register the platform driver. */
static int __init flashlight_init(void)
{
	pr_debug("%s\n", __func__);
	return platform_driver_register(&flashlight_driver);
}

/* Module exit: unregister; per-device teardown runs via ->remove. */
static void __exit flashlight_exit(void)
{
	pr_debug("%s\n", __func__);
	platform_driver_unregister(&flashlight_driver);
}

module_init(flashlight_init);
module_exit(flashlight_exit);

MODULE_AUTHOR("Zion Huang <zion_huang@htc.com>");
MODULE_DESCRIPTION("flash light driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
uberlaggydarwin/htc-desire-eye-kernel | net/ipv6/tunnel6.c | 8052 | 4757 | /*
* Copyright (C)2003,2004 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*/
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>
/*
 * Heads of the IPv6-in-IPv6 and IPv4-in-IPv6 tunnel handler lists, kept
 * sorted by ascending handler priority.  Readers traverse them under RCU;
 * list mutation is serialized by tunnel6_mutex.
 */
static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
/*
 * xfrm6_tunnel_register - insert @handler into the handler list for
 * @family (AF_INET6 -> tunnel6_handlers, otherwise tunnel46_handlers),
 * keeping the list ordered by ascending ->priority.
 *
 * Returns 0 on success, or -EEXIST if a handler with the same priority
 * is already registered.  The new entry is published with
 * rcu_assign_pointer() so concurrent RCU readers always see a valid list.
 */
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
{
	struct xfrm6_tunnel __rcu **pprev;
	struct xfrm6_tunnel *t;
	int ret = -EEXIST;
	int priority = handler->priority;

	mutex_lock(&tunnel6_mutex);

	/* Walk to the first entry whose priority exceeds ours. */
	for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&tunnel6_mutex))) != NULL;
	     pprev = &t->next) {
		if (t->priority > priority)
			break;
		if (t->priority == priority)
			goto err;
	}

	/* Link the new node first, then publish it to readers. */
	handler->next = *pprev;
	rcu_assign_pointer(*pprev, handler);

	ret = 0;

err:
	mutex_unlock(&tunnel6_mutex);
	return ret;
}
EXPORT_SYMBOL(xfrm6_tunnel_register);
/*
 * xfrm6_tunnel_deregister - unlink @handler from the list selected by
 * @family.  Returns 0 if found, -ENOENT otherwise.
 *
 * synchronize_net() after unlinking guarantees no RCU reader still holds
 * a reference to the removed entry when we return, so the caller may
 * free it.
 */
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
{
	struct xfrm6_tunnel __rcu **pprev;
	struct xfrm6_tunnel *t;
	int ret = -ENOENT;

	mutex_lock(&tunnel6_mutex);

	for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&tunnel6_mutex))) != NULL;
	     pprev = &t->next) {
		if (t == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel6_mutex);

	/* Wait out all in-flight RCU readers before returning. */
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(xfrm6_tunnel_deregister);
/*
 * for_each_tunnel_rcu - walk an RCU-protected tunnel handler list.
 * Caller must be in an RCU read-side critical section (the macro only
 * uses rcu_dereference()).
 *
 * Fix: the original macro carried a stray trailing '\' after the closing
 * parenthesis, splicing the next source line into the macro body.
 */
#define for_each_tunnel_rcu(head, handler)		\
	for (handler = rcu_dereference(head);		\
	     handler != NULL;				\
	     handler = rcu_dereference(handler->next))
static int tunnel6_rcv(struct sk_buff *skb)
{
struct xfrm6_tunnel *handler;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
for_each_tunnel_rcu(tunnel6_handlers, handler)
if (!handler->handler(skb))
return 0;
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
return 0;
}
static int tunnel46_rcv(struct sk_buff *skb)
{
struct xfrm6_tunnel *handler;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto drop;
for_each_tunnel_rcu(tunnel46_handlers, handler)
if (!handler->handler(skb))
return 0;
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
return 0;
}
/*
 * tunnel6_err - ICMPv6 error dispatcher: pass the error to each handler
 * until one claims it by returning 0.
 */
static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	struct xfrm6_tunnel *t;

	for (t = rcu_dereference(tunnel6_handlers); t != NULL;
	     t = rcu_dereference(t->next)) {
		if (!t->err_handler(skb, opt, type, code, offset, info))
			break;
	}
}
/* inet6 protocol hook for IPPROTO_IPV6 (IPv6-in-IPv6). */
static const struct inet6_protocol tunnel6_protocol = {
	.handler	= tunnel6_rcv,
	.err_handler	= tunnel6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* inet6 protocol hook for IPPROTO_IPIP (IPv4-in-IPv6); errors share
 * tunnel6_err. */
static const struct inet6_protocol tunnel46_protocol = {
	.handler	= tunnel46_rcv,
	.err_handler	= tunnel6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/*
 * tunnel6_init - register both tunnel protocol handlers with the inet6
 * stack.  On partial failure the already-registered handler is removed
 * again; returns 0 or -EAGAIN.
 */
static int __init tunnel6_init(void)
{
	if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6))
		goto err_v6;
	if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP))
		goto err_v46;
	return 0;

err_v46:
	printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
	inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6);
	return -EAGAIN;
err_v6:
	printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
	return -EAGAIN;
}
/* tunnel6_fini - unregister both handlers; failures are only logged. */
static void __exit tunnel6_fini(void)
{
	if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP))
		printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
	if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6))
		printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
}

module_init(tunnel6_init);
module_exit(tunnel6_fini);
MODULE_LICENSE("GPL");
| gpl-2.0 |
HomuHomu/android_kernel_samsung_js01lte | drivers/net/wireless/bcmdhd4354/bcmsdh.c | 117 | 18135 | /*
* BCMSDH interface glue
* implement bcmsdh API for SDIOH driver
*
* Copyright (C) 1999-2013, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: bcmsdh.c 432245 2013-10-26 22:45:40Z $
*/
/**
* @file bcmsdh.c
*/
/* ****************** BCMSDH Interface Functions *************************** */
#include <typedefs.h>
#include <bcmdevs.h>
#include <bcmendian.h>
#include <bcmutils.h>
#include <hndsoc.h>
#include <siutils.h>
#include <osl.h>
#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
#include <bcmsdbus.h> /* common SDIO/controller interface */
#include <sbsdio.h> /* SDIO device core hardware definitions. */
#include <sdio.h> /* SDIO Device and Protocol Specs */
/* Max extra attempts for CCCR config-space accesses (see bcmsdh_cfg_read). */
#define SDIOH_API_ACCESS_RETRY_LIMIT	2
const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
/* local copy of bcm sd handler; used when callers pass a NULL sdh */
bcmsdh_info_t * l_bcmsdh = NULL;
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
extern int
sdioh_enable_hw_oob_intr(void *sdioh, bool enable);

/* Thin pass-through enabling/disabling the hardware out-of-band interrupt
 * line at the SDIO host layer. */
void
bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
{
	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
}
#endif
/* Attach BCMSDH layer to SDIO Host Controller Driver
*
* @param osh OSL Handle.
* @param cfghdl Configuration Handle.
* @param regsva Virtual address of controller registers.
* @param irq Interrupt number of SDIO controller.
*
* @return bcmsdh_info_t Handle to BCMSDH context.
*/
/*
 * Allocate and initialise a BCMSDH context for @sdioh, report the SI
 * enumeration base through *regsva, and cache the handle in l_bcmsdh.
 * Returns NULL on allocation failure.
 */
bcmsdh_info_t *
bcmsdh_attach(osl_t *osh, void *sdioh, uint32 *regsva)
{
	bcmsdh_info_t *sdh;

	sdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t));
	if (sdh == NULL) {
		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
		return NULL;
	}
	bzero((char *)sdh, sizeof(bcmsdh_info_t));

	sdh->sdioh = sdioh;
	sdh->osh = osh;
	sdh->init_success = TRUE;

	*regsva = SI_ENUM_BASE;

	/* Report the BAR, to fix if needed */
	sdh->sbwad = SI_ENUM_BASE;

	/* save the handler locally */
	l_bcmsdh = sdh;

	return sdh;
}
/* Free the BCMSDH context and clear the cached global handle. */
int
bcmsdh_detach(osl_t *osh, void *sdh)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;

	if (p)
		MFREE(osh, p, sizeof(bcmsdh_info_t));

	l_bcmsdh = NULL;
	return 0;
}
/* Forward an iovar get/set straight to the SDIO host layer. */
int
bcmsdh_iovar_op(void *sdh, const char *name,
                void *params, int plen, void *arg, int len, bool set)
{
	return sdioh_iovar_op(((bcmsdh_info_t *)sdh)->sdioh,
	                      name, params, plen, arg, len, set);
}
/*
 * bcmsdh_intr_query - ask the host controller whether a device interrupt
 * is pending.  Returns the queried state on success, FALSE on failure.
 *
 * Fix: the original returned FALSE when the query SUCCEEDED (discarding
 * `on`) and returned the then-uninitialised `on` when it failed — the
 * branches were inverted relative to every other SDIOH_API_SUCCESS use
 * in this file.
 */
bool
bcmsdh_intr_query(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
	bool on = FALSE;

	ASSERT(bcmsdh);
	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
	if (SDIOH_API_SUCCESS(status))
		return on;
	else
		return FALSE;
}
/* Enable the SDIO device interrupt at the host; on gSPI Android builds
 * the interrupt-enable bits of the SPI config word are set as well.
 * Returns 0 or BCME_ERROR. */
int
bcmsdh_intr_enable(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef BCMSPI_ANDROID
	uint32 data;
#endif /* BCMSPI_ANDROID */
	ASSERT(bcmsdh);

	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
#ifdef BCMSPI_ANDROID
	/* NOTE(review): 0xE0E70000 is presumably the gSPI intr-enable mask —
	 * confirm against the gSPI register spec. */
	data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
	data |= 0xE0E70000;
	bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
#endif /* BCMSPI_ANDROID */
	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}

/* Disable the SDIO device interrupt; mirror image of bcmsdh_intr_enable. */
int
bcmsdh_intr_disable(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef BCMSPI_ANDROID
	uint32 data;
#endif /* BCMSPI_ANDROID */
	ASSERT(bcmsdh);

	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
#ifdef BCMSPI_ANDROID
	data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
	data &= ~0xE0E70000;
	bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
#endif /* BCMSPI_ANDROID */
	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
/* Register an interrupt callback with the host layer. */
int
bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC rc;

	ASSERT(p);
	rc = sdioh_interrupt_register(p->sdioh, fn, argh);
	return SDIOH_API_SUCCESS(rc) ? 0 : BCME_ERROR;
}

/* Remove the previously registered interrupt callback. */
int
bcmsdh_intr_dereg(void *sdh)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC rc;

	ASSERT(p);
	rc = sdioh_interrupt_deregister(p->sdioh);
	return SDIOH_API_SUCCESS(rc) ? 0 : BCME_ERROR;
}
#if defined(DHD_DEBUG)
/* Debug helper: poll the host layer for a pending interrupt. */
bool
bcmsdh_intr_pending(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;

	ASSERT(sdh);
	return sdioh_interrupt_pending(bcmsdh->sdioh);
}
#endif

/* Device-removal callback registration: stubbed out. */
int
bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
{
	ASSERT(sdh);

	/* don't support yet */
	return BCME_UNSUPPORTED;
}
/**
 * Read a byte from SDIO Configuration Space.
 * @param sdh SDIO Host context (NULL falls back to the cached l_bcmsdh).
 * @param fnc_num Function number to read from.
 * @param addr Address to read from.
 * @param err Optional error return (0 or BCME_SDIO_ERROR).
 * @return value read from SDIO configuration space.
 */
uint8
bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	int32 retry = 0;
#endif
	uint8 data = 0;

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);

	/* Note: the do/while brackets live inside matching #ifdef blocks;
	 * with the retry limit undefined this degenerates to one attempt. */
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	do {
		if (retry)	/* wait for 1 ms till bus get settled down */
			OSL_DELAY(1000);
#endif
	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
#endif
	if (err)
		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
	            fnc_num, addr, data));

	return data;
}

/* Write a byte to SDIO Configuration Space; same retry scheme and error
 * reporting convention as bcmsdh_cfg_read(). */
void
bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	int32 retry = 0;
#endif

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);

#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	do {
		if (retry)	/* wait for 1 ms till bus get settled down */
			OSL_DELAY(1000);
#endif
	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
#endif
	if (err)
		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
	            fnc_num, addr, data));
}
/* Read a 32-bit word from SDIO config space; *err gets 0 or
 * BCME_SDIO_ERROR when non-NULL. */
uint32
bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC rc;
	uint32 word = 0;

	if (!p)
		p = l_bcmsdh;
	ASSERT(p->init_success);

	rc = sdioh_request_word(p->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ,
	                        fnc_num, addr, &word, 4);
	if (err)
		*err = SDIOH_API_SUCCESS(rc) ? 0 : BCME_SDIO_ERROR;

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
	             fnc_num, addr, word));
	return word;
}

/* Write a 32-bit word to SDIO config space; error convention as above. */
void
bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC rc;

	if (!p)
		p = l_bcmsdh;
	ASSERT(p->init_success);

	rc = sdioh_request_word(p->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE,
	                        fnc_num, addr, &data, 4);
	if (err)
		*err = SDIOH_API_SUCCESS(rc) ? 0 : BCME_SDIO_ERROR;

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
	             addr, data));
}
/*
 * bcmsdh_cis_read - read up to @length bytes of CIS data for @func into
 * @cis.  High bits set in @func request hex-ASCII formatting: the raw
 * bytes are copied aside and re-rendered in place as "xx " pairs, 16 per
 * line.  Returns 0 or BCME_*.
 */
int
bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;

	uint8 *tmp_buf, *tmp_ptr;
	uint8 *ptr;
	bool ascii = func & ~0xf;	/* any flag bit above the fn number */
	func &= 0x7;

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);
	ASSERT(cis);
	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);

	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);

	if (ascii) {
		/* Move binary bits to tmp and format them into the provided buffer. */
		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
			return BCME_NOMEM;
		}
		bcopy(cis, tmp_buf, length);
		/* "- 4" keeps headroom for snprintf's terminator; ptr advances
		 * by the formatted width each iteration. */
		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
			ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
				"%.2x ", *tmp_ptr & 0xff);
			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
				ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n");
		}
		MFREE(bcmsdh->osh, tmp_buf, length);
	}

	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
/*
 * Program the three SBADDR backplane window registers so @address falls
 * inside the current window.  The window base is cached in sbwad; the
 * registers are only touched when the base changes or @force_set is TRUE.
 * On any write error the cache is invalidated so the next call retries.
 */
int
bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
{
	int err = 0;
	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;

	if (bar0 != bcmsdh->sbwad || force_set) {
		/* Window base is written 8 bits at a time: low, mid, high. */
		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
		if (!err)
			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
		if (!err)
			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);

		if (!err)
			bcmsdh->sbwad = bar0;
		else
			/* invalidate cached window var */
			bcmsdh->sbwad = 0;
	}

	return err;
}
/*
 * bcmsdh_reg_read - read a 1/2/4-byte backplane register at @addr via
 * function 1, setting the SBADDR window first.  Sets bcmsdh->regfail on
 * error and returns the value masked to @size.
 *
 * NOTE(review): 0xFFFFFFFF doubles as the error sentinel and a possible
 * register value — callers should check bcmsdh_regfail() to distinguish.
 */
uint32
bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
	uint32 word = 0;

	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);

	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
		return 0xFFFFFFFF;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	if (size == 4)
		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* request a 4-byte access */

	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
	                            SDIOH_READ, SDIO_FUNC_1, addr, &word, size);

	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));

	BCMSDH_INFO(("uint32data = 0x%x\n", word));

	/* if ok, return appropriately masked word */
	if (SDIOH_API_SUCCESS(status)) {
		switch (size) {
			case sizeof(uint8):
				return (word & 0xff);
			case sizeof(uint16):
				return (word & 0xffff);
			case sizeof(uint32):
				return word;
			default:
				bcmsdh->regfail = TRUE;

		}
	}

	/* otherwise, bad sdio access or invalid size */
	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
	return 0xFFFFFFFF;
}
/*
 * bcmsdh_reg_write - write @data (1/2/4 bytes) to a backplane register.
 * Returns 0 on success; on failure returns either the window-setup errno
 * or 0xFFFFFFFF — a mixed convention preserved for existing callers,
 * who should rely on bcmsdh_regfail() instead.
 */
uint32
bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
	int err = 0;

	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
	             __FUNCTION__, addr, size*8, data));

	if (!bcmsdh)
		bcmsdh = l_bcmsdh;

	ASSERT(bcmsdh->init_success);

	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	if (size == 4)
		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
	                            addr, &data, size);
	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));

	if (SDIOH_API_SUCCESS(status))
		return 0;

	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
	              __FUNCTION__, data, addr, size));
	return 0xFFFFFFFF;
}
bool
bcmsdh_regfail(void *sdh)
{
return ((bcmsdh_info_t *)sdh)->regfail;
}
/*
 * bcmsdh_recv_buf - synchronous PIO read of @nbytes from function @fn at
 * @addr into @buf/@pkt.  SDIO_REQ_ASYNC is not implemented; SDIO_REQ_FIXED
 * selects fixed-address mode and SDIO_REQ_4BYTE a 4-byte access width.
 * Returns 0 or BCME_SDIO_ERROR.
 */
int
bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
                uint8 *buf, uint nbytes, void *pkt,
                bcmsdh_cmplt_fn_t complete_fn, void *handle)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
	uint incr_fix;
	uint width;
	int err = 0;

	ASSERT(bcmsdh);
	ASSERT(bcmsdh->init_success);

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
	             __FUNCTION__, fn, addr, nbytes));

	/* Async not implemented yet */
	ASSERT(!(flags & SDIO_REQ_ASYNC));
	if (flags & SDIO_REQ_ASYNC)
		return BCME_UNSUPPORTED;

	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;

	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	if (width == 4)
		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);

	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
}

/*
 * bcmsdh_send_buf - write counterpart of bcmsdh_recv_buf.
 * NOTE(review): returns BCME_ERROR on failure where recv returns
 * BCME_SDIO_ERROR — inconsistency preserved for existing callers.
 */
int
bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
                uint8 *buf, uint nbytes, void *pkt,
                bcmsdh_cmplt_fn_t complete_fn, void *handle)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;
	uint incr_fix;
	uint width;
	int err = 0;

	ASSERT(bcmsdh);
	ASSERT(bcmsdh->init_success);

	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
	            __FUNCTION__, fn, addr, nbytes));

	/* Async not implemented yet */
	ASSERT(!(flags & SDIO_REQ_ASYNC));
	if (flags & SDIO_REQ_ASYNC)
		return BCME_UNSUPPORTED;

	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;

	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	if (width == 4)
		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);

	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
/*
 * bcmsdh_rwdata - raw 4-byte-wide PIO transfer on function 1; @rw non-zero
 * means write.  @addr must already be window-relative (no SBADDR setup is
 * done here, as the ASSERT enforces).
 */
int
bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	SDIOH_API_RC status;

	ASSERT(bcmsdh);
	ASSERT(bcmsdh->init_success);
	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
	                              addr, 4, nbytes, buf, NULL);

	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
/* Abort an in-progress transfer on function @fn. */
int
bcmsdh_abort(void *sdh, uint fn)
{
	return sdioh_abort(((bcmsdh_info_t *)sdh)->sdioh, fn);
}

/* Notify the host layer of a (re)start at the given stage. */
int
bcmsdh_start(void *sdh, int stage)
{
	return sdioh_start(((bcmsdh_info_t *)sdh)->sdioh, stage);
}

/* Notify the host layer of a stop. */
int
bcmsdh_stop(void *sdh)
{
	return sdioh_stop(((bcmsdh_info_t *)sdh)->sdioh);
}

/* Block until the host-layer lock is free. */
int
bcmsdh_waitlockfree(void *sdh)
{
	return sdioh_waitlockfree(((bcmsdh_info_t *)sdh)->sdioh);
}
#ifdef BCMSPI /* 4329 gSPI won't have CIS reads. */
/* gSPI build: no CIS, so hard-code the vendor/device id. */
int
bcmsdh_query_device(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | BCM4321_D11N2G_ID;
	return (bcmsdh->vendevid);
}
#else
/* SDIO build: report Broadcom vendor with a zero device id. */
int
bcmsdh_query_device(void *sdh)
{
	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
	return (bcmsdh->vendevid);
}
#endif /* else BCMSPI */
/* Return the number of I/O functions reported by the host layer. */
uint
bcmsdh_query_iofnum(void *sdh)
{
	bcmsdh_info_t *p = sdh ? (bcmsdh_info_t *)sdh : l_bcmsdh;

	return sdioh_query_iofnum(p->sdioh);
}
/* Ask the host layer to reset the SDIO core. */
int
bcmsdh_reset(bcmsdh_info_t *sdh)
{
	return sdioh_sdio_reset(sdh->sdioh);
}

/* Expose the underlying sdioh handle to callers that need it. */
void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
{
	ASSERT(sdh);
	return sdh->sdioh;
}
/* Function to pass device-status bits to DHD.  Only meaningful on gSPI
 * builds; plain SDIO has no device-status word and returns 0. */
uint32
bcmsdh_get_dstatus(void *sdh)
{
#ifdef BCMSPI
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
	return sdioh_get_dstatus(sd);
#else
	return 0;
#endif /* BCMSPI */
}
/* Return the currently cached SBADDR window base. */
uint32
bcmsdh_cur_sbwad(void *sdh)
{
	bcmsdh_info_t *p = sdh ? (bcmsdh_info_t *)sdh : l_bcmsdh;

	return p->sbwad;
}
/* Forward chip id/revision to the host layer (gSPI builds only; a no-op
 * for plain SDIO). */
void
bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
{
#ifdef BCMSPI
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
	sdioh_chipinfo(sd, chip, chiprev);
#else
	return;
#endif /* BCMSPI */
}

#ifdef BCMSPI
/* Toggle gSPI dword mode in the host layer. */
void
bcmsdh_dwordmode(void *sdh, bool set)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
	sdioh_dwordmode(sd, set);
	return;
}
#endif /* BCMSPI */

/* Enter/leave host-controller sleep; BCME_UNSUPPORTED unless the host
 * layer provides sdioh_sleep(). */
int
bcmsdh_sleep(void *sdh, bool enab)
{
#ifdef SDIOH_SLEEP_ENABLED
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);

	return sdioh_sleep(sd, enab);
#else
	return BCME_UNSUPPORTED;
#endif
}
/* Initialise the host controller's GPIO block. */
int
bcmsdh_gpio_init(void *sdh)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;

	return sdioh_gpio_init((sdioh_info_t *)p->sdioh);
}

/* Sample the level of host-controller GPIO @gpio. */
bool
bcmsdh_gpioin(void *sdh, uint32 gpio)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;

	return sdioh_gpioin((sdioh_info_t *)p->sdioh, gpio);
}

/* Configure host-controller GPIO @gpio as an output. */
int
bcmsdh_gpioouten(void *sdh, uint32 gpio)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;

	return sdioh_gpioouten((sdioh_info_t *)p->sdioh, gpio);
}

/* Drive host-controller GPIO @gpio to @enab. */
int
bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
{
	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;

	return sdioh_gpioout((sdioh_info_t *)p->sdioh, gpio, enab);
}
| gpl-2.0 |
Buckmarble/elite_kernel_grouper | drivers/char/hpet.c | 629 | 25039 | /*
* Intel & MS High Precision Event Timer Implementation.
*
* Copyright (C) 2003 Intel Corporation
* Venki Pallipadi
* (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
* Bob Picco <robert.picco@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>
/*
* The High Precision Event Timer driver.
* This driver is closely modelled after the rtc.c driver.
* http://www.intel.com/hardwaredesign/hpetspec_1.pdf
*/
/* Max interrupt frequency (Hz) an unprivileged user may request. */
#define	HPET_USER_FREQ	(64)
/* Allowance (ticks) for comparator-write latency; see hpet_interrupt(). */
#define	HPET_DRIFT	(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */


/* WARNING -- don't get confused.  These macros are never used
 * to write the (single) counter, and rarely to read it.
 * They're badly named; to fix, someday.
 */
#if BITS_PER_LONG == 64
#define	write_counter(V, MC)	writeq(V, MC)
#define	read_counter(MC)	readq(MC)
#else
#define	write_counter(V, MC)	writel(V, MC)
#define	read_counter(MC)	readl(MC)
#endif

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;

/* Clocksource read hook: return the free-running main counter. */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
/* Length of hd_name, "hpetN" plus terminator. */
#define	HPET_DEV_NAME	(7)

/* Per-comparator (per-timer) state handed to userspace via /dev/hpet. */
struct hpet_dev {
	struct hpets *hd_hpets;			/* owning block */
	struct hpet __iomem *hd_hpet;		/* mapped HPET registers */
	struct hpet_timer __iomem *hd_timer;	/* this comparator's registers */
	unsigned long hd_ireqfreq;		/* requested interval, in ticks */
	unsigned long hd_irqdata;		/* interrupts seen since last read */
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	unsigned int hd_flags;			/* HPET_OPEN / HPET_IE / ... */
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};

/* One HPET block: its timers follow in the flexible-style hp_dev array. */
struct hpets {
	struct hpets *hp_next;			/* singly linked list of blocks */
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;		/* physical base, for mmap */
	struct clocksource *hp_clocksource;
	unsigned long long hp_tick_freq;
	unsigned long hp_delta;			/* comparator-write allowance */
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;
/* hd_flags bits */
#define	HPET_OPEN		0x0001	/* device node held by a process */
#define	HPET_IE			0x0002	/* interrupt enabled */
#define	HPET_PERIODIC		0x0004
#define	HPET_SHARED_IRQ		0x0008


/* 32-bit fallbacks: compose a 64-bit MMIO access from two 32-bit ones. */
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(v & 0xffffffff, addr);
	writel(v >> 32, addr + 4);
}
#endif
/*
 * Interrupt handler for one comparator: count the tick, re-arm
 * non-periodic timers to emulate periodic behaviour, ack shared-IRQ
 * status, and wake readers/pollers.
 */
static irqreturn_t hpet_interrupt(int irq, void *data)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;
	isr = 1 << (devp - devp->hd_hpets->hp_dev);

	/* On a shared line, bail out unless our status bit is set. */
	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
		return IRQ_NONE;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, increment the accumulator.
	 * This has the effect of treating non-periodic like periodic.
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long m, t, mc, base, k;
		struct hpet __iomem *hpet = devp->hd_hpet;
		struct hpets *hpetp = devp->hd_hpets;

		t = devp->hd_ireqfreq;
		m = read_counter(&devp->hd_timer->hpet_compare);
		mc = read_counter(&hpet->hpet_mc);
		/* The time for the next interrupt would logically be t + m,
		 * however, if we are very unlucky and the interrupt is delayed
		 * for longer than t then we will completely miss the next
		 * interrupt if we set t + m and an application will hang.
		 * Therefore we need to make a more complex computation assuming
		 * that there exists a k for which the following is true:
		 * k * t + base < mc + delta
		 * (k + 1) * t + base > mc + delta
		 * where t is the interval in hpet ticks for the given freq,
		 * base is the theoretical start value 0 < base < t,
		 * mc is the main counter value at the time of the interrupt,
		 * delta is the time it takes to write the a value to the
		 * comparator.
		 * k may then be computed as (mc - base + delta) / t .
		 */
		base = mc % t;
		k = (mc - base + hpetp->hp_delta) / t;
		write_counter(t * (k + 1) + base,
			      &devp->hd_timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ)
		writel(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}
/*
 * Pick and program a hardware IRQ for @devp's comparator from its
 * routing-capability mask, avoiding legacy IRQs, and record the GSI in
 * hd_hdwirq.  Does nothing if an IRQ is already assigned.
 */
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
	unsigned long v;
	int irq, gsi;
	struct hpet_timer __iomem *timer;

	spin_lock_irq(&hpet_lock);
	if (devp->hd_hdwirq) {
		spin_unlock_irq(&hpet_lock);
		return;
	}

	timer = devp->hd_timer;

	/* we prefer level triggered mode */
	v = readl(&timer->hpet_config);
	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
		v |= Tn_INT_TYPE_CNF_MASK;
		writel(v, &timer->hpet_config);
	}
	spin_unlock_irq(&hpet_lock);

	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
				 Tn_INT_ROUTE_CAP_SHIFT;

	/*
	 * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15 which is always used by
	 * legacy device. In IO APIC mode, we skip all the legacy IRQS.
	 */
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		v &= ~0xf3df;
	else
		v &= ~0xffff;

	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
		if (irq >= nr_irqs) {
			irq = HPET_MAX_IRQ;
			break;
		}

		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
					ACPI_ACTIVE_LOW);
		if (gsi > 0)
			break;

		/* FIXME: Setup interrupt source table */
	}

	if (irq < HPET_MAX_IRQ) {
		spin_lock_irq(&hpet_lock);
		v = readl(&timer->hpet_config);
		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
		writel(v, &timer->hpet_config);
		devp->hd_hdwirq = gsi;
		spin_unlock_irq(&hpet_lock);
	}
	return;
}
/*
 * open() on /dev/hpet: claim the first timer, across all HPET blocks,
 * that no one else holds.  Read-only opens only; -EBUSY when every
 * timer is taken.
 */
static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp = NULL;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	mutex_lock(&hpet_mutex);
	spin_lock_irq(&hpet_lock);

	/* First-fit search for an unclaimed timer. */
	for (hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) {
		for (i = 0; i < hpetp->hp_ntimer; i++) {
			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
				continue;
			devp = &hpetp->hp_dev[i];
			break;
		}
	}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		mutex_unlock(&hpet_mutex);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);
	mutex_unlock(&hpet_mutex);

	hpet_timer_set_irq(devp);

	return 0;
}
/*
 * read() on /dev/hpet: block until at least one interrupt has occurred,
 * then return (and reset) the accumulated interrupt count as an
 * unsigned long.  -EIO if no frequency has been set, -EAGAIN for
 * non-blocking opens with nothing pending.
 */
static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		/* Set state before sampling hd_irqdata so a wakeup between
		 * the check and schedule() is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data)
			break;
		else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}
/*
 * poll() on /dev/hpet: readable when interrupts have accumulated since
 * the last read; always 0 before a frequency is configured.
 */
static unsigned int hpet_poll(struct file *file, poll_table * wait)
{
	struct hpet_dev *devp = file->private_data;
	unsigned long pending;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	pending = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	return pending ? (POLLIN | POLLRDNORM) : 0;
}
/*
 * mmap() on /dev/hpet: map the one-page HPET register block read-only
 * into userspace as uncached I/O memory.  Requires CONFIG_HPET_MMAP and
 * a page-aligned register base; exactly one page, offset 0.
 */
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef	CONFIG_HPET_MMAP
	struct hpet_dev *devp;
	unsigned long addr;

	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
		return -EINVAL;

	devp = file->private_data;
	addr = devp->hd_hpets->hp_hpet_phys;

	if (addr & (PAGE_SIZE - 1))
		return -ENOSYS;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
					PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
			__func__);
		return -EAGAIN;
	}

	return 0;
#else
	return -ENOSYS;
#endif
}
/* fasync() on /dev/hpet: (un)subscribe to SIGIO delivery. */
static int hpet_fasync(int fd, struct file *file, int on)
{
	struct hpet_dev *devp = file->private_data;

	return fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0 ? 0 : -EIO;
}
/*
 * Release the timer on last close: disable its interrupt, drop periodic
 * mode if it was armed, free the IRQ, and clear the per-open flags so
 * the timer can be opened again.
 */
static int hpet_release(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int irq = 0;

	devp = file->private_data;
	timer = devp->hd_timer;

	spin_lock_irq(&hpet_lock);

	/* Disable the timer's interrupt before tearing anything down. */
	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);

	irq = devp->hd_irq;
	devp->hd_irq = 0;

	devp->hd_ireqfreq = 0;

	/* If periodic mode is armed in hardware, toggle it back off. */
	if (devp->hd_flags & HPET_PERIODIC
	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
		unsigned long v;

		v = readq(&timer->hpet_config);
		v ^= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
	}

	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
	spin_unlock_irq(&hpet_lock);

	/* free_irq() can sleep, so do it outside the spinlock. */
	if (irq)
		free_irq(irq, devp);

	file->private_data = NULL;
	return 0;
}
/*
 * HPET_IE_ON: arm the timer and enable its interrupt.  Claims the
 * timer (HPET_IE), requests the IRQ, programs the comparator (forced
 * to 32-bit mode) and, for periodic timers, the hidden accumulator.
 * Returns -EBUSY if already enabled, -EIO if no frequency was set or
 * no IRQ could be obtained.
 */
static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int irq;
	unsigned long g, v, t, m;
	unsigned long flags, isr;

	timer = devp->hd_timer;
	hpet = devp->hd_hpet;
	hpetp = devp->hd_hpets;

	if (!devp->hd_ireqfreq)
		return -EIO;

	spin_lock_irq(&hpet_lock);

	if (devp->hd_flags & HPET_IE) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	devp->hd_flags |= HPET_IE;

	/*
	 * Tn_INT_TYPE_CNF set means the interrupt is level-triggered;
	 * the driver treats such timers as sharing their IRQ line.
	 */
	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
		devp->hd_flags |= HPET_SHARED_IRQ;
	spin_unlock_irq(&hpet_lock);

	irq = devp->hd_hdwirq;

	if (irq) {
		unsigned long irq_flags;

		if (devp->hd_flags & HPET_SHARED_IRQ) {
			/*
			 * To prevent the interrupt handler from seeing an
			 * unwanted interrupt status bit, program the timer
			 * so that it will not fire in the near future ...
			 */
			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
			       &timer->hpet_config);
			write_counter(read_counter(&hpet->hpet_mc),
				      &timer->hpet_compare);
			/* ... and clear any left-over status. */
			isr = 1 << (devp - devp->hd_hpets->hp_dev);
			writel(isr, &hpet->hpet_isr);
		}

		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
		irq_flags = devp->hd_flags & HPET_SHARED_IRQ
						? IRQF_SHARED : IRQF_DISABLED;
		if (request_irq(irq, hpet_interrupt, irq_flags,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		/* Back out the HPET_IE claim taken above. */
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);

	/* 64-bit comparators are not yet supported through the ioctls,
	 * so force this into 32-bit mode if it supports both modes
	 */
	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);

		/*
		 * NOTE: First we modify the hidden accumulator
		 * register supported by periodic-capable comparators.
		 * We never want to modify the (single) counter; that
		 * would affect all the comparators. The value written
		 * is the counter value when the first interrupt is due.
		 */
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
		/*
		 * Then we modify the comparator, indicating the period
		 * for subsequent interrupt.
		 */
		write_counter(t, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ) {
		isr = 1 << (devp - devp->hd_hpets->hp_dev);
		writel(isr, &hpet->hpet_isr);
	}
	/* Finally commit the config with the interrupt-enable bit set. */
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}
/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
					  unsigned long dis)
{
	unsigned long long m;

	/* Add half the divisor first so the division rounds to nearest. */
	m = hpets->hp_tick_freq + (dis >> 1);
	do_div(m, dis);
	return (unsigned long)m;
}
/*
 * Dispatch the HPET ioctls that hpet_ioctl()/hpet_compat_ioctl()
 * share.  HPET_IE_ON is delegated to hpet_ioctl_ieon(); the rest are
 * handled inline.  For HPET_INFO the result is written into *info and
 * copied to userspace by the caller.  Returns 0 or a negative errno.
 */
static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
		  struct hpet_info *info)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	/* First switch: validate cmd and pick up the device pointers. */
	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpet = devp->hd_hpet;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;
	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		/* Disable the interrupt and release the IRQ line. */
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		if (devp->hd_irq) {
			free_irq(devp->hd_irq, devp);
			devp->hd_irq = 0;
		}
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			/* memset avoids leaking stack padding to userspace. */
			memset(info, 0, sizeof(*info));
			if (devp->hd_ireqfreq)
				info->hi_ireqfreq =
					hpet_time_div(hpetp, devp->hd_ireqfreq);
			info->hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info->hi_hpet = hpetp->hp_which;
			info->hi_timer = devp - hpetp->hp_dev;
			break;
		}
	case HPET_EPI:
		/* Enable periodic mode (capability check only here). */
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		/* Disable periodic mode, clearing it in hardware if armed. */
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		/* Frequencies above the sysctl cap need CAP_SYS_RESOURCE. */
		if ((arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (!arg) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
	}

	return err;
}
/*
 * unlocked_ioctl entry point: serialize on hpet_mutex, run the common
 * handler, and for HPET_INFO copy the result out to userspace.
 */
static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err &&
	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
		err = -EFAULT;

	return err;
}
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct hpet_info for compat ioctl translation. */
struct compat_hpet_info {
	compat_ulong_t hi_ireqfreq;	/* Hz */
	compat_ulong_t hi_flags;	/* information */
	unsigned short hi_hpet;
	unsigned short hi_timer;
};

/*
 * compat_ioctl entry point for 32-bit callers: same as hpet_ioctl()
 * but narrows the two unsigned longs field-by-field on HPET_INFO.
 */
static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err) {
		struct compat_hpet_info __user *u = compat_ptr(arg);
		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
		    put_user(info.hi_flags, &u->hi_flags) ||
		    put_user(info.hi_hpet, &u->hi_hpet) ||
		    put_user(info.hi_timer, &u->hi_timer))
			err = -EFAULT;
	}

	return err;
}
#endif
/* File operations for the /dev/hpet misc character device. */
static const struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpet_compat_ioctl,
#endif
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};
static int hpet_is_known(struct hpet_data *hdp)
{
struct hpets *hpetp;
for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
return 1;
return 0;
}
/* sysctl tree exposing /proc/sys/dev/hpet/max-user-freq. */
static ctl_table hpet_table[] = {
	{
	 .procname = "max-user-freq",
	 .data = &hpet_max_freq,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec,
	 },
	{}
};

static ctl_table hpet_root[] = {
	{
	 .procname = "hpet",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_table,
	 },
	{}
};

static ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_root,
	 },
	{}
};
static struct ctl_table_header *sysctl_header;
/*
* Adjustment for when arming the timer with
* initial conditions. That is, main counter
* ticks expired before interrupts are enabled.
*/
#define TICK_CALIBRATE (1000UL)
/*
 * One calibration pass: using a free (not HPET_OPEN) timer, repeatedly
 * read the main counter and rewrite the comparator for roughly
 * 1/TICK_CALIBRATE second, then return the average main-counter delta
 * per loop iteration.  Returns 0 if no free timer is available.
 */
static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	/* Find a timer nobody has opened. */
	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp, TICK_CALIBRATE);

	/* Interrupts off so the loop timing is not perturbed. */
	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}
/*
 * Calibrate the read/write-comparator loop delta.  A single pass can
 * be inflated by an SMI; keep sampling until the value stops
 * decreasing and return the smallest (stable) result.
 */
static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	unsigned long best = -1;
	unsigned long sample;

	do {
		sample = __hpet_calibrate(hpetp);
		if (best <= sample)
			break;
		best = sample;
	} while (1);

	return best;
}
/*
 * Register a newly discovered HPET described by *hdp: allocate the
 * per-HPET state, sanity-check the timer count against the capability
 * register, link it onto the global list, start the main counter if
 * needed, initialize per-timer state, and calibrate.  Returns 0 on
 * success (including silently ignoring a duplicate) or -errno.
 */
int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	size_t siz;
	struct hpet __iomem *hpet;
	static struct hpets *last;
	unsigned long period;
	unsigned long long temp;
	u32 remainder;

	/*
	 * hpet_alloc can be called by platform dependent code.
	 * If platform dependent code has allocated the hpet that
	 * ACPI has also reported, then we catch it here.
	 */
	if (hpet_is_known(hdp)) {
		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
			__func__);
		return 0;
	}

	/* struct hpets already contains one hpet_dev, hence nirqs - 1. */
	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
				      sizeof(struct hpet_dev));

	hpetp = kzalloc(siz, GFP_KERNEL);

	if (!hpetp)
		return -ENOMEM;

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	/* Cross-check IRQ count against the hardware capability field. */
	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number irqs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	/* Convert the counter period (femtoseconds) into a tick rate. */
	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
	temp += period >> 1; /* round */
	do_div(temp, period);
	hpetp->hp_tick_freq = temp; /* ticks per second */

	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
		hpetp->hp_which, hdp->hd_phys_address,
		hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
	printk("\n");

	temp = hpetp->hp_tick_freq;
	remainder = do_div(temp, 1000000);
	printk(KERN_INFO
		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
		hpetp->hp_which, hpetp->hp_ntimer,
		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
		(unsigned) temp, remainder);

	/* Start the main counter if firmware left it disabled. */
	mcfg = readq(&hpet->hpet_config);
	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
		write_counter(0L, &hpet->hpet_mc);
		mcfg |= HPET_ENABLE_CNF_MASK;
		writeq(mcfg, &hpet->hpet_config);
	}

	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
		struct hpet_timer __iomem *timer;

		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

		devp->hd_hpets = hpetp;
		devp->hd_hpet = hpet;
		devp->hd_timer = timer;

		/*
		 * If the timer was reserved by platform code,
		 * then make timer unavailable for opens.
		 */
		if (hdp->hd_state & (1 << i)) {
			devp->hd_flags = HPET_OPEN;
			continue;
		}

		init_waitqueue_head(&devp->hd_waitqueue);
	}

	hpetp->hp_delta = hpet_calibrate(hpetp);

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
	if (!hpet_clocksource) {
		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
		clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
		clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
		hpetp->hp_clocksource = &clocksource_hpet;
		hpet_clocksource = &clocksource_hpet;
	}
#endif

	return 0;
}
/*
 * acpi_walk_resources() callback: extract the HPET's MMIO range and
 * IRQ list from the device's _CRS into the struct hpet_data at *data.
 * Returns AE_ALREADY_EXISTS for a duplicate HPET, AE_NO_MEMORY if the
 * register block cannot be mapped, AE_ERROR on GSI registration
 * failure, AE_OK otherwise.
 *
 * Fixes vs. the original: ioremap() can return NULL and the result was
 * used unchecked; the "!fixmem32" test was dead code (the address of a
 * struct member is never NULL) and has been dropped.
 */
static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
	struct hpet_data *hdp;
	acpi_status status;
	struct acpi_resource_address64 addr;

	hdp = data;

	status = acpi_resource_to_address64(res, &addr);

	if (ACPI_SUCCESS(status)) {
		hdp->hd_phys_address = addr.minimum;
		hdp->hd_address = ioremap(addr.minimum, addr.address_length);
		if (!hdp->hd_address)
			return AE_NO_MEMORY;

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32;

		fixmem32 = &res->data.fixed_memory32;

		hdp->hd_phys_address = fixmem32->address;
		hdp->hd_address = ioremap(fixmem32->address,
						HPET_RANGE_SIZE);
		if (!hdp->hd_address)
			return AE_NO_MEMORY;

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
		struct acpi_resource_extended_irq *irqp;
		int i, irq;

		irqp = &res->data.extended_irq;

		for (i = 0; i < irqp->interrupt_count; i++) {
			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
				      irqp->triggering, irqp->polarity);
			if (irq < 0)
				return AE_ERROR;

			hdp->hd_irq[hdp->hd_nirqs] = irq;
			hdp->hd_nirqs++;
		}
	}

	return AE_OK;
}
/*
 * ACPI probe: walk the device's _CRS, collect the MMIO range and IRQs
 * into a hpet_data, and hand it to hpet_alloc().  Unmaps and bails out
 * with -ENODEV if either the address or the IRQ list is missing.
 */
static int hpet_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	struct hpet_data data;

	memset(&data, 0, sizeof(data));

	result =
	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				hpet_resources, &data);

	if (ACPI_FAILURE(result))
		return -ENODEV;

	if (!data.hd_address || !data.hd_nirqs) {
		if (data.hd_address)
			iounmap(data.hd_address);
		printk("%s: no address or irqs in _CRS\n", __func__);
		return -ENODEV;
	}

	return hpet_alloc(&data);
}
/* Removal is not implemented; always refuse with -EINVAL. */
static int hpet_acpi_remove(struct acpi_device *device, int type)
{
	/* XXX need to unregister clocksource, dealloc mem, etc */
	return -EINVAL;
}
/* ACPI HID for HPET devices (PNP0103). */
static const struct acpi_device_id hpet_device_ids[] = {
	{"PNP0103", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, hpet_device_ids);

static struct acpi_driver hpet_acpi_driver = {
	.name = "hpet",
	.ids = hpet_device_ids,
	.ops = {
		.add = hpet_acpi_add,
		.remove = hpet_acpi_remove,
		},
};
static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
/*
 * Module init: register the misc device, the sysctl tree, and the
 * ACPI driver; unwind the first two if ACPI registration fails.
 */
static int __init hpet_init(void)
{
	int err;

	err = misc_register(&hpet_misc);
	if (err < 0)
		return -ENODEV;

	sysctl_header = register_sysctl_table(dev_root);

	err = acpi_bus_register_driver(&hpet_acpi_driver);
	if (err >= 0)
		return 0;

	/* ACPI registration failed: tear down what we set up above. */
	if (sysctl_header)
		unregister_sysctl_table(sysctl_header);
	misc_deregister(&hpet_misc);
	return err;
}
/* Module exit: unregister the ACPI driver, sysctl tree, misc device. */
static void __exit hpet_exit(void)
{
	acpi_bus_unregister_driver(&hpet_acpi_driver);

	if (sysctl_header)
		unregister_sysctl_table(sysctl_header);
	misc_deregister(&hpet_misc);

	return;
}
module_init(hpet_init);
module_exit(hpet_exit);
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Shabbypenguin/Kettle_Corn_Kernel | arch/sparc/kernel/ds.c | 1141 | 26161 | /* ds.c: Domain Services driver for Logical Domains
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>
#include <asm/hypervisor.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>
#include "kernel.h"
#define DRV_MODULE_NAME "ds"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Jul 11, 2007"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/*
 * On-the-wire message framing for the domain-services (DS) protocol
 * spoken over an LDC channel: every message starts with a ds_msg_tag
 * (type + length of the payload that follows the tag).
 */
struct ds_msg_tag {
	__u32			type;
#define DS_INIT_REQ		0x00
#define DS_INIT_ACK		0x01
#define DS_INIT_NACK		0x02
#define DS_REG_REQ		0x03
#define DS_REG_ACK		0x04
#define DS_REG_NACK		0x05
#define DS_UNREG_REQ		0x06
#define DS_UNREG_ACK		0x07
#define DS_UNREG_NACK		0x08
#define DS_DATA			0x09
#define DS_NACK			0x0a

	__u32			len;
};

/* Result codes */
#define DS_OK			0x00
#define DS_REG_VER_NACK		0x01
#define DS_REG_DUP		0x02
#define DS_INV_HDL		0x03
#define DS_TYPE_UNKNOWN		0x04

struct ds_version {
	__u16			major;
	__u16			minor;
};

/* Handshake: version negotiation request/ack/nack. */
struct ds_ver_req {
	struct ds_msg_tag	tag;
	struct ds_version	ver;
};

struct ds_ver_ack {
	struct ds_msg_tag	tag;
	__u16			minor;
};

struct ds_ver_nack {
	struct ds_msg_tag	tag;
	__u16			major;
};

/* Per-service registration; svc_id is a NUL-terminated string. */
struct ds_reg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
	__u16			minor;
	char			svc_id[0];
};

struct ds_reg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			minor;
};

struct ds_reg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
};

struct ds_unreg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

/* Service data: handle identifies the registered service. */
struct ds_data {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u64			result;
};

struct ds_info;
/*
 * Per-service registration state; data() is the payload handler
 * invoked when a DS_DATA message arrives for this service's handle.
 */
struct ds_cap_state {
	__u64			handle;

	void			(*data)(struct ds_info *dp,
					struct ds_cap_state *cp,
					void *buf, int len);

	const char		*service_id;

	u8			state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};
static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
static void ds_var_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
/*
 * Template of the DS services this driver offers; copied per channel
 * into ds_info::ds_states.  Array index becomes the upper 32 bits of
 * each service handle (see register_services()/find_cap()).
 */
static struct ds_cap_state ds_states_template[] = {
	{
		.service_id	= "md-update",
		.data		= md_update_data,
	},
	{
		.service_id	= "domain-shutdown",
		.data		= domain_shutdown_data,
	},
	{
		.service_id	= "domain-panic",
		.data		= domain_panic_data,
	},
#ifdef CONFIG_HOTPLUG_CPU
	{
		.service_id	= "dr-cpu",
		.data		= dr_cpu_data,
	},
#endif
	{
		.service_id	= "pri",
		.data		= ds_pri_data,
	},
	{
		.service_id	= "var-config",
		.data		= ds_var_data,
	},
	{
		.service_id	= "var-config-backup",
		.data		= ds_var_data,
	},
};
/* Protects the ds_info list, LDC sends, and handshake/service state. */
static DEFINE_SPINLOCK(ds_lock);

/* Per-LDC-channel domain-services connection state. */
struct ds_info {
	struct ldc_channel	*lp;
	u8			hs_state;	/* handshake progress */
#define DS_HS_START		0x01
#define DS_HS_DONE		0x02

	u64			id;

	void			*rcv_buf;
	int			rcv_buf_len;

	struct ds_cap_state	*ds_states;	/* copy of ds_states_template */
	int			num_ds_states;

	struct ds_info		*next;
};
static struct ds_info *ds_info_list;
/*
 * Look up a service by handle.  The upper 32 bits of the handle are
 * the index into ds_states; returns NULL if out of range.
 */
static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
	unsigned int idx = handle >> 32;

	if (idx < dp->num_ds_states)
		return &dp->ds_states[idx];

	return NULL;
}
/* Look up a service by its service_id string; NULL if not found. */
static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
					       const char *name)
{
	int idx;

	for (idx = 0; idx < dp->num_ds_states; idx++)
		if (!strcmp(dp->ds_states[idx].service_id, name))
			return &dp->ds_states[idx];

	return NULL;
}
/*
 * Write a message to the LDC channel, retrying on -EAGAIN (channel
 * busy) up to 1000 times with a 1us pause between attempts.  Caller
 * must hold ds_lock.  Returns the last ldc_write() result.
 */
static int __ds_send(struct ldc_channel *lp, void *data, int len)
{
	int retries = 1000;
	int err = -EINVAL;

	do {
		err = ldc_write(lp, data, len);
		if (err != -EAGAIN)
			break;
		udelay(1);
	} while (--retries > 0);

	return err;
}
/* Locked wrapper around __ds_send() for callers not holding ds_lock. */
static int ds_send(struct ldc_channel *lp, void *data, int len)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);
	err = __ds_send(lp, data, len);
	spin_unlock_irqrestore(&ds_lock, flags);

	return err;
}
/* "md-update" service: request/response payloads. */
struct ds_md_update_req {
	__u64				req_num;
};

struct ds_md_update_res {
	__u64				req_num;
	__u32				result;
};

/*
 * Handle an "md-update" notification: re-read the machine description
 * via mdesc_update(), then acknowledge the request with DS_OK.
 */
static void md_update_data(struct ds_info *dp,
			   struct ds_cap_state *cp,
			   void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_md_update_req *rp;
	struct {
		struct ds_data		data;
		struct ds_md_update_res	res;
	} pkt;

	/* Request payload immediately follows the ds_data header. */
	rp = (struct ds_md_update_req *) (dpkt + 1);

	printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);

	mdesc_update();

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;

	ds_send(lp, &pkt, sizeof(pkt));
}
/* "domain-shutdown" service: request/response payloads. */
struct ds_shutdown_req {
	__u64				req_num;
	__u32				ms_delay;
};

struct ds_shutdown_res {
	__u64				req_num;
	__u32				result;
	char				reason[1];
};

/*
 * Handle a shutdown request from the LDOM manager: ack it with DS_OK,
 * then trigger an orderly userspace-driven power-off.
 */
static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_shutdown_req *rp;
	struct {
		struct ds_data		data;
		struct ds_shutdown_res	res;
	} pkt;

	rp = (struct ds_shutdown_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%llu: Shutdown request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	orderly_poweroff(true);
}
/* "domain-panic" service: request/response payloads. */
struct ds_panic_req {
	__u64				req_num;
};

struct ds_panic_res {
	__u64				req_num;
	__u32				result;
	char				reason[1];
};

/*
 * Handle a panic request from the LDOM manager: ack it with DS_OK,
 * then panic the domain as requested.
 */
static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_panic_req *rp;
	struct {
		struct ds_data		data;
		struct ds_panic_res	res;
	} pkt;

	rp = (struct ds_panic_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%llu: Panic request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	panic("PANIC requested by LDOM manager.");
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * "dr-cpu" (dynamic CPU reconfiguration) service: request header and
 * per-CPU response entry layouts, plus the error-reply helper.
 */
struct dr_cpu_tag {
	__u64				req_num;

	__u32				type;
#define DR_CPU_CONFIGURE		0x43
#define DR_CPU_UNCONFIGURE		0x55
#define DR_CPU_FORCE_UNCONFIGURE	0x46
#define DR_CPU_STATUS			0x53

/* Responses */
#define DR_CPU_OK			0x6f
#define DR_CPU_ERROR			0x65

	__u32				num_records;
};

struct dr_cpu_resp_entry {
	__u32				cpu;
	__u32				result;
#define DR_CPU_RES_OK			0x00
#define DR_CPU_RES_FAILURE		0x01
#define DR_CPU_RES_BLOCKED		0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING	0x03
#define DR_CPU_RES_NOT_IN_MD		0x04

	__u32				stat;
#define DR_CPU_STAT_NOT_PRESENT		0x00
#define DR_CPU_STAT_UNCONFIGURED	0x01
#define DR_CPU_STAT_CONFIGURED		0x02

	__u32				str_off;
};

/*
 * Send a DR_CPU_ERROR reply (zero records) echoing the request number.
 * Caller must hold ds_lock; see dr_cpu_send_error() for the locked
 * variant.
 */
static void __dr_cpu_send_error(struct ds_info *dp,
				struct ds_cap_state *cp,
				struct ds_data *data)
{
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	struct {
		struct ds_data		data;
		struct dr_cpu_tag	tag;
	} pkt;
	int msg_len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.handle = cp->handle;
	pkt.tag.req_num = tag->req_num;
	pkt.tag.type = DR_CPU_ERROR;
	pkt.tag.num_records = 0;

	msg_len = (sizeof(struct ds_data) +
		   sizeof(struct dr_cpu_tag));

	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

	__ds_send(dp->lp, &pkt, msg_len);
}
/* Locked wrapper around __dr_cpu_send_error(). */
static void dr_cpu_send_error(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      struct ds_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	__dr_cpu_send_error(dp, cp, data);
	spin_unlock_irqrestore(&ds_lock, flags);
}
#define CPU_SENTINEL 0xffffffff
/*
 * Replace every duplicate CPU id in the request list with
 * CPU_SENTINEL, keeping only the first occurrence of each id.
 */
static void purge_dups(u32 *list, u32 num_ents)
{
	unsigned int i, j;

	for (i = 0; i < num_ents; i++) {
		u32 cur = list[i];

		if (cur == CPU_SENTINEL)
			continue;

		for (j = i + 1; j < num_ents; j++)
			if (list[j] == cur)
				list[j] = CPU_SENTINEL;
	}
}
/* Byte size of a dr-cpu response carrying one entry per CPU. */
static int dr_cpu_size_response(int ncpus)
{
	return sizeof(struct ds_data) + sizeof(struct dr_cpu_tag) +
	       ncpus * sizeof(struct dr_cpu_resp_entry);
}
/*
 * Fill in a DR_CPU_OK response skeleton: header, tag, and one entry
 * per CPU in *mask initialized to DR_CPU_RES_OK / default_stat.
 * Individual entries are overwritten later via dr_cpu_mark().
 */
static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
				 u64 handle, int resp_len, int ncpus,
				 cpumask_t *mask, u32 default_stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i, cpu;

	/* Tag follows the ds_data header; entries follow the tag. */
	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	resp->tag.type = DS_DATA;
	resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
	resp->handle = handle;
	tag->req_num = req_num;
	tag->type = DR_CPU_OK;
	tag->num_records = ncpus;

	i = 0;
	for_each_cpu(cpu, mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
		i++;
	}
	BUG_ON(i != ncpus);
}
/*
 * Overwrite the result/stat fields of the response entry matching
 * `cpu`; no-op if no entry for that CPU exists.
 */
static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
			u32 res, u32 stat)
{
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (resp + 1);
	struct dr_cpu_resp_entry *ent = (struct dr_cpu_resp_entry *) (tag + 1);
	int idx;

	for (idx = 0; idx < ncpus; idx++) {
		if (ent[idx].cpu == cpu) {
			ent[idx].result = res;
			ent[idx].stat = stat;
			break;
		}
	}
}
/*
 * Bring every CPU in *mask online: populate the MD-derived present
 * mask and per-cpu data, cpu_up() each CPU, record per-CPU failures
 * in the response, send it, then rebalance IRQs.  Returns 0 or
 * -ENOMEM if the response buffer cannot be allocated.
 */
static int __cpuinit dr_cpu_configure(struct ds_info *dp,
				      struct ds_cap_state *cp,
				      u64 req_num,
				      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* Optimistically mark every CPU configured; fix up on failure. */
	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_CONFIGURED);

	mdesc_populate_present_mask(mask);
	mdesc_fill_in_cpu_data(mask);

	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
		       dp->id, cpu);
		err = cpu_up(cpu);
		if (err) {
			__u32 res = DR_CPU_RES_FAILURE;
			__u32 stat = DR_CPU_STAT_UNCONFIGURED;

			if (!cpu_present(cpu)) {
				/* CPU not present in MD */
				res = DR_CPU_RES_NOT_IN_MD;
				stat = DR_CPU_STAT_NOT_PRESENT;
			} else if (err == -ENODEV) {
				/* CPU did not call in successfully */
				res = DR_CPU_RES_CPU_NOT_RESPONDING;
			}

			printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
			       dp->id, err);
			dr_cpu_mark(resp, cpu, ncpus, res, stat);
		}
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	/* Redistribute IRQs, taking into account the new cpus.  */
	fixup_irqs();

	return 0;
}
/*
 * Take every CPU in *mask offline via cpu_down(), recording per-CPU
 * failures in the response, and send it.  Returns 0 or -ENOMEM.
 */
static int dr_cpu_unconfigure(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      u64 req_num,
			      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* Optimistically mark every CPU unconfigured; fix up on failure. */
	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);

	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
		       dp->id, cpu);
		err = cpu_down(cpu);
		if (err)
			dr_cpu_mark(resp, cpu, ncpus,
				    DR_CPU_RES_FAILURE,
				    DR_CPU_STAT_CONFIGURED);
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	return 0;
}
/*
 * "dr-cpu" payload handler: validate the request type, de-duplicate
 * and bound-check the CPU list, build a cpumask, and dispatch to
 * configure/unconfigure.  Unknown types and handler failures are
 * answered with DR_CPU_ERROR.
 */
static void __cpuinit dr_cpu_data(struct ds_info *dp,
				  struct ds_cap_state *cp,
				  void *buf, int len)
{
	struct ds_data *data = buf;
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	u32 *cpu_list = (u32 *) (tag + 1);
	u64 req_num = tag->req_num;
	cpumask_t mask;
	unsigned int i;
	int err;

	switch (tag->type) {
	case DR_CPU_CONFIGURE:
	case DR_CPU_UNCONFIGURE:
	case DR_CPU_FORCE_UNCONFIGURE:
		break;

	default:
		dr_cpu_send_error(dp, cp, data);
		return;
	}

	purge_dups(cpu_list, tag->num_records);

	cpumask_clear(&mask);
	for (i = 0; i < tag->num_records; i++) {
		if (cpu_list[i] == CPU_SENTINEL)
			continue;

		/* Silently drop CPU ids beyond what this kernel supports. */
		if (cpu_list[i] < nr_cpu_ids)
			cpumask_set_cpu(cpu_list[i], &mask);
	}

	if (tag->type == DR_CPU_CONFIGURE)
		err = dr_cpu_configure(dp, cp, req_num, &mask);
	else
		err = dr_cpu_unconfigure(dp, cp, req_num, &mask);

	if (err)
		dr_cpu_send_error(dp, cp, data);
}
#endif /* CONFIG_HOTPLUG_CPU */
/* "pri" service message header. */
struct ds_pri_msg {
	__u64				req_num;
	__u64				type;
#define DS_PRI_REQUEST			0x00
#define DS_PRI_DATA			0x01
#define DS_PRI_UPDATE			0x02
};

/* "pri" payload handler: currently just logs the message. */
static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_pri_msg *rp;

	rp = (struct ds_pri_msg *) (dpkt + 1);

	printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
	       dp->id, rp->req_num, rp->type, len);
}
/* "var-config" service: set/delete variable messages and responses. */
struct ds_var_hdr {
	__u32				type;
#define DS_VAR_SET_REQ			0x00
#define DS_VAR_DELETE_REQ		0x01
#define DS_VAR_SET_RESP			0x02
#define DS_VAR_DELETE_RESP		0x03
};

struct ds_var_set_msg {
	struct ds_var_hdr		hdr;
	char				name_and_value[0];
};

struct ds_var_delete_msg {
	struct ds_var_hdr		hdr;
	char				name[0];
};

struct ds_var_resp {
	struct ds_var_hdr		hdr;
	__u32				result;
#define DS_VAR_SUCCESS			0x00
#define DS_VAR_NO_SPACE			0x01
#define DS_VAR_INVALID_VAR		0x02
#define DS_VAR_INVALID_VAL		0x03
#define DS_VAR_NOT_PRESENT		0x04
};

/* Serializes ldom_set_var() requests. */
static DEFINE_MUTEX(ds_var_mutex);
/* Set to 1 by ds_var_data() when a response arrives (polled). */
static int ds_var_doorbell;
static int ds_var_response;

/*
 * "var-config"/"var-config-backup" payload handler: record the result
 * of a set/delete request and ring the doorbell ldom_set_var() polls.
 */
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_var_resp *rp;

	rp = (struct ds_var_resp *) (dpkt + 1);

	if (rp->hdr.type != DS_VAR_SET_RESP &&
	    rp->hdr.type != DS_VAR_DELETE_RESP)
		return;

	ds_var_response = rp->result;
	/* Ensure the result is visible before the doorbell. */
	wmb();
	ds_var_doorbell = 1;
}
/*
 * Ask the LDOM manager (via the registered "var-config" service, or
 * "var-config-backup" as a fallback) to set an OBP variable.  Waits,
 * with a bounded busy-poll, for the response and logs on failure.
 *
 * Fix vs. the original: the two strcpy()s below copied caller-supplied
 * strings into a fixed 512-byte packet with no bound, overflowing the
 * on-stack buffer for long var/value pairs; reject oversized requests
 * up front.
 */
void ldom_set_var(const char *var, const char *value)
{
	struct ds_cap_state *cp;
	struct ds_info *dp;
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	cp = NULL;
	for (dp = ds_info_list; dp; dp = dp->next) {
		struct ds_cap_state *tmp;

		tmp = find_cap_by_string(dp, "var-config");
		if (tmp && tmp->state == CAP_STATE_REGISTERED) {
			cp = tmp;
			break;
		}
	}
	if (!cp) {
		for (dp = ds_info_list; dp; dp = dp->next) {
			struct ds_cap_state *tmp;

			tmp = find_cap_by_string(dp, "var-config-backup");
			if (tmp && tmp->state == CAP_STATE_REGISTERED) {
				cp = tmp;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ds_lock, flags);

	if (cp) {
		union {
			struct {
				struct ds_data		data;
				struct ds_var_set_msg	msg;
			} header;
			char			all[512];
		} pkt;
		char *base, *p;
		int msg_len, loops;

		/*
		 * Name and value (each NUL-terminated) must fit in the
		 * space remaining after the packet header.
		 */
		if (strlen(var) + strlen(value) + 2 >
		    sizeof(pkt) - sizeof(pkt.header)) {
			printk(KERN_ERR PFX "contents length: %zu, "
			       "which exceeds max: %lu, "
			       "so could not set (%s) variable to (%s).\n",
			       strlen(var) + strlen(value) + 2,
			       sizeof(pkt) - sizeof(pkt.header), var, value);
			return;
		}

		memset(&pkt, 0, sizeof(pkt));
		pkt.header.data.tag.type = DS_DATA;
		pkt.header.data.handle = cp->handle;
		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
		base = p = &pkt.header.msg.name_and_value[0];
		strcpy(p, var);
		p += strlen(var) + 1;
		strcpy(p, value);
		p += strlen(value) + 1;

		msg_len = (sizeof(struct ds_data) +
			   sizeof(struct ds_var_set_msg) +
			   (p - base));
		msg_len = (msg_len + 3) & ~3;	/* pad to 4-byte multiple */
		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

		mutex_lock(&ds_var_mutex);

		spin_lock_irqsave(&ds_lock, flags);
		ds_var_doorbell = 0;
		ds_var_response = -1;

		__ds_send(dp->lp, &pkt, msg_len);
		spin_unlock_irqrestore(&ds_lock, flags);

		/* Busy-poll for the response for up to ~100ms. */
		loops = 1000;
		while (ds_var_doorbell == 0) {
			if (loops-- < 0)
				break;
			barrier();
			udelay(100);
		}

		mutex_unlock(&ds_var_mutex);

		if (ds_var_doorbell == 0 ||
		    ds_var_response != DS_VAR_SUCCESS)
			printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
			       "failed, response(%d).\n",
			       dp->id, var, value,
			       ds_var_response);
	} else {
		printk(KERN_ERR PFX "var-config not registered so "
		       "could not set (%s) variable to (%s).\n",
		       var, value);
	}
}
static char full_boot_str[256] __attribute__((aligned(32)));
static int reboot_data_supported;
/*
 * Reboot the domain.  If a boot command was given, record it either
 * through the sun4v reboot-data hypervisor API (when available) or
 * via the "reboot-command" OBP variable, then issue a soft reset.
 */
void ldom_reboot(const char *boot_command)
{
	/* Don't bother with any of this if the boot_command
	 * is empty.
	 */
	if (boot_command && strlen(boot_command)) {
		unsigned long len;

		strcpy(full_boot_str, "boot ");
		strcpy(full_boot_str + strlen("boot "), boot_command);
		len = strlen(full_boot_str);

		if (reboot_data_supported) {
			unsigned long ra = kimage_addr_to_ra(full_boot_str);
			unsigned long hv_ret;

			hv_ret = sun4v_reboot_data_set(ra, len);
			if (hv_ret != HV_EOK)
				pr_err("SUN4V: Unable to set reboot data "
				       "hv_ret=%lu\n", hv_ret);
		} else {
			ldom_set_var("reboot-command", full_boot_str);
		}
	}
	sun4v_mach_sir();
}
/* Exit the domain via the sun4v machine-exit hypercall. */
void ldom_power_off(void)
{
	sun4v_mach_exit(0);
}
/* Connection-reset stub: currently only logs the caller's address. */
static void ds_conn_reset(struct ds_info *dp)
{
	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
	       dp->id, __builtin_return_address(0));
}
/*
 * Send a DS_REG_REQ for every service not yet registered on this
 * channel.  Each handle packs the service's array index in the upper
 * 32 bits (find_cap() relies on this) and a sched_clock()-derived
 * nonce in the lower 32.  Always returns 0; per-service state moves
 * to CAP_STATE_REG_SENT only when the send succeeded.
 */
static int register_services(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct {
			struct ds_reg_req req;
			u8 id_buf[256];
		} pbuf;
		struct ds_cap_state *cp = &dp->ds_states[i];
		int err, msg_len;
		u64 new_count;

		if (cp->state == CAP_STATE_REGISTERED)
			continue;

		new_count = sched_clock() & 0xffffffff;
		cp->handle = ((u64) i << 32) | new_count;

		msg_len = (sizeof(struct ds_reg_req) +
			   strlen(cp->service_id));

		memset(&pbuf, 0, sizeof(pbuf));
		pbuf.req.tag.type = DS_REG_REQ;
		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
		pbuf.req.handle = cp->handle;
		pbuf.req.major = 1;
		pbuf.req.minor = 0;
		strcpy(pbuf.req.svc_id, cp->service_id);

		err = __ds_send(lp, &pbuf, msg_len);
		if (err > 0)
			cp->state = CAP_STATE_REG_SENT;
	}
	return 0;
}
/* Drive the domain-services handshake state machine for one incoming
 * control packet.  Returns 0 normally and -ECONNRESET when the packet
 * is illegal for the current handshake state. */
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
    if (dp->hs_state == DS_HS_START) {
        /* Only an INIT ACK is legal before the handshake is done. */
        if (pkt->type != DS_INIT_ACK)
            goto conn_reset;

        dp->hs_state = DS_HS_DONE;
        return register_services(dp);
    }

    if (dp->hs_state != DS_HS_DONE)
        goto conn_reset;

    if (pkt->type == DS_REG_ACK) {
        struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
        struct ds_cap_state *cp = find_cap(dp, ap->handle);

        if (!cp) {
            printk(KERN_ERR "ds-%llu: REG ACK for unknown "
                   "handle %llx\n", dp->id, ap->handle);
            return 0;
        }
        printk(KERN_INFO "ds-%llu: Registered %s service.\n",
               dp->id, cp->service_id);
        cp->state = CAP_STATE_REGISTERED;
    } else if (pkt->type == DS_REG_NACK) {
        struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
        struct ds_cap_state *cp = find_cap(dp, np->handle);

        if (!cp) {
            printk(KERN_ERR "ds-%llu: REG NACK for "
                   "unknown handle %llx\n",
                   dp->id, np->handle);
            return 0;
        }
        /* Registration refused: capability goes back to unknown. */
        cp->state = CAP_STATE_UNKNOWN;
    }
    return 0;

conn_reset:
    ds_conn_reset(dp);
    return -ECONNRESET;
}
/* Send a DS_NACK (result DS_INV_HDL) for @handle; used when data
 * arrives for a handle we do not know.  Callers hold ds_lock (see
 * ds_event() and process_ds_work()). */
static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
    struct ds_data_nack nack = {
        .tag = {
            .type = DS_NACK,
            .len = (sizeof(struct ds_data_nack) -
                    sizeof(struct ds_msg_tag)),
        },
        .handle = handle,
        .result = DS_INV_HDL,
    };

    __ds_send(dp->lp, &nack, sizeof(nack));
}
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);

/* One queued domain-services data packet awaiting ds_thread; the raw
 * packet is copied into the trailing array. */
struct ds_queue_entry {
    struct list_head list;
    struct ds_info *dp;
    int req_len;    /* bytes copied into req[] */
    int __pad;      /* keep req[] 8-byte aligned */
    u64 req[];      /* C99 flexible array member instead of the
                     * deprecated GNU zero-length req[0]; identical
                     * layout and sizeof */
};
/* Drain the pending work list and dispatch each queued packet to its
 * capability's data() callback, NACKing packets whose handle is
 * unknown. */
static void process_ds_work(void)
{
    struct ds_queue_entry *qp, *tmp;
    unsigned long flags;
    LIST_HEAD(todo);

    /* Steal the entire list so callbacks run without ds_lock held. */
    spin_lock_irqsave(&ds_lock, flags);
    list_splice_init(&ds_work_list, &todo);
    spin_unlock_irqrestore(&ds_lock, flags);

    list_for_each_entry_safe(qp, tmp, &todo, list) {
        struct ds_data *dpkt = (struct ds_data *) qp->req;
        struct ds_info *dp = qp->dp;
        struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
        int req_len = qp->req_len;

        if (!cp) {
            printk(KERN_ERR "ds-%llu: Data for unknown "
                   "handle %llu\n",
                   dp->id, dpkt->handle);

            /* __send_ds_nack() expects ds_lock held. */
            spin_lock_irqsave(&ds_lock, flags);
            __send_ds_nack(dp, dpkt->handle);
            spin_unlock_irqrestore(&ds_lock, flags);
        } else {
            cp->data(dp, cp, dpkt, req_len);
        }

        list_del(&qp->list);
        kfree(qp);
    }
}
/* Worker thread: sleep until ds_data() queues work, then process it.
 * Exits when kthread_stop() is requested. */
static int ds_thread(void *__unused)
{
    DEFINE_WAIT(wait);

    while (1) {
        prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
        if (list_empty(&ds_work_list))
            schedule();
        finish_wait(&ds_wait, &wait);

        if (kthread_should_stop())
            break;

        process_ds_work();
    }

    return 0;
}
/* Queue an incoming data packet for ds_thread.  Runs from ds_event()
 * under ds_lock, hence GFP_ATOMIC.  On allocation failure the packet
 * is NACKed instead of queued. */
static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
    struct ds_data *dpkt = (struct ds_data *) pkt;
    struct ds_queue_entry *qp;

    qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
    if (!qp) {
        __send_ds_nack(dp, dpkt->handle);
    } else {
        qp->dp = dp;
        /* Fix: req_len was never initialized, yet process_ds_work()
         * passes it to the capability data() callback. */
        qp->req_len = len;
        memcpy(&qp->req, pkt, len);
        list_add_tail(&qp->list, &ds_work_list);
        wake_up(&ds_wait);
    }
    return 0;
}
/* Start the handshake on a freshly-up LDC channel by sending a
 * DS_INIT_REQ advertising protocol version 1.0. */
static void ds_up(struct ds_info *dp)
{
    struct ldc_channel *lp = dp->lp;
    struct ds_ver_req req;
    int err;

    req.tag.type = DS_INIT_REQ;
    req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
    req.ver.major = 1;
    req.ver.minor = 0;

    err = __ds_send(lp, &req, sizeof(req));
    if (err > 0)
        dp->hs_state = DS_HS_START;
}
/* After an LDC channel reset: drop the handshake back to its initial
 * state and invalidate every service capability. */
static void ds_reset(struct ds_info *dp)
{
    int idx;

    dp->hs_state = 0;

    for (idx = 0; idx < dp->num_ds_states; idx++)
        dp->ds_states[idx].state = CAP_STATE_UNKNOWN;
}
/* LDC event callback: handles channel up/reset notifications and
 * drains incoming packets, all under ds_lock. */
static void ds_event(void *arg, int event)
{
    struct ds_info *dp = arg;
    struct ldc_channel *lp = dp->lp;
    unsigned long flags;
    int err;

    spin_lock_irqsave(&ds_lock, flags);

    if (event == LDC_EVENT_UP) {
        ds_up(dp);
        spin_unlock_irqrestore(&ds_lock, flags);
        return;
    }

    if (event == LDC_EVENT_RESET) {
        ds_reset(dp);
        spin_unlock_irqrestore(&ds_lock, flags);
        return;
    }

    if (event != LDC_EVENT_DATA_READY) {
        printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
               dp->id, event);
        spin_unlock_irqrestore(&ds_lock, flags);
        return;
    }

    err = 0;
    while (1) {
        struct ds_msg_tag *tag;

        /* Read the fixed-size tag first... */
        err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

        if (unlikely(err < 0)) {
            if (err == -ECONNRESET)
                ds_conn_reset(dp);
            break;
        }
        if (err == 0)
            break;

        tag = dp->rcv_buf;
        /* ...then the variable-length payload that follows it. */
        err = ldc_read(lp, tag + 1, tag->len);

        if (unlikely(err < 0)) {
            if (err == -ECONNRESET)
                ds_conn_reset(dp);
            break;
        }
        if (err < tag->len)
            break;

        /* Types below DS_DATA are handshake/control packets. */
        if (tag->type < DS_DATA)
            err = ds_handshake(dp, dp->rcv_buf);
        else
            err = ds_data(dp, dp->rcv_buf,
                          sizeof(*tag) + err);
        if (err == -ECONNRESET)
            break;
    }

    spin_unlock_irqrestore(&ds_lock, flags);
}
/* vio probe: allocate per-port state, read the port id from the MD,
 * set up the LDC channel and link the new ds_info onto ds_info_list.
 * Error paths unwind in strict reverse order of acquisition. */
static int __devinit ds_probe(struct vio_dev *vdev,
        const struct vio_device_id *id)
{
    static int ds_version_printed;
    struct ldc_channel_config ds_cfg = {
        .event = ds_event,
        .mtu = 4096,
        .mode = LDC_MODE_STREAM,
    };
    struct mdesc_handle *hp;
    struct ldc_channel *lp;
    struct ds_info *dp;
    const u64 *val;
    int err, i;

    if (ds_version_printed++ == 0)
        printk(KERN_INFO "%s", version);

    dp = kzalloc(sizeof(*dp), GFP_KERNEL);
    err = -ENOMEM;
    if (!dp)
        goto out_err;

    /* Port id comes from the machine description, if present. */
    hp = mdesc_grab();
    val = mdesc_get_property(hp, vdev->mp, "id", NULL);
    if (val)
        dp->id = *val;
    mdesc_release(hp);

    /* Receive buffer sized to match the LDC MTU above. */
    dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
    if (!dp->rcv_buf)
        goto out_free_dp;

    dp->rcv_buf_len = 4096;

    dp->ds_states = kzalloc(sizeof(ds_states_template),
                            GFP_KERNEL);
    if (!dp->ds_states)
        goto out_free_rcv_buf;

    memcpy(dp->ds_states, ds_states_template,
           sizeof(ds_states_template));
    dp->num_ds_states = ARRAY_SIZE(ds_states_template);

    /* Seed a provisional handle per capability; register_services()
     * replaces the low bits on each registration attempt. */
    for (i = 0; i < dp->num_ds_states; i++)
        dp->ds_states[i].handle = ((u64)i << 32);

    ds_cfg.tx_irq = vdev->tx_irq;
    ds_cfg.rx_irq = vdev->rx_irq;

    lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
    if (IS_ERR(lp)) {
        err = PTR_ERR(lp);
        goto out_free_ds_states;
    }
    dp->lp = lp;

    err = ldc_bind(lp, "DS");
    if (err)
        goto out_free_ldc;

    spin_lock_irq(&ds_lock);
    dp->next = ds_info_list;
    ds_info_list = dp;
    spin_unlock_irq(&ds_lock);

    return err;

out_free_ldc:
    ldc_free(dp->lp);

out_free_ds_states:
    kfree(dp->ds_states);

out_free_rcv_buf:
    kfree(dp->rcv_buf);

out_free_dp:
    kfree(dp);

out_err:
    return err;
}
/* Required vio remove callback.  NOTE(review): resources acquired in
 * ds_probe() are never released here - confirm ports cannot be
 * hot-removed. */
static int ds_remove(struct vio_dev *vdev)
{
    return 0;
}
/* Match "domain-services-port" nodes in the machine description. */
static const struct vio_device_id ds_match[] = {
    {
        .type = "domain-services-port",
    },
    {},
};

/* vio driver glue binding ds_probe/ds_remove to matching ports. */
static struct vio_driver ds_driver = {
    .id_table = ds_match,
    .probe = ds_probe,
    .remove = ds_remove,
    .driver = {
        .name = "ds",
        .owner = THIS_MODULE,
    }
};
/* Init: probe for the hypervisor reboot-data API group, start the DS
 * worker thread and register the vio driver. */
static int __init ds_init(void)
{
    unsigned long hv_ret, major, minor;

    if (tlb_type == hypervisor) {
        hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
        if (hv_ret == HV_EOK) {
            pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
                    major, minor);
            reboot_data_supported = 1;
        }
    }
    kthread_run(ds_thread, NULL, "kldomd");

    return vio_register_driver(&ds_driver);
}
subsys_initcall(ds_init);
| gpl-2.0 |
IndieBeto/StockLP | drivers/media/platform/msm/wfd/wfd-ioctl.c | 1397 | 47396 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <mach/board.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-msm-mem.h>
#include "wfd-util.h"
#include "mdp-subdev.h"
#include "enc-subdev.h"
#include "vsg-subdev.h"
#define WFD_VERSION KERNEL_VERSION(0, 0, 1)
#define WFD_NUM_DEVICES 2
#define WFD_DEVICE_NUMBER_BASE 38
#define WFD_DEVICE_SECURE (WFD_DEVICE_NUMBER_BASE + 1)
#define DEFAULT_WFD_WIDTH 1280
#define DEFAULT_WFD_HEIGHT 720
#define VENC_INPUT_BUFFERS 4
#define MAX_EVENTS 16
/* Per-device state for one wifi-display v4l2 device node. */
struct wfd_device {
    struct mutex dev_lock;              /* device-level lock */
    struct platform_device *pdev;
    struct v4l2_device v4l2_dev;
    struct video_device *pvdev;
    struct v4l2_subdev mdp_sdev;        /* display (MDP) subdev */
    struct v4l2_subdev enc_sdev;        /* video encoder subdev */
    struct v4l2_subdev vsg_sdev;        /* video stream generator subdev */
    struct ion_client *ion_client;
    bool secure;                        /* allocate from the secure ION heap */
    bool in_use;
    bool mdp_iommu_split_domain;
};
/* Userspace buffer identification, copied in from
 * v4l2_buffer->reserved (see wfd_register_out_buf()). */
struct mem_info {
    u32 fd;        /* buffer fd */
    u32 offset;    /* offset into the buffer */
};

/* Links a userspace pointer to its mem_info on wfd_inst->minfo_list. */
struct mem_info_entry {
    struct list_head list;
    unsigned long userptr;
    struct mem_info minfo;
};

/* One input buffer mapped into both the encoder and MDP domains. */
struct mem_region_pair {
    struct mem_region *enc;
    struct mem_region *mdp;
    struct list_head list;
};
/* Per-open instance state. */
struct wfd_inst {
    struct vb2_queue vid_bufq;
    struct mutex lock;                  /* guards instance fields */
    struct mutex vb2_lock;              /* serializes vb2 qbuf/dqbuf */
    u32 buf_count;
    struct task_struct *mdp_task;       /* mdp_output_thread */
    void *mdp_inst;
    void *venc_inst;
    u32 height;
    u32 width;
    u32 pixelformat;
    struct list_head minfo_list;        /* registered output buffers */
    bool streamoff;
    u32 input_bufs_allocated;           /* used as a bool flag */
    u32 input_buf_size;
    u32 out_buf_size;
    struct list_head input_mem_list;    /* mem_region_pair entries */
    struct wfd_stats stats;
    struct completion stop_mdp_thread;
    struct v4l2_fh event_handler;       /* filp->private_data points here */
};

/* Thin wrapper around the vb2 buffer. */
struct wfd_vid_buffer {
    struct vb2_buffer vidbuf;
};
/* Recover the wfd_inst from a struct file: private_data points at the
 * embedded v4l2_fh event_handler. */
static inline struct wfd_inst *file_to_inst(struct file *filp)
{
    return container_of(filp->private_data, struct wfd_inst, event_handler);
}
/* vb2 queue_setup: single-plane buffers sized to the negotiated
 * output (bitstream) buffer size. */
static int wfd_vidbuf_queue_setup(struct vb2_queue *q,
        const struct v4l2_format *fmt,
        unsigned int *num_buffers,
        unsigned int *num_planes,
        unsigned int sizes[], void *alloc_ctxs[])
{
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_inst *inst = file_to_inst(priv_data);
    int i;

    WFD_MSG_DBG("In %s\n", __func__);
    if (num_buffers == NULL || num_planes == NULL)
        return -EINVAL;

    *num_planes = 1;
    mutex_lock(&inst->lock);
    /* out_buf_size is set by wfdioc_s_fmt(); guard with inst->lock. */
    for (i = 0; i < *num_planes; ++i) {
        sizes[i] = inst->out_buf_size;
        alloc_ctxs[i] = inst;
    }
    mutex_unlock(&inst->lock);

    return 0;
}
/* vb2 wait_prepare: drop vb2_lock around a blocking dequeue wait. */
static void wfd_vidbuf_wait_prepare(struct vb2_queue *q)
{
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_inst *inst = file_to_inst(priv_data);

    mutex_unlock(&inst->vb2_lock);
}

/* vb2 wait_finish: re-take vb2_lock after the blocking wait. */
static void wfd_vidbuf_wait_finish(struct vb2_queue *q)
{
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_inst *inst = file_to_inst(priv_data);

    mutex_lock(&inst->vb2_lock);
}
static unsigned long wfd_enc_addr_to_mdp_addr(struct wfd_inst *inst,
unsigned long addr)
{
struct list_head *ptr, *next;
struct mem_region_pair *mpair;
if (!list_empty(&inst->input_mem_list)) {
list_for_each_safe(ptr, next,
&inst->input_mem_list) {
mpair = list_entry(ptr, struct mem_region_pair,
list);
if (mpair->enc->paddr == (u8 *)addr)
return (unsigned long)mpair->mdp->paddr;
}
}
return (unsigned long)NULL;
}
#ifdef CONFIG_MSM_WFD_DEBUG
/* Debug builds keep a kernel mapping of ION buffers so their contents
 * can be inspected. */
static void *wfd_map_kernel(struct ion_client *client,
        struct ion_handle *handle)
{
    return ion_map_kernel(client, handle);
}

static void wfd_unmap_kernel(struct ion_client *client,
        struct ion_handle *handle)
{
    ion_unmap_kernel(client, handle);
}
#else
/* Non-debug builds never map into the kernel; callers must tolerate a
 * NULL kvaddr. */
static void *wfd_map_kernel(struct ion_client *client,
        struct ion_handle *handle)
{
    return NULL;
}

static void wfd_unmap_kernel(struct ion_client *client,
        struct ion_handle *handle)
{
    return;
}
#endif
/* Allocate an ION buffer for @mregion, from the secure MM heap (1MB
 * aligned, no kernel mapping) when @secure, otherwise from the IOMMU
 * heap (4K aligned, kernel-mapped in debug builds).  Returns 0 on
 * success, negative errno on failure. */
static int wfd_allocate_ion_buffer(struct ion_client *client,
        bool secure, struct mem_region *mregion)
{
    struct ion_handle *handle = NULL;
    unsigned int alloc_regions = 0, ion_flags = 0, align = 0;
    int rc = 0;

    if (secure) {
        alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID);
        ion_flags = ION_FLAG_SECURE;
        align = SZ_1M;
    } else {
        alloc_regions = ION_HEAP(ION_IOMMU_HEAP_ID);
        align = SZ_4K;
    }

    handle = ion_alloc(client, mregion->size, align,
                       alloc_regions, ion_flags);
    if (IS_ERR_OR_NULL(handle)) {
        WFD_MSG_ERR("Failed to allocate input buffer\n");
        /* Fix: PTR_ERR(NULL) is 0, which the caller would treat as
         * success; report -ENOMEM for a NULL handle instead. */
        rc = IS_ERR(handle) ? PTR_ERR(handle) : -ENOMEM;
        goto alloc_fail;
    }

    mregion->kvaddr = secure ? NULL :
        wfd_map_kernel(client, handle);
    mregion->ion_handle = handle;

    return rc;

alloc_fail:
    if (!IS_ERR_OR_NULL(handle)) {
        if (!IS_ERR_OR_NULL(mregion->kvaddr))
            wfd_unmap_kernel(client, handle);

        ion_free(client, handle);
        mregion->kvaddr = NULL;
        mregion->paddr = NULL;
        mregion->ion_handle = NULL;
    }

    return rc;
}
/* Doesn't do iommu unmap */
/* Release the kernel mapping (if any) and free the ION handle backing
 * @mregion. */
static int wfd_free_ion_buffer(struct ion_client *client,
        struct mem_region *mregion)
{
    if (!client || !mregion) {
        WFD_MSG_ERR("Failed to free ion buffer: "
                    "Invalid client or region");
        return -EINVAL;
    }

    if (!IS_ERR_OR_NULL(mregion->kvaddr))
        wfd_unmap_kernel(client, mregion->ion_handle);
    ion_free(client, mregion->ion_handle);
    return 0;
}
/* Invalidate CPU caches for @mregion so the CPU sees data written by
 * hardware.  Only valid for ION-backed regions. */
static int wfd_flush_ion_buffer(struct ion_client *client,
        struct mem_region *mregion)
{
    if (!client || !mregion) {
        WFD_MSG_ERR("Failed to flush ion buffer: "
                    "Invalid client or region");
        return -EINVAL;
    } else if (!mregion->ion_handle) {
        WFD_MSG_ERR("Failed to flush ion buffer: "
                    "not an ion buffer");
        return -EINVAL;
    }

    return msm_ion_do_cache_op(client,
            mregion->ion_handle,
            mregion->kvaddr,
            mregion->size,
            ION_IOC_INV_CACHES);
}
static int wfd_allocate_input_buffers(struct wfd_device *wfd_dev,
struct wfd_inst *inst)
{
int i;
struct mem_region *enc_mregion, *mdp_mregion;
struct mem_region_pair *mpair;
int rc;
struct mdp_buf_info mdp_buf = {0};
struct mem_region_map mmap_context = {0};
mutex_lock(&inst->lock);
if (inst->input_bufs_allocated) {
mutex_unlock(&inst->lock);
return 0;
}
inst->input_bufs_allocated = true;
mutex_unlock(&inst->lock);
for (i = 0; i < VENC_INPUT_BUFFERS; ++i) {
mpair = kzalloc(sizeof(*mpair), GFP_KERNEL);
enc_mregion = kzalloc(sizeof(*enc_mregion), GFP_KERNEL);
mdp_mregion = kzalloc(sizeof(*enc_mregion), GFP_KERNEL);
enc_mregion->size = ALIGN(inst->input_buf_size, SZ_4K);
rc = wfd_allocate_ion_buffer(wfd_dev->ion_client,
wfd_dev->secure, enc_mregion);
if (rc) {
WFD_MSG_ERR("Failed to allocate input memory\n");
goto alloc_fail;
}
mmap_context.mregion = enc_mregion;
mmap_context.ion_client = wfd_dev->ion_client;
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
ENC_MMAP, &mmap_context);
if (rc) {
WFD_MSG_ERR("Failed to map input memory\n");
goto alloc_fail;
} else if (!enc_mregion->paddr) {
WFD_MSG_ERR("ENC_MMAP returned success" \
"but failed to map input memory\n");
rc = -EINVAL;
goto alloc_fail;
}
WFD_MSG_DBG("NOTE: enc paddr = [%p->%p], kvaddr = %p\n",
enc_mregion->paddr, (int8_t *)
enc_mregion->paddr + enc_mregion->size,
enc_mregion->kvaddr);
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
SET_INPUT_BUFFER, (void *)enc_mregion);
if (rc) {
WFD_MSG_ERR("Setting enc input buffer failed\n");
goto set_input_fail;
}
/* map the buffer from encoder to mdp */
mdp_mregion->kvaddr = enc_mregion->kvaddr;
mdp_mregion->size = enc_mregion->size;
mdp_mregion->offset = enc_mregion->offset;
mdp_mregion->fd = enc_mregion->fd;
mdp_mregion->cookie = 0;
mdp_mregion->ion_handle = enc_mregion->ion_handle;
memset(&mmap_context, 0, sizeof(mmap_context));
mmap_context.mregion = mdp_mregion;
mmap_context.ion_client = wfd_dev->ion_client;
mmap_context.cookie = inst->mdp_inst;
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_MMAP, (void *)&mmap_context);
if (rc) {
WFD_MSG_ERR(
"Failed to map to mdp, rc = %d, paddr = 0x%p\n",
rc, mdp_mregion->paddr);
mdp_mregion->kvaddr = NULL;
mdp_mregion->paddr = NULL;
mdp_mregion->ion_handle = NULL;
goto mdp_mmap_fail;
} else if (!mdp_mregion->paddr) {
WFD_MSG_ERR("MDP_MMAP returned success" \
"but failed to map to MDP\n");
rc = -EINVAL;
mdp_mregion->kvaddr = NULL;
mdp_mregion->paddr = NULL;
mdp_mregion->ion_handle = NULL;
goto mdp_mmap_fail;
}
mdp_buf.inst = inst->mdp_inst;
mdp_buf.cookie = enc_mregion;
mdp_buf.kvaddr = (u32) mdp_mregion->kvaddr;
mdp_buf.paddr = (u32) mdp_mregion->paddr;
WFD_MSG_DBG("NOTE: mdp paddr = [%p->%p], kvaddr = %p\n",
mdp_mregion->paddr, (void *)
((int)mdp_mregion->paddr + mdp_mregion->size),
mdp_mregion->kvaddr);
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_Q_BUFFER, (void *)&mdp_buf);
if (rc) {
WFD_MSG_ERR("Unable to queue the"
" buffer to mdp\n");
goto mdp_q_fail;
} else {
wfd_stats_update(&inst->stats,
WFD_STAT_EVENT_MDP_QUEUE);
}
INIT_LIST_HEAD(&mpair->list);
mpair->enc = enc_mregion;
mpair->mdp = mdp_mregion;
list_add_tail(&mpair->list, &inst->input_mem_list);
}
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
ALLOC_RECON_BUFFERS, NULL);
if (rc) {
WFD_MSG_ERR("Failed to allocate recon buffers\n");
goto recon_alloc_fail;
}
return rc;
/*
* Clean up only the buffer that we failed in setting up.
* Caller will clean up the rest by calling free_input_buffers()
*/
mdp_q_fail:
memset(&mmap_context, 0, sizeof(mmap_context));
mmap_context.mregion = mdp_mregion;
mmap_context.ion_client = wfd_dev->ion_client;
mmap_context.cookie = inst->mdp_inst;
v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_MUNMAP, (void *)&mmap_context);
mdp_mmap_fail:
v4l2_subdev_call(&wfd_dev->enc_sdev,
core, ioctl, FREE_INPUT_BUFFER,
(void *)enc_mregion);
set_input_fail:
memset(&mmap_context, 0, sizeof(mmap_context));
mmap_context.ion_client = wfd_dev->ion_client;
mmap_context.mregion = enc_mregion;
v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
ENC_MUNMAP, &mmap_context);
alloc_fail:
kfree(mpair);
kfree(enc_mregion);
kfree(mdp_mregion);
recon_alloc_fail:
return rc;
}
/* Undo wfd_allocate_input_buffers(): unmap every enc/mdp pair from
 * both subdevs, free the backing ION buffers and the recon buffers.
 * Idempotent via the input_bufs_allocated flag. */
static void wfd_free_input_buffers(struct wfd_device *wfd_dev,
        struct wfd_inst *inst)
{
    struct list_head *ptr, *next;
    struct mem_region_pair *mpair;
    int rc = 0;

    mutex_lock(&inst->lock);
    if (!inst->input_bufs_allocated) {
        mutex_unlock(&inst->lock);
        return;
    }
    inst->input_bufs_allocated = false;
    mutex_unlock(&inst->lock);

    if (!list_empty(&inst->input_mem_list)) {
        list_for_each_safe(ptr, next,
                &inst->input_mem_list) {
            mpair = list_entry(ptr, struct mem_region_pair,
                    list);
            rc = v4l2_subdev_call(&wfd_dev->enc_sdev,
                    core, ioctl, FREE_INPUT_BUFFER,
                    (void *)mpair->enc);

            if (rc)
                WFD_MSG_ERR("Failed to free buffers "
                        "from encoder\n");

            /* Only unmap regions that were actually mapped. */
            if (mpair->mdp->paddr) {
                struct mem_region_map temp = {0};

                temp.ion_client = wfd_dev->ion_client;
                temp.mregion = mpair->mdp;
                temp.cookie = inst->mdp_inst;

                v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
                        ioctl, MDP_MUNMAP,
                        (void *)&temp);
            }

            if (mpair->enc->paddr) {
                struct mem_region_map temp = {0};

                temp.ion_client = wfd_dev->ion_client;
                temp.mregion = mpair->enc;
                v4l2_subdev_call(&wfd_dev->enc_sdev,
                        core, ioctl, ENC_MUNMAP, &temp);
            }

            wfd_free_ion_buffer(wfd_dev->ion_client, mpair->enc);
            list_del(&mpair->list);
            kfree(mpair->enc);
            kfree(mpair->mdp);
            kfree(mpair);
        }
    }
    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            FREE_RECON_BUFFERS, NULL);
    if (rc)
        WFD_MSG_ERR("Failed to free recon buffers\n");
}
/* Look up the mem_info registered for a userspace pointer; returns
 * NULL when the pointer has not been registered. */
static struct mem_info *wfd_get_mem_info(struct wfd_inst *inst,
        unsigned long userptr)
{
    struct mem_info_entry *entry;
    struct mem_info *found = NULL;

    mutex_lock(&inst->lock);
    list_for_each_entry(entry, &inst->minfo_list, list) {
        if (entry->userptr == userptr) {
            found = &entry->minfo;
            break;
        }
    }
    mutex_unlock(&inst->lock);

    return found;
}
static void wfd_put_mem_info(struct wfd_inst *inst,
struct mem_info *minfo)
{
struct list_head *ptr, *next;
struct mem_info_entry *temp;
mutex_lock(&inst->lock);
if (!list_empty(&inst->minfo_list)) {
list_for_each_safe(ptr, next,
&inst->minfo_list) {
temp = list_entry(ptr, struct mem_info_entry,
list);
if (temp && (&temp->minfo == minfo)) {
list_del(&temp->list);
kfree(temp);
}
}
}
mutex_unlock(&inst->lock);
}
/* Drop an output buffer's registration entry; no-op (with an error
 * log) for NULL arguments. */
static void wfd_unregister_out_buf(struct wfd_inst *inst,
        struct mem_info *minfo)
{
    if (minfo && inst) {
        wfd_put_mem_info(inst, minfo);
        return;
    }

    WFD_MSG_ERR("Invalid arguments\n");
}
/* vb2 buf_init: before streaming starts, lazily allocate the shared
 * encoder/MDP input buffers and register this output buffer with the
 * encoder. */
static int wfd_vidbuf_buf_init(struct vb2_buffer *vb)
{
    int rc = 0;
    struct vb2_queue *q = vb->vb2_queue;
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_inst *inst = file_to_inst(priv_data);
    struct wfd_device *wfd_dev =
        (struct wfd_device *)video_drvdata(priv_data);
    struct mem_info *minfo = vb2_plane_cookie(vb, 0);
    struct mem_region mregion;

    mregion.fd = minfo->fd;
    mregion.offset = minfo->offset;
    mregion.cookie = (u32)vb;
    /*TODO: should be fixed in kernel 3.2*/
    mregion.size = inst->out_buf_size;

    /* Fix: the old "inst && ..." check here was a no-op - inst was
     * already dereferenced above and container_of() never yields
     * NULL. */
    if (!inst->vid_bufq.streaming) {
        rc = wfd_allocate_input_buffers(wfd_dev, inst);
        if (rc) {
            WFD_MSG_ERR("Failed to allocate input buffers\n");
            goto free_input_bufs;
        }
        rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
                SET_OUTPUT_BUFFER, (void *)&mregion);
        if (rc) {
            WFD_MSG_ERR("Failed to set output buffer\n");
            goto free_input_bufs;
        }
    }
    return rc;

free_input_bufs:
    wfd_free_input_buffers(wfd_dev, inst);
    return rc;
}
/* vb2 buf_prepare: nothing to do per-queue. */
static int wfd_vidbuf_buf_prepare(struct vb2_buffer *vb)
{
    return 0;
}

/* vb2 buf_finish: nothing to do per-dequeue. */
static int wfd_vidbuf_buf_finish(struct vb2_buffer *vb)
{
    return 0;
}
/* vb2 buf_cleanup: release the encoder-side output buffer and drop
 * its registration entry. */
static void wfd_vidbuf_buf_cleanup(struct vb2_buffer *vb)
{
    int rc = 0;
    struct vb2_queue *q = vb->vb2_queue;
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_device *wfd_dev =
        (struct wfd_device *)video_drvdata(priv_data);
    struct wfd_inst *inst = file_to_inst(priv_data);
    struct mem_info *minfo = vb2_plane_cookie(vb, 0);
    struct mem_region mregion;

    /* A NULL cookie means buffer setup never completed. */
    if (minfo == NULL) {
        WFD_MSG_DBG("not freeing buffers since allocation failed");
        return;
    }

    mregion.fd = minfo->fd;
    mregion.offset = minfo->offset;
    mregion.cookie = (u32)vb;
    mregion.size = inst->out_buf_size;

    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            FREE_OUTPUT_BUFFER, (void *)&mregion);
    if (rc)
        WFD_MSG_ERR("Failed to free output buffer\n");
    wfd_unregister_out_buf(inst, minfo);
}
/* Kernel thread: dequeue captured frames from MDP and hand them to
 * the VSG.  On a persistent error it parks on stop_mdp_thread until
 * stop_streaming completes it, then spins until kthread_stop(). */
static int mdp_output_thread(void *data)
{
    int rc = 0, no_sig_wait = 0;
    struct file *filp = (struct file *)data;
    struct wfd_inst *inst = file_to_inst(filp);
    struct wfd_device *wfd_dev =
        (struct wfd_device *)video_drvdata(filp);
    struct mdp_buf_info obuf_mdp = {inst->mdp_inst, 0, 0, 0};
    struct mem_region *mregion;
    struct vsg_buf_info ibuf_vsg;

    while (!kthread_should_stop()) {
        if (rc) {
            WFD_MSG_DBG("%s() error in output thread\n", __func__);
            if (!no_sig_wait) {
                /* Wait once for stop_streaming's complete(). */
                wait_for_completion(&inst->stop_mdp_thread);
                no_sig_wait = 1;
            }
            continue;
        }
        WFD_MSG_DBG("waiting for mdp output\n");
        rc = v4l2_subdev_call(&wfd_dev->mdp_sdev,
            core, ioctl, MDP_DQ_BUFFER, (void *)&obuf_mdp);

        if (rc) {
            if (rc != -ENOBUFS)
                WFD_MSG_ERR("MDP reported err %d\n", rc);

            WFD_MSG_ERR("Streamoff called\n");
            continue;
        } else {
            wfd_stats_update(&inst->stats,
                    WFD_STAT_EVENT_MDP_DEQUEUE);
        }

        mregion = obuf_mdp.cookie;
        if (!mregion) {
            WFD_MSG_ERR("mdp cookie is null\n");
            rc = -EINVAL;
            continue;
        }

        /* Re-express the buffer in MDP address space for the VSG. */
        ibuf_vsg.mdp_buf_info = obuf_mdp;
        ibuf_vsg.mdp_buf_info.inst = inst->mdp_inst;
        ibuf_vsg.mdp_buf_info.cookie = mregion;
        ibuf_vsg.mdp_buf_info.kvaddr = (u32) mregion->kvaddr;
        ibuf_vsg.mdp_buf_info.paddr =
            (u32)wfd_enc_addr_to_mdp_addr(inst,
                    (unsigned long)mregion->paddr);
        rc = v4l2_subdev_call(&wfd_dev->vsg_sdev,
            core, ioctl, VSG_Q_BUFFER, (void *)&ibuf_vsg);

        if (rc) {
            WFD_MSG_ERR("Failed to queue frame to vsg\n");
            continue;
        } else {
            wfd_stats_update(&inst->stats,
                    WFD_STAT_EVENT_VSG_QUEUE);
        }
    }
    WFD_MSG_DBG("Exiting the thread\n");
    return rc;
}
/* vb2 start_streaming: start encoder, then VSG, then the MDP output
 * thread, then MDP capture.  NOTE(review): error paths do not stop
 * subdevs started earlier - confirm stop_streaming covers this. */
static int wfd_vidbuf_start_streaming(struct vb2_queue *q, unsigned int count)
{
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_device *wfd_dev =
        (struct wfd_device *)video_drvdata(priv_data);
    struct wfd_inst *inst = file_to_inst(priv_data);
    int rc = 0;

    WFD_MSG_ERR("Stream on called\n");
    WFD_MSG_DBG("enc start\n");
    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            ENCODE_START, (void *)inst->venc_inst);
    if (rc) {
        WFD_MSG_ERR("Failed to start encoder\n");
        goto subdev_start_fail;
    }

    WFD_MSG_DBG("vsg start\n");
    rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
            VSG_START, NULL);
    if (rc) {
        WFD_MSG_ERR("Failed to start vsg\n");
        goto subdev_start_fail;
    }

    init_completion(&inst->stop_mdp_thread);
    inst->mdp_task = kthread_run(mdp_output_thread, priv_data,
                "mdp_output_thread");
    if (IS_ERR(inst->mdp_task)) {
        rc = PTR_ERR(inst->mdp_task);
        goto subdev_start_fail;
    }
    WFD_MSG_DBG("mdp start\n");
    rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
             MDP_START, (void *)inst->mdp_inst);
    if (rc)
        WFD_MSG_ERR("Failed to start MDP\n");
subdev_start_fail:
    return rc;
}
/* vb2 stop_streaming: stop MDP, flush the encoder, stop the VSG,
 * release and reap the MDP output thread, then stop the encoder. */
static int wfd_vidbuf_stop_streaming(struct vb2_queue *q)
{
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_device *wfd_dev =
        (struct wfd_device *)video_drvdata(priv_data);
    struct wfd_inst *inst = file_to_inst(priv_data);
    int rc = 0;

    WFD_MSG_DBG("mdp stop\n");
    rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
             MDP_STOP, (void *)inst->mdp_inst);
    if (rc)
        WFD_MSG_ERR("Failed to stop MDP\n");

    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            ENCODE_FLUSH, (void *)inst->venc_inst);
    if (rc)
        WFD_MSG_ERR("Failed to flush encoder\n");

    WFD_MSG_DBG("vsg stop\n");
    rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
             VSG_STOP, NULL);
    if (rc)
        WFD_MSG_ERR("Failed to stop VSG\n");

    /* Unblock the output thread if it is parked on an error, then
     * reap it. */
    complete(&inst->stop_mdp_thread);
    kthread_stop(inst->mdp_task);

    WFD_MSG_DBG("enc stop\n");
    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            ENCODE_STOP, (void *)inst->venc_inst);
    if (rc)
        WFD_MSG_ERR("Failed to stop encoder\n");

    return rc;
}
/* vb2 buf_queue: hand the empty output buffer to the encoder to be
 * filled with bitstream. */
static void wfd_vidbuf_buf_queue(struct vb2_buffer *vb)
{
    int rc = 0;
    struct vb2_queue *q = vb->vb2_queue;
    struct file *priv_data = (struct file *)(q->drv_priv);
    struct wfd_device *wfd_dev =
        (struct wfd_device *)video_drvdata(priv_data);
    struct wfd_inst *inst = file_to_inst(priv_data);
    struct mem_region mregion;
    struct mem_info *minfo = vb2_plane_cookie(vb, 0);

    mregion.fd = minfo->fd;
    mregion.offset = minfo->offset;
    mregion.cookie = (u32)vb;
    mregion.size = inst->out_buf_size;

    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            FILL_OUTPUT_BUFFER, (void *)&mregion);
    if (rc) {
        WFD_MSG_ERR("Failed to fill output buffer\n");
    }
}
/* vb2 queue operations for the capture (bitstream) queue. */
static struct vb2_ops wfd_vidbuf_ops = {
    .queue_setup = wfd_vidbuf_queue_setup,

    .wait_prepare = wfd_vidbuf_wait_prepare,
    .wait_finish = wfd_vidbuf_wait_finish,

    .buf_init = wfd_vidbuf_buf_init,
    .buf_prepare = wfd_vidbuf_buf_prepare,
    .buf_finish = wfd_vidbuf_buf_finish,
    .buf_cleanup = wfd_vidbuf_buf_cleanup,

    .start_streaming = wfd_vidbuf_start_streaming,
    .stop_streaming = wfd_vidbuf_stop_streaming,

    .buf_queue = wfd_vidbuf_buf_queue,
};
/* Subdev op tables binding the MDP, encoder and VSG components. */
static const struct v4l2_subdev_core_ops mdp_subdev_core_ops = {
    .init = mdp_init,
    .ioctl = mdp_ioctl,
};

static const struct v4l2_subdev_ops mdp_subdev_ops = {
    .core = &mdp_subdev_core_ops,
};

static const struct v4l2_subdev_core_ops enc_subdev_core_ops = {
    .init = venc_init,
    .load_fw = venc_load_fw,
    .ioctl = venc_ioctl,
};

static const struct v4l2_subdev_ops enc_subdev_ops = {
    .core = &enc_subdev_core_ops,
};

static const struct v4l2_subdev_core_ops vsg_subdev_core_ops = {
    .init = vsg_init,
    .ioctl = vsg_ioctl,
};

static const struct v4l2_subdev_ops vsg_subdev_ops = {
    .core = &vsg_subdev_core_ops,
};
/* VIDIOC_QUERYCAP: advertise a streaming video-capture device. */
static int wfdioc_querycap(struct file *filp, void *fh,
        struct v4l2_capability *cap) {
    WFD_MSG_DBG("wfdioc_querycap: E\n");
    memset(cap, 0, sizeof(struct v4l2_capability));
    strlcpy(cap->driver, "wifi-display", sizeof(cap->driver));
    strlcpy(cap->card, "msm", sizeof(cap->card));
    cap->version = WFD_VERSION;
    cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
    WFD_MSG_DBG("wfdioc_querycap: X\n");
    return 0;
}
/* VIDIOC_G_FMT: report the current capture format from instance
 * state (capture buffer type only). */
static int wfdioc_g_fmt(struct file *filp, void *fh,
            struct v4l2_format *fmt)
{
    struct wfd_inst *inst = file_to_inst(filp);

    if (!fmt) {
        WFD_MSG_ERR("Invalid argument\n");
        return -EINVAL;
    }
    if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
        WFD_MSG_ERR("Only V4L2_BUF_TYPE_VIDEO_CAPTURE is supported\n");
        return -EINVAL;
    }
    mutex_lock(&inst->lock);
    fmt->fmt.pix.width = inst->width;
    fmt->fmt.pix.height = inst->height;
    fmt->fmt.pix.pixelformat = inst->pixelformat;
    fmt->fmt.pix.sizeimage = inst->out_buf_size;
    fmt->fmt.pix.priv = 0;
    mutex_unlock(&inst->lock);
    return 0;
}
/* VIDIOC_S_FMT: program the encoder (H.264 capture only, width must
 * be 16-aligned), fetch input buffer requirements, cache the sizes in
 * the instance and push width/height to MDP. */
static int wfdioc_s_fmt(struct file *filp, void *fh,
            struct v4l2_format *fmt)
{
    int rc = 0;
    struct wfd_inst *inst = file_to_inst(filp);
    struct wfd_device *wfd_dev = video_drvdata(filp);
    struct mdp_prop prop;
    struct bufreq breq;

    if (!fmt) {
        WFD_MSG_ERR("Invalid argument\n");
        return -EINVAL;
    }

    if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_H264) {
        WFD_MSG_ERR("Only V4L2_BUF_TYPE_VIDEO_CAPTURE and "
                "V4L2_PIX_FMT_H264 are supported\n");
        return -EINVAL;
    }

    if (fmt->fmt.pix.width % 16) {
        WFD_MSG_ERR("Only 16 byte aligned widths are supported\n");
        return -ENOTSUPP;
    }

    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_FORMAT,
                (void *)fmt);
    if (rc) {
        WFD_MSG_ERR("Failed to set format on encoder, rc = %d\n", rc);
        return rc;
    }

    breq.count = VENC_INPUT_BUFFERS;
    breq.height = fmt->fmt.pix.height;
    breq.width = fmt->fmt.pix.width;
    rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
            SET_BUFFER_REQ, (void *)&breq);
    if (rc) {
        WFD_MSG_ERR("Failed to set buffer reqs on encoder\n");
        return rc;
    }

    mutex_lock(&inst->lock);
    inst->input_buf_size = breq.size;
    inst->out_buf_size = fmt->fmt.pix.sizeimage;
    prop.height = inst->height = fmt->fmt.pix.height;
    prop.width = inst->width = fmt->fmt.pix.width;
    prop.inst = inst->mdp_inst;
    mutex_unlock(&inst->lock);

    rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_SET_PROP,
                (void *)&prop);
    if (rc)
        WFD_MSG_ERR("Failed to set height/width property on mdp\n");

    return rc;
}
static int wfdioc_reqbufs(struct file *filp, void *fh,
struct v4l2_requestbuffers *b)
{
struct wfd_inst *inst = file_to_inst(filp);
struct wfd_device *wfd_dev = video_drvdata(filp);
int rc = 0;
if (b->type != V4L2_CAP_VIDEO_CAPTURE ||
b->memory != V4L2_MEMORY_USERPTR) {
WFD_MSG_ERR("Only V4L2_CAP_VIDEO_CAPTURE and "
"V4L2_MEMORY_USERPTR are supported\n");
return -EINVAL;
}
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
GET_BUFFER_REQ, (void *)b);
if (rc) {
WFD_MSG_ERR("Failed to get buf reqs from encoder\n");
return rc;
}
mutex_lock(&inst->lock);
inst->buf_count = b->count;
mutex_unlock(&inst->lock);
rc = vb2_reqbufs(&inst->vid_bufq, b);
return rc;
}
static int wfd_register_out_buf(struct wfd_inst *inst,
struct v4l2_buffer *b)
{
struct mem_info_entry *minfo_entry;
struct mem_info *minfo;
if (!b || !inst || !b->reserved) {
WFD_MSG_ERR("Invalid arguments\n");
return -EINVAL;
}
minfo = wfd_get_mem_info(inst, b->m.userptr);
if (!minfo) {
minfo_entry = kzalloc(sizeof(struct mem_info_entry),
GFP_KERNEL);
if (copy_from_user(&minfo_entry->minfo, (void *)b->reserved,
sizeof(struct mem_info))) {
WFD_MSG_ERR(" copy_from_user failed. Populate"
" v4l2_buffer->reserved with meminfo\n");
return -EINVAL;
}
minfo_entry->userptr = b->m.userptr;
mutex_lock(&inst->lock);
list_add_tail(&minfo_entry->list, &inst->minfo_list);
mutex_unlock(&inst->lock);
} else
WFD_MSG_DBG("Buffer already registered\n");
return 0;
}
/* VIDIOC_QBUF: register the userspace buffer then queue it to vb2
 * under vb2_lock. */
static int wfdioc_qbuf(struct file *filp, void *fh,
        struct v4l2_buffer *b)
{
    int rc = 0;
    struct wfd_inst *inst = file_to_inst(filp);

    /* Fix: dropped the tautological "b->index < 0" test -
     * v4l2_buffer.index is a __u32 and can never be negative. */
    if (!inst || !b || b->index >= inst->buf_count) {
        WFD_MSG_ERR("Invalid input parameters to QBUF IOCTL\n");
        return -EINVAL;
    }

    rc = wfd_register_out_buf(inst, b);
    if (rc) {
        WFD_MSG_ERR("Failed to register buffer\n");
        return rc;
    }

    mutex_lock(&inst->vb2_lock);
    rc = vb2_qbuf(&inst->vid_bufq, b);
    mutex_unlock(&inst->vb2_lock);

    if (rc)
        WFD_MSG_ERR("Failed to queue buffer\n");
    else
        wfd_stats_update(&inst->stats, WFD_STAT_EVENT_CLIENT_QUEUE);

    return rc;
}
/* VIDIOC_STREAMON: clear the streamoff flag and start the vb2 queue;
 * on failure the queue is stopped again. */
static int wfdioc_streamon(struct file *filp, void *fh,
        enum v4l2_buf_type i)
{
    int rc = 0;
    struct wfd_inst *inst = file_to_inst(filp);

    if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
        WFD_MSG_ERR("stream on for buffer type = %d is not "
            "supported.\n", i);
        return -EINVAL;
    }

    mutex_lock(&inst->lock);
    inst->streamoff = false;
    mutex_unlock(&inst->lock);

    rc = vb2_streamon(&inst->vid_bufq, i);
    if (rc) {
        WFD_MSG_ERR("videobuf_streamon failed with err = %d\n", rc);
        goto vidbuf_streamon_failed;
    }
    return rc;

vidbuf_streamon_failed:
    vb2_streamoff(&inst->vid_bufq, i);
    return rc;
}
/* VIDIOC_STREAMOFF: mark the instance stopped (rejecting a second
 * call), stop the vb2 queue and wake any event waiters. */
static int wfdioc_streamoff(struct file *filp, void *fh,
        enum v4l2_buf_type i)
{
    struct wfd_inst *inst = file_to_inst(filp);

    if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
        WFD_MSG_ERR("stream off for buffer type = %d is not "
            "supported.\n", i);
        return -EINVAL;
    }

    mutex_lock(&inst->lock);
    if (inst->streamoff) {
        WFD_MSG_ERR("Module is already in streamoff state\n");
        mutex_unlock(&inst->lock);
        return -EINVAL;
    }
    inst->streamoff = true;
    mutex_unlock(&inst->lock);

    WFD_MSG_DBG("Calling videobuf_streamoff\n");
    vb2_streamoff(&inst->vid_bufq, i);
    wake_up(&inst->event_handler.wait);
    return 0;
}
/*
 * VIDIOC_DQBUF handler: block until an output buffer is completed by
 * the encoder, then return it to userspace.
 */
static int wfdioc_dqbuf(struct file *filp, void *fh,
		struct v4l2_buffer *b)
{
	struct wfd_inst *inst = file_to_inst(filp);
	int err;

	WFD_MSG_DBG("Waiting to dequeue buffer\n");

	/* Serialize against the encoder's buffer-done callback. */
	mutex_lock(&inst->vb2_lock);
	err = vb2_dqbuf(&inst->vid_bufq, b, false);
	mutex_unlock(&inst->vb2_lock);

	if (!err)
		wfd_stats_update(&inst->stats, WFD_STAT_EVENT_CLIENT_DEQUEUE);
	else
		WFD_MSG_ERR("Failed to dequeue buffer\n");
	return err;
}
/* VIDIOC_G_CTRL handler: read an encoder property via the venc subdev. */
static int wfdioc_g_ctrl(struct file *filp, void *fh,
		struct v4l2_control *a)
{
	struct wfd_device *wfd_dev = video_drvdata(filp);
	int err;

	err = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
			ioctl, GET_PROP, a);
	if (err)
		WFD_MSG_ERR("Failed to get encoder property\n");
	return err;
}
/* VIDIOC_S_CTRL handler: write an encoder property via the venc subdev. */
static int wfdioc_s_ctrl(struct file *filp, void *fh,
		struct v4l2_control *a)
{
	struct wfd_device *wfd_dev = video_drvdata(filp);
	int err;

	err = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
			ioctl, SET_PROP, a);
	if (err)
		WFD_MSG_ERR("Failed to set encoder property\n");
	return err;
}
/*
 * VIDIOC_G_PARM handler: report the current frame interval (as a
 * v4l2_fract in ns/NSEC_PER_SEC) and the maximum frame interval
 * (frame-skip bound) negotiated with the VSG subdevice.
 *
 * Userspace passes a pointer to a struct v4l2_qcom_frameskip through
 * parm.capture.extendedmode; the max frame interval is copied back
 * through it.
 *
 * Fix: copy_to_user() returns the number of bytes that could NOT be
 * copied -- it is never negative -- so the old "if (rc < 0)" check
 * never fired and a positive leftover count could be returned to the
 * caller as if it were a status.  A partial copy now yields -EFAULT.
 */
static int wfdioc_g_parm(struct file *filp, void *fh,
		struct v4l2_streamparm *a)
{
	int rc = 0;
	struct wfd_device *wfd_dev = video_drvdata(filp);
	struct wfd_inst *inst = file_to_inst(filp);
	int64_t frame_interval = 0,
		max_frame_interval = 0; /* both in nsecs*/
	struct v4l2_qcom_frameskip frameskip, *usr_frameskip;

	usr_frameskip = (struct v4l2_qcom_frameskip *)
			a->parm.capture.extendedmode;
	if (!usr_frameskip) {
		rc = -EINVAL;
		goto get_parm_fail;
	}

	rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
			ioctl, VSG_GET_FRAME_INTERVAL, &frame_interval);
	if (rc < 0)
		goto get_parm_fail;

	rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
			ioctl, VSG_GET_MAX_FRAME_INTERVAL, &max_frame_interval);
	if (rc < 0)
		goto get_parm_fail;

	/* Compound literal zero-fills the remaining members. */
	frameskip = (struct v4l2_qcom_frameskip) {
		.maxframeinterval = max_frame_interval,
	};

	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	a->parm.capture = (struct v4l2_captureparm) {
		.capability = V4L2_CAP_TIMEPERFRAME,
		.capturemode = 0,
		.timeperframe = (struct v4l2_fract) {
			.numerator = frame_interval,
			.denominator = NSEC_PER_SEC,
		},
		.readbuffers = inst->buf_count,
		.extendedmode = (__u32)usr_frameskip,
		.reserved = {0}
	};

	if (copy_to_user((void *)a->parm.capture.extendedmode,
			&frameskip, sizeof(frameskip)))
		rc = -EFAULT;
get_parm_fail:
	return rc;
}
static int wfdioc_s_parm(struct file *filp, void *fh,
struct v4l2_streamparm *a)
{
int rc = 0;
struct wfd_device *wfd_dev = video_drvdata(filp);
struct wfd_inst *inst = file_to_inst(filp);
struct v4l2_qcom_frameskip frameskip;
int64_t frame_interval = 0,
max_frame_interval = 0,
frame_interval_variance = 0;
void *extendedmode = NULL;
enum vsg_modes vsg_mode = VSG_MODE_VFR;
enum venc_framerate_modes venc_mode = VENC_MODE_VFR;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
rc = -ENOTSUPP;
goto set_parm_fail;
}
if (a->parm.capture.readbuffers == 0 ||
a->parm.capture.readbuffers == inst->buf_count) {
a->parm.capture.readbuffers = inst->buf_count;
} else {
rc = -EINVAL;
goto set_parm_fail;
}
extendedmode = (void *)a->parm.capture.extendedmode;
if (a->parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
if (a->parm.capture.timeperframe.denominator == 0) {
rc = -EINVAL;
goto set_parm_fail;
}
frame_interval =
a->parm.capture.timeperframe.numerator * NSEC_PER_SEC /
a->parm.capture.timeperframe.denominator;
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
ioctl, VSG_SET_FRAME_INTERVAL,
&frame_interval);
if (rc)
goto set_parm_fail;
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
ioctl, SET_FRAMERATE,
&a->parm.capture.timeperframe);
if (rc)
goto set_parm_fail;
}
if (a->parm.capture.capability & V4L2_CAP_QCOM_FRAMESKIP &&
extendedmode) {
rc = copy_from_user(&frameskip,
extendedmode, sizeof(frameskip));
if (rc)
goto set_parm_fail;
max_frame_interval = (int64_t)frameskip.maxframeinterval;
frame_interval_variance = frameskip.fpsvariance;
vsg_mode = VSG_MODE_VFR;
venc_mode = VENC_MODE_VFR;
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
ioctl, VSG_SET_MAX_FRAME_INTERVAL,
&max_frame_interval);
if (rc)
goto set_parm_fail;
} else {
vsg_mode = VSG_MODE_CFR;
venc_mode = VENC_MODE_CFR;
}
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
ioctl, VSG_SET_MODE, &vsg_mode);
if (rc) {
WFD_MSG_ERR("Setting FR mode for VSG failed\n");
goto set_parm_fail;
}
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
ioctl, SET_FRAMERATE_MODE,
&venc_mode);
if (rc) {
WFD_MSG_ERR("Setting FR mode for VENC failed\n");
goto set_parm_fail;
}
if (frame_interval_variance) {
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
ioctl, VSG_SET_FRAME_INTERVAL_VARIANCE,
&frame_interval_variance);
if (rc) {
WFD_MSG_ERR("Setting FR variance for VSG failed\n");
goto set_parm_fail;
}
}
set_parm_fail:
return rc;
}
/* Subscribe the caller's v4l2 file handle to driver events. */
static int wfdioc_subscribe_event(struct v4l2_fh *fh,
		struct v4l2_event_subscription *sub)
{
	/* The fh is embedded inside our per-open instance. */
	struct wfd_inst *wfd = container_of(fh, struct wfd_inst,
			event_handler);

	return v4l2_event_subscribe(&wfd->event_handler, sub, MAX_EVENTS);
}
/* Remove an event subscription previously made via subscribe_event. */
static int wfdioc_unsubscribe_event(struct v4l2_fh *fh,
		struct v4l2_event_subscription *sub)
{
	/* Recover the owning instance from the embedded fh. */
	struct wfd_inst *wfd = container_of(fh, struct wfd_inst,
			event_handler);

	return v4l2_event_unsubscribe(&wfd->event_handler, sub);
}
/* V4L2 ioctl dispatch table for the WFD capture device node. */
static const struct v4l2_ioctl_ops g_wfd_ioctl_ops = {
	.vidioc_querycap = wfdioc_querycap,
	.vidioc_s_fmt_vid_cap = wfdioc_s_fmt,
	.vidioc_g_fmt_vid_cap = wfdioc_g_fmt,
	.vidioc_reqbufs = wfdioc_reqbufs,
	.vidioc_qbuf = wfdioc_qbuf,
	.vidioc_streamon = wfdioc_streamon,
	.vidioc_streamoff = wfdioc_streamoff,
	.vidioc_dqbuf = wfdioc_dqbuf,
	.vidioc_g_ctrl = wfdioc_g_ctrl,
	.vidioc_s_ctrl = wfdioc_s_ctrl,
	.vidioc_g_parm = wfdioc_g_parm,
	.vidioc_s_parm = wfdioc_s_parm,
	.vidioc_subscribe_event = wfdioc_subscribe_event,
	.vidioc_unsubscribe_event = wfdioc_unsubscribe_event,
};
/*
 * Program a sane default configuration right after open():
 * H.264 output at DEFAULT_WFD_WIDTH x DEFAULT_WFD_HEIGHT, with SPS/PPS
 * headers joined to I-frames -- so userspace gets a working setup even
 * if it never calls S_FMT/S_CTRL itself.
 *
 * Fix: the return values of wfdioc_s_fmt()/wfdioc_s_ctrl() were
 * silently discarded; they are now propagated to the caller.
 */
static int wfd_set_default_properties(struct file *filp)
{
	int rc;
	struct v4l2_format fmt;
	struct v4l2_control ctrl;
	struct wfd_inst *inst = file_to_inst(filp);

	if (!inst) {
		WFD_MSG_ERR("Invalid argument\n");
		return -EINVAL;
	}

	/* Cache the defaults in the instance under its lock. */
	mutex_lock(&inst->lock);
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.height = inst->height = DEFAULT_WFD_HEIGHT;
	fmt.fmt.pix.width = inst->width = DEFAULT_WFD_WIDTH;
	fmt.fmt.pix.pixelformat = inst->pixelformat
			= V4L2_PIX_FMT_H264;
	mutex_unlock(&inst->lock);

	rc = wfdioc_s_fmt(filp, filp->private_data, &fmt);
	if (rc)
		return rc;

	ctrl.id = V4L2_CID_MPEG_VIDEO_HEADER_MODE;
	ctrl.value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME;
	return wfdioc_s_ctrl(filp, filp->private_data, &ctrl);
}
/*
 * Encoder output-buffer-done callback: mark the vb2 buffer as finished
 * (DONE on success, ERROR on non-zero status) so DQBUF can return it.
 */
static void venc_op_buffer_done(void *cookie, u32 status,
		struct vb2_buffer *buf)
{
	struct file *filp = cookie;
	struct wfd_inst *inst = file_to_inst(filp);
	enum vb2_buffer_state state =
		status ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE;

	WFD_MSG_DBG("yay!! got callback\n");

	/* Same lock as qbuf/dqbuf: vb2 calls must not race. */
	mutex_lock(&inst->vb2_lock);
	vb2_buffer_done(buf, state);
	mutex_unlock(&inst->vb2_lock);
}
/*
 * Encoder input-buffer-done callback: the encoder has consumed an input
 * frame, so the buffer is translated back into MDP address space and
 * returned to the VSG subdevice for reuse.
 *
 * NOTE(review): kvaddr/paddr are truncated to u32 via casts -- fine on
 * 32-bit targets this driver runs on, but worth confirming if the
 * platform ever grows 64-bit addresses.
 */
static void venc_ip_buffer_done(void *cookie, u32 status,
		struct mem_region *mregion)
{
	struct file *filp = cookie;
	struct wfd_inst *inst = file_to_inst(filp);
	struct vsg_buf_info buf;
	struct mdp_buf_info mdp_buf = {0};
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(filp);
	int rc = 0;

	WFD_MSG_DBG("yay!! got ip callback\n");
	mdp_buf.inst = inst->mdp_inst;
	mdp_buf.cookie = mregion;
	mdp_buf.kvaddr = (u32) mregion->kvaddr;
	/* Encoder-side physical address -> MDP-side address. */
	mdp_buf.paddr =
		(u32)wfd_enc_addr_to_mdp_addr(inst,
			(unsigned long)mregion->paddr);
	buf.mdp_buf_info = mdp_buf;
	rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
			ioctl, VSG_RETURN_IP_BUFFER, (void *)&buf);
	if (rc)
		WFD_MSG_ERR("Failed to return buffer to vsg\n");
	else
		wfd_stats_update(&inst->stats, WFD_STAT_EVENT_ENC_DEQUEUE);
}
/*
 * Encoder event callback: translate encoder events into v4l2 events
 * for userspace.  Only hardware errors are forwarded; everything else
 * is ignored.
 */
static void venc_on_event(void *cookie, enum venc_event e)
{
	struct file *filp = cookie;
	struct wfd_inst *inst = file_to_inst(filp);
	struct v4l2_event event;
	int type = 0;

	if (e == VENC_EVENT_HARDWARE_ERROR)
		type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;

	if (!type)
		return;

	event.id = 0;
	event.type = type;
	v4l2_event_queue_fh(&inst->event_handler, &event);
}
/*
 * VSG callback: a captured input frame is no longer needed, so it is
 * queued back to the MDP for the next capture cycle.
 */
static int vsg_release_input_frame(void *cookie, struct vsg_buf_info *buf)
{
	struct file *filp = cookie;
	struct wfd_inst *inst = file_to_inst(filp);
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(filp);
	int err;

	err = v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
			ioctl, MDP_Q_BUFFER, buf);
	if (err) {
		WFD_MSG_ERR("Failed to Q buffer to mdp\n");
		return err;
	}

	wfd_stats_update(&inst->stats, WFD_STAT_EVENT_MDP_QUEUE);
	wfd_stats_update(&inst->stats, WFD_STAT_EVENT_VSG_DEQUEUE);
	return 0;
}
/*
 * VSG callback: submit a captured frame to the video encoder.
 *
 * The ion buffer is flushed before the encoder reads it so the
 * encoder sees the MDP's writes (cache coherency).
 */
static int vsg_encode_frame(void *cookie, struct vsg_buf_info *buf)
{
	struct file *filp = cookie;
	struct wfd_inst *inst = file_to_inst(filp);
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(filp);
	struct venc_buf_info venc_buf;
	int rc = 0;

	if (!buf)
		return -EINVAL;

	/* Carry the capture timestamp (ns) along with the memory region. */
	venc_buf = (struct venc_buf_info){
		.timestamp = timespec_to_ns(&buf->time),
		.mregion = buf->mdp_buf_info.cookie
	};

	wfd_flush_ion_buffer(wfd_dev->ion_client, venc_buf.mregion);

	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			ENCODE_FRAME, &venc_buf);
	if (rc)
		WFD_MSG_ERR("Encode failed\n");
	else
		wfd_stats_update(&inst->stats, WFD_STAT_EVENT_ENC_QUEUE);
	return rc;
}
/*
 * vb2 USERPTR hook: look up the meminfo previously registered for this
 * user virtual address (see wfd_register_out_buf) and use it as the
 * buffer's private data.  size/write are unused.
 */
void *wfd_vb2_mem_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
		unsigned long size, int write)
{
	return wfd_get_mem_info(alloc_ctx, vaddr);
}
/*
 * vb2 USERPTR release hook.  Intentionally empty: the meminfo list is
 * torn down elsewhere.  The original TODO stands -- the per-buffer
 * entry is not freed here.
 */
void wfd_vb2_mem_ops_put_userptr(void *buf_priv)
{
	/*TODO: Free the list*/
}
/* vb2 cookie hook: the private data itself is the cookie. */
void *wfd_vb2_mem_ops_cookie(void *buf_priv)
{
	return buf_priv;
}
/* Minimal vb2 memory ops: USERPTR-only, backed by the meminfo list. */
static struct vb2_mem_ops wfd_vb2_mem_ops = {
	.get_userptr = wfd_vb2_mem_ops_get_userptr,
	.put_userptr = wfd_vb2_mem_ops_put_userptr,
	.cookie = wfd_vb2_mem_ops_cookie,
};
/*
 * Initialize the capture vb2 queue for one open instance:
 * USERPTR-only I/O with the driver's buffer ops and memory ops.
 * @priv is stashed in drv_priv for the vidbuf callbacks.
 */
int wfd_initialize_vb2_queue(struct vb2_queue *q, void *priv)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_USERPTR;
	q->ops = &wfd_vidbuf_ops;
	q->mem_ops = &wfd_vb2_mem_ops;
	q->drv_priv = priv;
	return vb2_queue_init(q);
}
/*
 * Device-node open(): allocate the per-open instance, then open the
 * MDP, encoder and VSG subdevices and wire up their callbacks.  Only a
 * single concurrent open is allowed (guarded by wfd_dev->in_use).
 *
 * Fix: a failed kzalloc() used to jump to err_mdp_open, which
 * dereferenced the NULL "inst" via v4l2_fh_del(&inst->event_handler).
 * The unwind ladder now has a separate err_alloc label so allocation
 * failure skips every step that touches "inst".
 */
static int wfd_open(struct file *filp)
{
	int rc = 0;
	struct wfd_inst *inst = NULL;
	struct wfd_device *wfd_dev = NULL;
	struct venc_msg_ops enc_mops;
	struct mdp_msg_ops mdp_mops;
	struct vsg_msg_ops vsg_mops;

	WFD_MSG_DBG("wfd_open: E\n");
	wfd_dev = video_drvdata(filp);
	if (!wfd_dev) {
		rc = -EINVAL;
		goto err_dev_busy;
	}

	/* Enforce single-open semantics. */
	mutex_lock(&wfd_dev->dev_lock);
	if (wfd_dev->in_use) {
		WFD_MSG_ERR("Device already in use.\n");
		rc = -EBUSY;
		mutex_unlock(&wfd_dev->dev_lock);
		goto err_dev_busy;
	}
	wfd_dev->in_use = true;
	mutex_unlock(&wfd_dev->dev_lock);

	inst = kzalloc(sizeof(struct wfd_inst), GFP_KERNEL);
	if (!inst) {
		WFD_MSG_ERR("Could not allocate memory for "
			"wfd instance\n");
		rc = -ENOMEM;
		goto err_alloc;
	}
	filp->private_data = &inst->event_handler;
	mutex_init(&inst->lock);
	mutex_init(&inst->vb2_lock);
	INIT_LIST_HEAD(&inst->input_mem_list);
	INIT_LIST_HEAD(&inst->minfo_list);

	/* Set up userspace event handlers */
	v4l2_fh_init(&inst->event_handler, wfd_dev->pvdev);
	v4l2_fh_add(&inst->event_handler);

	wfd_stats_init(&inst->stats, MINOR(filp->f_dentry->d_inode->i_rdev));

	mdp_mops.secure = wfd_dev->secure;
	mdp_mops.iommu_split_domain = wfd_dev->mdp_iommu_split_domain;
	rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_OPEN,
			(void *)&mdp_mops);
	if (rc) {
		WFD_MSG_ERR("Failed to open mdp subdevice: %d\n", rc);
		goto err_mdp_open;
	}
	inst->mdp_inst = mdp_mops.cookie;

	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, load_fw);
	if (rc) {
		WFD_MSG_ERR("Failed to load video encoder firmware: %d\n", rc);
		goto err_venc;
	}
	enc_mops.op_buffer_done = venc_op_buffer_done;
	enc_mops.ip_buffer_done = venc_ip_buffer_done;
	enc_mops.on_event = venc_on_event;
	enc_mops.cbdata = filp;
	enc_mops.secure = wfd_dev->secure;
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, OPEN,
			(void *)&enc_mops);
	if (rc || !enc_mops.cookie) {
		WFD_MSG_ERR("Failed to open encoder subdevice: %d\n", rc);
		goto err_venc;
	}
	inst->venc_inst = enc_mops.cookie;

	vsg_mops.encode_frame = vsg_encode_frame;
	vsg_mops.release_input_frame = vsg_release_input_frame;
	vsg_mops.cbdata = filp;
	rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_OPEN,
			&vsg_mops);
	if (rc) {
		WFD_MSG_ERR("Failed to open vsg subdevice: %d\n", rc);
		goto err_vsg_open;
	}

	wfd_initialize_vb2_queue(&inst->vid_bufq, filp);
	wfd_set_default_properties(filp);
	WFD_MSG_DBG("wfd_open: X\n");
	return rc;

err_vsg_open:
	v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, CLOSE, NULL);
err_venc:
	v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
			MDP_CLOSE, (void *)inst->mdp_inst);
err_mdp_open:
	v4l2_fh_del(&inst->event_handler);
	kfree(inst);
err_alloc:
	mutex_lock(&wfd_dev->dev_lock);
	wfd_dev->in_use = false;
	mutex_unlock(&wfd_dev->dev_lock);
err_dev_busy:
	return rc;
}
/*
 * Device-node release(): tear down in the reverse order of wfd_open().
 * Streaming is stopped first so no callbacks arrive while the
 * subdevices are being closed; subdevice close failures are logged but
 * do not abort the teardown.  Finally the in_use flag is cleared so
 * the node can be opened again.
 */
static int wfd_close(struct file *filp)
{
	struct wfd_inst *inst;
	struct wfd_device *wfd_dev;
	int rc = 0;

	wfd_dev = video_drvdata(filp);
	WFD_MSG_DBG("wfd_close: E\n");
	inst = file_to_inst(filp);
	if (inst) {
		wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE);
		vb2_queue_release(&inst->vid_bufq);
		wfd_free_input_buffers(wfd_dev, inst);

		rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
				MDP_CLOSE, (void *)inst->mdp_inst);
		if (rc)
			WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc);

		rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
				CLOSE, (void *)inst->venc_inst);
		if (rc)
			WFD_MSG_ERR("Failed to CLOSE enc subdev: %d\n", rc);

		rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
				VSG_CLOSE, NULL);
		if (rc)
			WFD_MSG_ERR("Failed to CLOSE vsg subdev: %d\n", rc);

		wfd_stats_deinit(&inst->stats);
		v4l2_fh_del(&inst->event_handler);
		mutex_destroy(&inst->lock);
		mutex_destroy(&inst->vb2_lock);
		kfree(inst);
	}
	/* Allow the next open(). */
	mutex_lock(&wfd_dev->dev_lock);
	wfd_dev->in_use = false;
	mutex_unlock(&wfd_dev->dev_lock);
	WFD_MSG_DBG("wfd_close: X\n");
	return 0;
}
/*
 * poll(): report POLLPRI when a v4l2 event is pending and POLLERR once
 * the stream has been stopped (so blocked readers bail out).
 */
unsigned int wfd_poll(struct file *filp, struct poll_table_struct *pt)
{
	struct wfd_inst *inst = file_to_inst(filp);
	unsigned long mask = 0;
	bool stopped;

	poll_wait(filp, &inst->event_handler.wait, pt);

	/* Snapshot the flag under the instance lock. */
	mutex_lock(&inst->lock);
	stopped = inst->streamoff;
	mutex_unlock(&inst->lock);

	if (v4l2_event_pending(&inst->event_handler))
		mask |= POLLPRI;
	if (stopped)
		mask |= POLLERR;
	return mask;
}
/* File operations for the WFD video device node. */
static const struct v4l2_file_operations g_wfd_fops = {
	.owner = THIS_MODULE,
	.open = wfd_open,
	.release = wfd_close,
	.ioctl = video_ioctl2,
	.poll = wfd_poll,
};
/*
 * Intentionally empty video_device release callback: the device is
 * freed explicitly with video_device_release() in the setup/remove
 * paths, so nothing is done here.
 */
void release_video_device(struct video_device *pvdev)
{
}
static int wfd_dev_setup(struct wfd_device *wfd_dev, int dev_num,
struct platform_device *pdev)
{
int rc = 0;
rc = v4l2_device_register(&pdev->dev, &wfd_dev->v4l2_dev);
if (rc) {
WFD_MSG_ERR("Failed to register the video device\n");
goto err_v4l2_registration;
}
wfd_dev->pvdev = video_device_alloc();
if (!wfd_dev->pvdev) {
WFD_MSG_ERR("Failed to allocate video device\n");
goto err_video_device_alloc;
}
wfd_dev->pvdev->release = release_video_device;
wfd_dev->pvdev->fops = &g_wfd_fops;
wfd_dev->pvdev->ioctl_ops = &g_wfd_ioctl_ops;
rc = video_register_device(wfd_dev->pvdev, VFL_TYPE_GRABBER,
dev_num);
if (rc) {
WFD_MSG_ERR("Failed to register the device\n");
goto err_video_register_device;
}
video_set_drvdata(wfd_dev->pvdev, wfd_dev);
v4l2_subdev_init(&wfd_dev->mdp_sdev, &mdp_subdev_ops);
strncpy(wfd_dev->mdp_sdev.name, "wfd-mdp", V4L2_SUBDEV_NAME_SIZE);
rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev,
&wfd_dev->mdp_sdev);
if (rc) {
WFD_MSG_ERR("Failed to register mdp subdevice: %d\n", rc);
goto err_mdp_register_subdev;
}
v4l2_subdev_init(&wfd_dev->enc_sdev, &enc_subdev_ops);
strncpy(wfd_dev->enc_sdev.name, "wfd-venc", V4L2_SUBDEV_NAME_SIZE);
rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev,
&wfd_dev->enc_sdev);
if (rc) {
WFD_MSG_ERR("Failed to register encoder subdevice: %d\n", rc);
goto err_venc_register_subdev;
}
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, init, 0);
if (rc) {
WFD_MSG_ERR("Failed to initiate encoder device %d\n", rc);
goto err_venc_init;
}
v4l2_subdev_init(&wfd_dev->vsg_sdev, &vsg_subdev_ops);
strncpy(wfd_dev->vsg_sdev.name, "wfd-vsg", V4L2_SUBDEV_NAME_SIZE);
rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev,
&wfd_dev->vsg_sdev);
if (rc) {
WFD_MSG_ERR("Failed to register vsg subdevice: %d\n", rc);
goto err_venc_init;
}
WFD_MSG_DBG("__wfd_probe: X\n");
return rc;
err_venc_init:
v4l2_device_unregister_subdev(&wfd_dev->enc_sdev);
err_venc_register_subdev:
v4l2_device_unregister_subdev(&wfd_dev->mdp_sdev);
err_mdp_register_subdev:
video_unregister_device(wfd_dev->pvdev);
err_video_register_device:
video_device_release(wfd_dev->pvdev);
err_video_device_alloc:
v4l2_device_unregister(&wfd_dev->v4l2_dev);
err_v4l2_registration:
return rc;
}
/*
 * Platform probe: allocate an array of WFD_NUM_DEVICES devices, create
 * one shared ion client, and register each device via wfd_dev_setup(),
 * rolling back already-registered devices if any setup fails.
 *
 * NOTE(review): the error paths free wfd_dev but never destroy the ion
 * client or tear down debugfs stats -- confirm whether that leak is
 * acceptable on probe failure.  Also msm_ion_client_create() may
 * return an ERR_PTR rather than NULL on some kernels -- verify the
 * NULL check is the right test here.
 */
static int __devinit __wfd_probe(struct platform_device *pdev)
{
	int rc = 0, c = 0;
	struct wfd_device *wfd_dev; /* Should be taken as an array*/
	struct ion_client *ion_client = NULL;
	struct msm_wfd_platform_data *wfd_priv;

	WFD_MSG_DBG("__wfd_probe: E\n");
	wfd_dev = kzalloc(sizeof(*wfd_dev)*WFD_NUM_DEVICES, GFP_KERNEL);
	if (!wfd_dev) {
		WFD_MSG_ERR("Could not allocate memory for "
			"wfd device\n");
		rc = -ENOMEM;
		goto err_v4l2_probe;
	}
	/* Keep the original platform data locally; drvdata now points at
	 * the device array (read back in __wfd_remove). */
	wfd_priv = pdev->dev.platform_data;
	pdev->dev.platform_data = (void *) wfd_dev;

	ion_client = msm_ion_client_create(-1, "wfd");

	rc = wfd_stats_setup();
	if (rc) {
		WFD_MSG_ERR("No debugfs support: %d\n", rc);
		/* Don't treat this as a fatal err */
		rc = 0;
	}

	if (!ion_client) {
		WFD_MSG_ERR("Failed to create ion client\n");
		rc = -ENODEV;
		goto err_v4l2_probe;
	}

	for (c = 0; c < WFD_NUM_DEVICES; ++c) {
		rc = wfd_dev_setup(&wfd_dev[c],
			WFD_DEVICE_NUMBER_BASE + c, pdev);

		if (rc) {
			/* Clear out old devices */
			for (--c; c >= 0; --c) {
				v4l2_device_unregister_subdev(
						&wfd_dev[c].vsg_sdev);
				v4l2_device_unregister_subdev(
						&wfd_dev[c].enc_sdev);
				v4l2_device_unregister_subdev(
						&wfd_dev[c].mdp_sdev);
				video_unregister_device(wfd_dev[c].pvdev);
				video_device_release(wfd_dev[c].pvdev);
				v4l2_device_unregister(&wfd_dev[c].v4l2_dev);
			}

			goto err_v4l2_probe;
		}
		/* Other device specific stuff */
		mutex_init(&wfd_dev[c].dev_lock);
		wfd_dev[c].ion_client = ion_client;
		wfd_dev[c].in_use = false;
		if (wfd_priv && wfd_priv->wfd_check_mdp_iommu_split) {
			wfd_dev[c].mdp_iommu_split_domain =
				wfd_priv->wfd_check_mdp_iommu_split();
		}

		/* The device number decides whether this instance runs in
		 * secure mode. */
		switch (WFD_DEVICE_NUMBER_BASE + c) {
		case WFD_DEVICE_SECURE:
			wfd_dev[c].secure = true;
			break;
		default:
			break;
		}
	}
	WFD_MSG_DBG("__wfd_probe: X\n");
	return rc;

err_v4l2_probe:
	kfree(wfd_dev);
	return rc;
}
/*
 * Platform remove: unregister every device set up in __wfd_probe()
 * (subdevices, video node, v4l2 device), tear down the debugfs stats,
 * and free the device array stored in platform_data.
 */
static int __devexit __wfd_remove(struct platform_device *pdev)
{
	struct wfd_device *wfd_dev;
	int c = 0;

	/* probe() stashed the device array in platform_data. */
	wfd_dev = (struct wfd_device *)pdev->dev.platform_data;

	WFD_MSG_DBG("Inside wfd_remove\n");
	if (!wfd_dev) {
		WFD_MSG_ERR("Error removing WFD device");
		return -ENODEV;
	}

	wfd_stats_teardown();
	for (c = 0; c < WFD_NUM_DEVICES; ++c) {
		v4l2_device_unregister_subdev(&wfd_dev[c].vsg_sdev);
		v4l2_device_unregister_subdev(&wfd_dev[c].enc_sdev);
		v4l2_device_unregister_subdev(&wfd_dev[c].mdp_sdev);
		video_unregister_device(wfd_dev[c].pvdev);
		video_device_release(wfd_dev[c].pvdev);
		v4l2_device_unregister(&wfd_dev[c].v4l2_dev);
	}
	kfree(wfd_dev);
	return 0;
}
/* Device-tree match table for the WFD driver. */
static const struct of_device_id msm_wfd_dt_match[] = {
	{.compatible = "qcom,msm-wfd"},
	{}
};
/*
 * Fix: the exported module alias table must reference the table
 * declared above; the old code named "msm_vidc_dt_match", which
 * belongs to a different driver (copy-paste error) and breaks module
 * autoloading (and module builds, since that symbol is undefined
 * here).
 */
MODULE_DEVICE_TABLE(of, msm_wfd_dt_match);
/* Platform driver glue: matches "msm_wfd" (and the OF table above). */
static struct platform_driver wfd_driver = {
	.probe =  __wfd_probe,
	.remove = __wfd_remove,
	.driver = {
		.name = "msm_wfd",
		.owner = THIS_MODULE,
		.of_match_table = msm_wfd_dt_match,
	}
};
/* Module init: register the WFD platform driver. */
static int __init wfd_init(void)
{
	int rc;

	WFD_MSG_DBG("Calling init function of wfd driver\n");
	rc = platform_driver_register(&wfd_driver);
	if (rc)
		WFD_MSG_ERR("failed to load the driver\n");
	return rc;
}
/* Module exit: unregister the platform driver. */
static void __exit wfd_exit(void)
{
	WFD_MSG_DBG("wfd_exit: X\n");
	platform_driver_unregister(&wfd_driver);
}
module_init(wfd_init);
module_exit(wfd_exit);
| gpl-2.0 |
notro/linux-staging | drivers/video/fbdev/jz4740_fb.c | 2165 | 18999 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 SoC LCD framebuffer driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/dma-mapping.h>
#include <asm/mach-jz4740/jz4740_fb.h>
#include <asm/mach-jz4740/gpio.h>
#define JZ_REG_LCD_CFG 0x00
#define JZ_REG_LCD_VSYNC 0x04
#define JZ_REG_LCD_HSYNC 0x08
#define JZ_REG_LCD_VAT 0x0C
#define JZ_REG_LCD_DAH 0x10
#define JZ_REG_LCD_DAV 0x14
#define JZ_REG_LCD_PS 0x18
#define JZ_REG_LCD_CLS 0x1C
#define JZ_REG_LCD_SPL 0x20
#define JZ_REG_LCD_REV 0x24
#define JZ_REG_LCD_CTRL 0x30
#define JZ_REG_LCD_STATE 0x34
#define JZ_REG_LCD_IID 0x38
#define JZ_REG_LCD_DA0 0x40
#define JZ_REG_LCD_SA0 0x44
#define JZ_REG_LCD_FID0 0x48
#define JZ_REG_LCD_CMD0 0x4C
#define JZ_REG_LCD_DA1 0x50
#define JZ_REG_LCD_SA1 0x54
#define JZ_REG_LCD_FID1 0x58
#define JZ_REG_LCD_CMD1 0x5C
#define JZ_LCD_CFG_SLCD BIT(31)
#define JZ_LCD_CFG_PS_DISABLE BIT(23)
#define JZ_LCD_CFG_CLS_DISABLE BIT(22)
#define JZ_LCD_CFG_SPL_DISABLE BIT(21)
#define JZ_LCD_CFG_REV_DISABLE BIT(20)
#define JZ_LCD_CFG_HSYNCM BIT(19)
#define JZ_LCD_CFG_PCLKM BIT(18)
#define JZ_LCD_CFG_INV BIT(17)
#define JZ_LCD_CFG_SYNC_DIR BIT(16)
#define JZ_LCD_CFG_PS_POLARITY BIT(15)
#define JZ_LCD_CFG_CLS_POLARITY BIT(14)
#define JZ_LCD_CFG_SPL_POLARITY BIT(13)
#define JZ_LCD_CFG_REV_POLARITY BIT(12)
#define JZ_LCD_CFG_HSYNC_ACTIVE_LOW BIT(11)
#define JZ_LCD_CFG_PCLK_FALLING_EDGE BIT(10)
#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9)
#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8)
#define JZ_LCD_CFG_18_BIT BIT(7)
#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4))
#define JZ_LCD_CFG_MODE_MASK 0xf
#define JZ_LCD_CTRL_BURST_4 (0x0 << 28)
#define JZ_LCD_CTRL_BURST_8 (0x1 << 28)
#define JZ_LCD_CTRL_BURST_16 (0x2 << 28)
#define JZ_LCD_CTRL_RGB555 BIT(27)
#define JZ_LCD_CTRL_OFUP BIT(26)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_4 (0x1 << 24)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_2 (0x2 << 24)
#define JZ_LCD_CTRL_PDD_MASK (0xff << 16)
#define JZ_LCD_CTRL_EOF_IRQ BIT(13)
#define JZ_LCD_CTRL_SOF_IRQ BIT(12)
#define JZ_LCD_CTRL_OFU_IRQ BIT(11)
#define JZ_LCD_CTRL_IFU0_IRQ BIT(10)
#define JZ_LCD_CTRL_IFU1_IRQ BIT(9)
#define JZ_LCD_CTRL_DD_IRQ BIT(8)
#define JZ_LCD_CTRL_QDD_IRQ BIT(7)
#define JZ_LCD_CTRL_REVERSE_ENDIAN BIT(6)
#define JZ_LCD_CTRL_LSB_FISRT BIT(5)
#define JZ_LCD_CTRL_DISABLE BIT(4)
#define JZ_LCD_CTRL_ENABLE BIT(3)
#define JZ_LCD_CTRL_BPP_1 0x0
#define JZ_LCD_CTRL_BPP_2 0x1
#define JZ_LCD_CTRL_BPP_4 0x2
#define JZ_LCD_CTRL_BPP_8 0x3
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
#define JZ_LCD_CMD_SOF_IRQ BIT(31)
#define JZ_LCD_CMD_EOF_IRQ BIT(30)
#define JZ_LCD_CMD_ENABLE_PAL BIT(28)
#define JZ_LCD_SYNC_MASK 0x3ff
#define JZ_LCD_STATE_DISABLED BIT(0)
/*
 * DMA frame descriptor consumed by the JZ4740 LCD controller; the
 * layout is hardware-defined, hence __packed.  jzfb_alloc_devmem()
 * builds a single self-linked descriptor (next points back to itself).
 */
struct jzfb_framedesc {
	uint32_t next;	/* physical address of the next descriptor */
	uint32_t addr;	/* physical address of the frame data */
	uint32_t id;	/* frame id tag */
	uint32_t cmd;	/* command flags | transfer length in words */
} __packed;
/* Per-device driver state for the JZ4740 LCD framebuffer. */
struct jzfb {
	struct fb_info *fb;
	struct platform_device *pdev;
	void __iomem *base;		/* mapped LCD controller registers */
	struct resource *mem;
	struct jz4740_fb_platform_data *pdata;

	size_t vidmem_size;		/* page-aligned framebuffer size */
	void *vidmem;			/* CPU address of the framebuffer */
	dma_addr_t vidmem_phys;		/* DMA address of the framebuffer */
	struct jzfb_framedesc *framedesc;	/* DMA frame descriptor */
	dma_addr_t framedesc_phys;

	struct clk *ldclk;		/* LCD device clock */
	struct clk *lpclk;		/* LCD pixel clock */

	unsigned is_enabled:1;		/* panel currently running */
	struct mutex lock;		/* protects is_enabled + hw access */

	uint32_t pseudo_palette[16];	/* truecolor console palette */
};
/* Fixed framebuffer properties: packed-pixel truecolor, no panning. */
static const struct fb_fix_screeninfo jzfb_fix = {
	.id =		"JZ4740 FB",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_TRUECOLOR,
	.xpanstep =	0,
	.ypanstep =	0,
	.ywrapstep =	0,
	.accel =	FB_ACCEL_NONE,
};
/*
 * LCD control pins, ordered so a prefix of the array covers the
 * simpler panel types (see jzfb_num_ctrl_pins): the first four serve
 * generic panels, all eight the special TFT types.
 */
static const struct jz_gpio_bulk_request jz_lcd_ctrl_pins[] = {
	JZ_GPIO_BULK_PIN(LCD_PCLK),
	JZ_GPIO_BULK_PIN(LCD_HSYNC),
	JZ_GPIO_BULK_PIN(LCD_VSYNC),
	JZ_GPIO_BULK_PIN(LCD_DE),
	JZ_GPIO_BULK_PIN(LCD_PS),
	JZ_GPIO_BULK_PIN(LCD_REV),
	JZ_GPIO_BULK_PIN(LCD_CLS),
	JZ_GPIO_BULK_PIN(LCD_SPL),
};
/*
 * LCD data bus pins; jzfb_num_data_pins() selects how many of these
 * (8, 16 or 18) are claimed for the configured panel.
 */
static const struct jz_gpio_bulk_request jz_lcd_data_pins[] = {
	JZ_GPIO_BULK_PIN(LCD_DATA0),
	JZ_GPIO_BULK_PIN(LCD_DATA1),
	JZ_GPIO_BULK_PIN(LCD_DATA2),
	JZ_GPIO_BULK_PIN(LCD_DATA3),
	JZ_GPIO_BULK_PIN(LCD_DATA4),
	JZ_GPIO_BULK_PIN(LCD_DATA5),
	JZ_GPIO_BULK_PIN(LCD_DATA6),
	JZ_GPIO_BULK_PIN(LCD_DATA7),
	JZ_GPIO_BULK_PIN(LCD_DATA8),
	JZ_GPIO_BULK_PIN(LCD_DATA9),
	JZ_GPIO_BULK_PIN(LCD_DATA10),
	JZ_GPIO_BULK_PIN(LCD_DATA11),
	JZ_GPIO_BULK_PIN(LCD_DATA12),
	JZ_GPIO_BULK_PIN(LCD_DATA13),
	JZ_GPIO_BULK_PIN(LCD_DATA14),
	JZ_GPIO_BULK_PIN(LCD_DATA15),
	JZ_GPIO_BULK_PIN(LCD_DATA16),
	JZ_GPIO_BULK_PIN(LCD_DATA17),
};
/*
 * Number of entries of jz_lcd_ctrl_pins[] used by the configured panel
 * type: generic parallel panels use the first 4, 8-bit serial panels
 * the first 3, the special TFT types all 8; unknown types use none.
 */
static unsigned int jzfb_num_ctrl_pins(struct jzfb *jzfb)
{
	switch (jzfb->pdata->lcd_type) {
	case JZ_LCD_TYPE_GENERIC_16_BIT:
	case JZ_LCD_TYPE_GENERIC_18_BIT:
		return 4;
	case JZ_LCD_TYPE_8BIT_SERIAL:
		return 3;
	case JZ_LCD_TYPE_SPECIAL_TFT_1:
	case JZ_LCD_TYPE_SPECIAL_TFT_2:
	case JZ_LCD_TYPE_SPECIAL_TFT_3:
		return 8;
	default:
		return 0;
	}
}
/*
 * Number of entries of jz_lcd_data_pins[] used by the configured panel
 * type.  The special TFT types use 18 lines only when the platform
 * data requests 18 bpp, otherwise 16.
 */
static unsigned int jzfb_num_data_pins(struct jzfb *jzfb)
{
	switch (jzfb->pdata->lcd_type) {
	case JZ_LCD_TYPE_GENERIC_16_BIT:
		return 16;
	case JZ_LCD_TYPE_GENERIC_18_BIT:
		return 18;
	case JZ_LCD_TYPE_8BIT_SERIAL:
		return 8;
	case JZ_LCD_TYPE_SPECIAL_TFT_1:
	case JZ_LCD_TYPE_SPECIAL_TFT_2:
	case JZ_LCD_TYPE_SPECIAL_TFT_3:
		return (jzfb->pdata->bpp == 18) ? 18 : 16;
	default:
		return 0;
	}
}
/* Based on CNVT_TOHW macro from skeletonfb.c */
/*
 * Scale a 16-bit color component down to the bitfield width described
 * by @bf and shift it into position.  The "+ 0x7FFF - val" term rounds
 * to nearest instead of truncating.
 */
static inline uint32_t jzfb_convert_color_to_hw(unsigned val,
	struct fb_bitfield *bf)
{
	return (((val << bf->length) + 0x7FFF - val) >> 16) << bf->offset;
}
/*
 * fb_setcolreg: fill one entry of the 16-slot truecolor pseudo
 * palette used by the framebuffer console.
 */
static int jzfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			unsigned blue, unsigned transp, struct fb_info *fb)
{
	uint32_t *palette = fb->pseudo_palette;

	if (regno >= 16)
		return -EINVAL;

	palette[regno] = jzfb_convert_color_to_hw(red, &fb->var.red) |
			jzfb_convert_color_to_hw(green, &fb->var.green) |
			jzfb_convert_color_to_hw(blue, &fb->var.blue) |
			jzfb_convert_color_to_hw(transp, &fb->var.transp);
	return 0;
}
/*
 * Map the panel's nominal bpp to the depth the LCD controller actually
 * stores in memory: 18/24 bpp pixels occupy 32 bits, 15 bpp occupies
 * 16 bits, everything else is stored as-is.
 */
static int jzfb_get_controller_bpp(struct jzfb *jzfb)
{
	int bpp = jzfb->pdata->bpp;

	if (bpp == 18 || bpp == 24)
		return 32;
	if (bpp == 15)
		return 16;
	return bpp;
}
/*
 * Find the platform-data video mode whose resolution matches @var,
 * or NULL if no mode matches.
 */
static struct fb_videomode *jzfb_get_mode(struct jzfb *jzfb,
	struct fb_var_screeninfo *var)
{
	struct fb_videomode *modes = jzfb->pdata->modes;
	size_t i;

	for (i = 0; i < jzfb->pdata->num_modes; i++) {
		if (modes[i].xres == var->xres && modes[i].yres == var->yres)
			return &modes[i];
	}

	return NULL;
}
/*
 * fb_check_var: validate and normalize a requested mode.  Only the
 * controller depth or the panel's native depth are accepted, and the
 * resolution must match one of the platform-data modes.  The RGB
 * bitfield layout is then filled in for the panel's bpp (18 and 24/32
 * bpp pixels are stored in 32-bit words).
 */
static int jzfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fb)
{
	struct jzfb *jzfb = fb->par;
	struct fb_videomode *mode;

	if (var->bits_per_pixel != jzfb_get_controller_bpp(jzfb) &&
		var->bits_per_pixel != jzfb->pdata->bpp)
		return -EINVAL;

	mode = jzfb_get_mode(jzfb, var);
	if (mode == NULL)
		return -EINVAL;

	fb_videomode_to_var(var, mode);

	/* Fill in the component layout for the configured depth. */
	switch (jzfb->pdata->bpp) {
	case 8:
		break;
	case 15:
		var->red.offset = 10;
		var->red.length = 5;
		var->green.offset = 6;
		var->green.length = 5;
		var->blue.offset = 0;
		var->blue.length = 5;
		break;
	case 16:
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		break;
	case 18:
		var->red.offset = 16;
		var->red.length = 6;
		var->green.offset = 8;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 6;
		var->bits_per_pixel = 32;
		break;
	case 32:
	case 24:
		var->transp.offset = 24;
		var->transp.length = 8;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->bits_per_pixel = 32;
		break;
	default:
		break;
	}

	return 0;
}
/*
 * fb_set_par: program the LCD controller for the selected mode --
 * timing registers, pixel depth, sync polarities, and the pixel/device
 * clock rates derived from the mode's pixclock (or refresh rate for
 * modes without one).
 *
 * Fix: the temporary clock enable used clk_enable() but the matching
 * disable below used clk_disable_unprepare(), unbalancing the clock's
 * prepare count.  Use clk_prepare_enable() to mirror the
 * jzfb_enable()/jzfb_disable() pair.
 */
static int jzfb_set_par(struct fb_info *info)
{
	struct jzfb *jzfb = info->par;
	struct jz4740_fb_platform_data *pdata = jzfb->pdata;
	struct fb_var_screeninfo *var = &info->var;
	struct fb_videomode *mode;
	uint16_t hds, vds;
	uint16_t hde, vde;
	uint16_t ht, vt;
	uint32_t ctrl;
	uint32_t cfg;
	unsigned long rate;

	mode = jzfb_get_mode(jzfb, var);
	if (mode == NULL)
		return -EINVAL;

	if (mode == info->mode)
		return 0;

	info->mode = mode;

	/* Horizontal/vertical display start/end and total, in pixels/lines. */
	hds = mode->hsync_len + mode->left_margin;
	hde = hds + mode->xres;
	ht = hde + mode->right_margin;

	vds = mode->vsync_len + mode->upper_margin;
	vde = vds + mode->yres;
	vt = vde + mode->lower_margin;

	ctrl = JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_16;

	switch (pdata->bpp) {
	case 1:
		ctrl |= JZ_LCD_CTRL_BPP_1;
		break;
	case 2:
		ctrl |= JZ_LCD_CTRL_BPP_2;
		break;
	case 4:
		ctrl |= JZ_LCD_CTRL_BPP_4;
		break;
	case 8:
		ctrl |= JZ_LCD_CTRL_BPP_8;
		break;
	case 15:
		ctrl |= JZ_LCD_CTRL_RGB555; /* fallthrough */
	case 16:
		ctrl |= JZ_LCD_CTRL_BPP_15_16;
		break;
	case 18:
	case 24:
	case 32:
		ctrl |= JZ_LCD_CTRL_BPP_18_24;
		break;
	default:
		break;
	}

	cfg = pdata->lcd_type & 0xf;

	if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT))
		cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;

	if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT))
		cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW;

	if (pdata->pixclk_falling_edge)
		cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;

	if (pdata->date_enable_active_low)
		cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW;

	if (pdata->lcd_type == JZ_LCD_TYPE_GENERIC_18_BIT)
		cfg |= JZ_LCD_CFG_18_BIT;

	if (mode->pixclock) {
		rate = PICOS2KHZ(mode->pixclock) * 1000;
		mode->refresh = rate / vt / ht;
	} else {
		if (pdata->lcd_type == JZ_LCD_TYPE_8BIT_SERIAL)
			rate = mode->refresh * (vt + 2 * mode->xres) * ht;
		else
			rate = mode->refresh * vt * ht;

		mode->pixclock = KHZ2PICOS(rate / 1000);
	}

	mutex_lock(&jzfb->lock);
	/* Register writes need a running device clock; keep the enable
	 * bit set if the panel is already active. */
	if (!jzfb->is_enabled)
		clk_prepare_enable(jzfb->ldclk);
	else
		ctrl |= JZ_LCD_CTRL_ENABLE;

	switch (pdata->lcd_type) {
	case JZ_LCD_TYPE_SPECIAL_TFT_1:
	case JZ_LCD_TYPE_SPECIAL_TFT_2:
	case JZ_LCD_TYPE_SPECIAL_TFT_3:
		/*
		 * NOTE(review): .ps is written to both the PS and the REV
		 * register -- confirm whether special_tft_config has a
		 * separate .rev value that should be used here.
		 */
		writel(pdata->special_tft_config.spl, jzfb->base + JZ_REG_LCD_SPL);
		writel(pdata->special_tft_config.cls, jzfb->base + JZ_REG_LCD_CLS);
		writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_PS);
		writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_REV);
		break;
	default:
		cfg |= JZ_LCD_CFG_PS_DISABLE;
		cfg |= JZ_LCD_CFG_CLS_DISABLE;
		cfg |= JZ_LCD_CFG_SPL_DISABLE;
		cfg |= JZ_LCD_CFG_REV_DISABLE;
		break;
	}

	writel(mode->hsync_len, jzfb->base + JZ_REG_LCD_HSYNC);
	writel(mode->vsync_len, jzfb->base + JZ_REG_LCD_VSYNC);

	writel((ht << 16) | vt, jzfb->base + JZ_REG_LCD_VAT);
	writel((hds << 16) | hde, jzfb->base + JZ_REG_LCD_DAH);
	writel((vds << 16) | vde, jzfb->base + JZ_REG_LCD_DAV);

	writel(cfg, jzfb->base + JZ_REG_LCD_CFG);

	writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);

	if (!jzfb->is_enabled)
		clk_disable_unprepare(jzfb->ldclk);

	mutex_unlock(&jzfb->lock);

	clk_set_rate(jzfb->lpclk, rate);
	clk_set_rate(jzfb->ldclk, rate * 3);

	return 0;
}
/*
 * Power up the panel: enable the device clock, restore the LCD GPIO
 * pins, point the DMA engine at the frame descriptor chain, and set
 * the controller's enable bit.  Caller holds jzfb->lock.
 */
static void jzfb_enable(struct jzfb *jzfb)
{
	uint32_t ctrl;

	clk_prepare_enable(jzfb->ldclk);

	jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
	jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));

	writel(0, jzfb->base + JZ_REG_LCD_STATE);

	/* Hand the descriptor ring to the DMA engine. */
	writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);

	ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL);
	ctrl |= JZ_LCD_CTRL_ENABLE;
	ctrl &= ~JZ_LCD_CTRL_DISABLE;
	writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
}
/*
 * Power down the panel: request a graceful stop via the DISABLE bit,
 * busy-wait for the controller to report the disabled state, then
 * suspend the GPIO pins and gate the device clock.  Caller holds
 * jzfb->lock.
 *
 * NOTE(review): the poll loop has no timeout -- if the controller
 * never sets JZ_LCD_STATE_DISABLED this spins forever.
 */
static void jzfb_disable(struct jzfb *jzfb)
{
	uint32_t ctrl;

	ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL);
	ctrl |= JZ_LCD_CTRL_DISABLE;
	writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
	do {
		ctrl = readl(jzfb->base + JZ_REG_LCD_STATE);
	} while (!(ctrl & JZ_LCD_STATE_DISABLED));

	jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
	jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));

	clk_disable_unprepare(jzfb->ldclk);
}
/*
 * fb_blank: FB_BLANK_UNBLANK powers the panel up, every other blank
 * level powers it down.  No-op if the panel is already in the
 * requested state.
 */
static int jzfb_blank(int blank_mode, struct fb_info *info)
{
	struct jzfb *jzfb = info->par;
	unsigned int enable = (blank_mode == FB_BLANK_UNBLANK);

	mutex_lock(&jzfb->lock);
	if (jzfb->is_enabled == enable) {
		mutex_unlock(&jzfb->lock);
		return 0;
	}

	if (enable)
		jzfb_enable(jzfb);
	else
		jzfb_disable(jzfb);
	jzfb->is_enabled = enable;
	mutex_unlock(&jzfb->lock);

	return 0;
}
/*
 * Allocate the DMA frame descriptor and a framebuffer large enough for
 * the biggest supported mode, then build a single self-linked
 * descriptor over the whole buffer (cmd holds the length in 32-bit
 * words).  Pages are marked reserved so they can be mmap'ed to
 * userspace.
 */
static int jzfb_alloc_devmem(struct jzfb *jzfb)
{
	int max_videosize = 0;
	struct fb_videomode *mode = jzfb->pdata->modes;
	void *page;
	int i;

	/* Size the buffer for the largest mode in the platform data. */
	for (i = 0; i < jzfb->pdata->num_modes; ++mode, ++i) {
		if (max_videosize < mode->xres * mode->yres)
			max_videosize = mode->xres * mode->yres;
	}

	max_videosize *= jzfb_get_controller_bpp(jzfb) >> 3;

	jzfb->framedesc = dma_alloc_coherent(&jzfb->pdev->dev,
					sizeof(*jzfb->framedesc),
					&jzfb->framedesc_phys, GFP_KERNEL);

	if (!jzfb->framedesc)
		return -ENOMEM;

	jzfb->vidmem_size = PAGE_ALIGN(max_videosize);
	jzfb->vidmem = dma_alloc_coherent(&jzfb->pdev->dev,
					jzfb->vidmem_size,
					&jzfb->vidmem_phys, GFP_KERNEL);

	if (!jzfb->vidmem)
		goto err_free_framedesc;

	/* Reserve the pages so remap/mmap of the framebuffer works. */
	for (page = jzfb->vidmem;
		page < jzfb->vidmem + PAGE_ALIGN(jzfb->vidmem_size);
		page += PAGE_SIZE) {
		SetPageReserved(virt_to_page(page));
	}

	/* Single descriptor looping on itself; id is an arbitrary tag. */
	jzfb->framedesc->next = jzfb->framedesc_phys;
	jzfb->framedesc->addr = jzfb->vidmem_phys;
	jzfb->framedesc->id = 0xdeafbead;
	jzfb->framedesc->cmd = 0;
	jzfb->framedesc->cmd |= max_videosize / 4;

	return 0;

err_free_framedesc:
	dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc),
				jzfb->framedesc, jzfb->framedesc_phys);
	return -ENOMEM;
}
/*
 * jzfb_free_devmem - release the buffers from jzfb_alloc_devmem().
 *
 * NOTE(review): the video pages were marked reserved at allocation time
 * and there is no matching ClearPageReserved() here -- verify whether
 * one is required before dma_free_coherent().
 */
static void jzfb_free_devmem(struct jzfb *jzfb)
{
	dma_free_coherent(&jzfb->pdev->dev, jzfb->vidmem_size,
			jzfb->vidmem, jzfb->vidmem_phys);
	dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc),
			jzfb->framedesc, jzfb->framedesc_phys);
}
/*
 * Framebuffer operations.  Drawing uses the generic sys_* helpers,
 * which operate on the CPU-addressable video memory mapped at
 * fb->screen_base.
 */
static struct fb_ops jzfb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= jzfb_check_var,
	.fb_set_par	= jzfb_set_par,
	.fb_blank	= jzfb_blank,
	.fb_fillrect	= sys_fillrect,
	.fb_copyarea	= sys_copyarea,
	.fb_imageblit	= sys_imageblit,
	.fb_setcolreg	= jzfb_setcolreg,
};
/*
 * jzfb_probe - bind the driver to the jz4740-fb platform device.
 *
 * Allocates the fb_info/jzfb pair, acquires clocks and the register
 * window, allocates DMA video memory, programs the first mode from the
 * platform data and registers the framebuffer.  On failure everything
 * acquired so far is released through the error labels.
 */
static int jzfb_probe(struct platform_device *pdev)
{
	int ret;
	struct jzfb *jzfb;
	struct fb_info *fb;
	struct jz4740_fb_platform_data *pdata = pdev->dev.platform_data;
	struct resource *mem;

	if (!pdata) {
		dev_err(&pdev->dev, "Missing platform data\n");
		return -ENXIO;
	}

	/* fb->par is the driver-private struct jzfb. */
	fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev);
	if (!fb) {
		dev_err(&pdev->dev, "Failed to allocate framebuffer device\n");
		return -ENOMEM;
	}

	fb->fbops = &jzfb_ops;
	fb->flags = FBINFO_DEFAULT;

	jzfb = fb->par;
	jzfb->pdev = pdev;
	jzfb->pdata = pdata;

	/* Clocks and the register mapping are devm-managed. */
	jzfb->ldclk = devm_clk_get(&pdev->dev, "lcd");
	if (IS_ERR(jzfb->ldclk)) {
		ret = PTR_ERR(jzfb->ldclk);
		dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret);
		goto err_framebuffer_release;
	}

	jzfb->lpclk = devm_clk_get(&pdev->dev, "lcd_pclk");
	if (IS_ERR(jzfb->lpclk)) {
		ret = PTR_ERR(jzfb->lpclk);
		dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret);
		goto err_framebuffer_release;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	jzfb->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(jzfb->base)) {
		ret = PTR_ERR(jzfb->base);
		goto err_framebuffer_release;
	}

	platform_set_drvdata(pdev, jzfb);

	mutex_init(&jzfb->lock);

	/* Seed fb->var from the first platform-data mode. */
	fb_videomode_to_modelist(pdata->modes, pdata->num_modes,
				&fb->modelist);
	fb_videomode_to_var(&fb->var, pdata->modes);
	fb->var.bits_per_pixel = pdata->bpp;
	jzfb_check_var(&fb->var, fb);

	ret = jzfb_alloc_devmem(jzfb);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate video memory\n");
		goto err_framebuffer_release;
	}

	fb->fix = jzfb_fix;
	fb->fix.line_length = fb->var.bits_per_pixel * fb->var.xres / 8;
	fb->fix.mmio_start = mem->start;
	fb->fix.mmio_len = resource_size(mem);
	fb->fix.smem_start = jzfb->vidmem_phys;
	fb->fix.smem_len = fb->fix.line_length * fb->var.yres;
	fb->screen_base = jzfb->vidmem;
	fb->pseudo_palette = jzfb->pseudo_palette;

	/* NOTE(review): fb_alloc_cmap() can fail (-ENOMEM) but its return
	 * value is ignored here -- consider checking it. */
	fb_alloc_cmap(&fb->cmap, 256, 0);

	/* Start with the controller enabled and program the mode. */
	clk_prepare_enable(jzfb->ldclk);
	jzfb->is_enabled = 1;

	writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);

	fb->mode = NULL;
	jzfb_set_par(fb);

	jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
	jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));

	ret = register_framebuffer(fb);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret);
		goto err_free_devmem;
	}

	jzfb->fb = fb;

	return 0;

err_free_devmem:
	jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
	jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));

	fb_dealloc_cmap(&fb->cmap);
	jzfb_free_devmem(jzfb);
err_framebuffer_release:
	framebuffer_release(fb);
	return ret;
}
/*
 * jzfb_remove - unbind: blank the display, then release pins, colormap,
 * DMA memory and the fb_info (reverse of jzfb_probe()).
 *
 * NOTE(review): there is no unregister_framebuffer() call before
 * framebuffer_release() even though probe registered the device --
 * verify this is intentional.
 */
static int jzfb_remove(struct platform_device *pdev)
{
	struct jzfb *jzfb = platform_get_drvdata(pdev);

	/* Powers the controller down via jzfb_blank()/jzfb_disable(). */
	jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb);

	jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
	jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));

	fb_dealloc_cmap(&jzfb->fb->cmap);
	jzfb_free_devmem(jzfb);

	framebuffer_release(jzfb->fb);

	return 0;
}
#ifdef CONFIG_PM

/*
 * jzfb_suspend - system sleep callback.
 *
 * Tells fbcon to stop touching the device (under console_lock) and then
 * shuts the controller down if it is running.  jzfb->is_enabled is
 * deliberately left set so jzfb_resume() knows to switch it back on.
 */
static int jzfb_suspend(struct device *dev)
{
	struct jzfb *jzfb = dev_get_drvdata(dev);

	console_lock();
	fb_set_suspend(jzfb->fb, 1);
	console_unlock();

	mutex_lock(&jzfb->lock);
	if (jzfb->is_enabled)
		jzfb_disable(jzfb);
	mutex_unlock(&jzfb->lock);

	return 0;
}

/*
 * jzfb_resume - system wake callback, mirror image of jzfb_suspend().
 *
 * NOTE(review): the unconditional clk_prepare_enable() here has no
 * matching disable in jzfb_suspend(), and jzfb_enable() enables the
 * clock again -- the enable counts look asymmetric over a suspend/
 * resume cycle; verify the clock refcounting.
 */
static int jzfb_resume(struct device *dev)
{
	struct jzfb *jzfb = dev_get_drvdata(dev);

	clk_prepare_enable(jzfb->ldclk);

	mutex_lock(&jzfb->lock);
	if (jzfb->is_enabled)
		jzfb_enable(jzfb);
	mutex_unlock(&jzfb->lock);

	console_lock();
	fb_set_suspend(jzfb->fb, 0);
	console_unlock();

	return 0;
}

/* Reuse the same handlers for suspend/resume and hibernation. */
static const struct dev_pm_ops jzfb_pm_ops = {
	.suspend	= jzfb_suspend,
	.resume		= jzfb_resume,
	.poweroff	= jzfb_suspend,
	.restore	= jzfb_resume,
};

#define JZFB_PM_OPS (&jzfb_pm_ops)

#else
#define JZFB_PM_OPS NULL
#endif
/* Platform driver glue; JZFB_PM_OPS is NULL when CONFIG_PM is off. */
static struct platform_driver jzfb_driver = {
	.probe	= jzfb_probe,
	.remove	= jzfb_remove,
	.driver	= {
		.name	= "jz4740-fb",
		.pm	= JZFB_PM_OPS,
	},
};
module_platform_driver(jzfb_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 SoC LCD framebuffer driver");
MODULE_ALIAS("platform:jz4740-fb");
| gpl-2.0 |
zjh3123629/qt210-linux | arch/arm/mach-s3c64xx/dev-audio.c | 2933 | 6768 | /* linux/arch/arm/plat-s3c/dev-audio.c
*
* Copyright 2009 Wolfson Microelectronics
* Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <mach/irqs.h>
#include <mach/map.h>
#include <mach/dma.h>
#include <plat/devs.h>
#include <plat/audio.h>
#include <plat/gpio-cfg.h>
/* Root clock source names handed to the samsung-i2s driver via the
 * src_clk field of the I2S platform data below. */
static const char *rclksrc[] = {
	[0] = "iis",
	[1] = "audio-bus",
};
/*
 * s3c64xx_i2s_cfg_gpio - route the pins for one of the I2S blocks.
 *
 * Controllers 0 and 1 use five consecutive pins in banks GPD and GPE
 * respectively (special function 3); the v4 controller (id 2) uses a
 * scattered set of GPC and GPH pins on special function 5.
 *
 * Returns 0 on success or -EINVAL for an unknown controller id.
 */
static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
{
	unsigned int base;

	if (pdev->id == 2) {
		/* I2Sv4: individually routed pins. */
		s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
		s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
		s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
		s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
		return 0;
	}

	if (pdev->id == 0)
		base = S3C64XX_GPD(0);
	else if (pdev->id == 1)
		base = S3C64XX_GPE(0);
	else {
		printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
			pdev->id);
		return -EINVAL;
	}

	s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));

	return 0;
}
/* I2S controller 0: register window plus playback/capture DMA channels. */
static struct resource s3c64xx_iis0_resource[] = {
	[0] = {
		.start	= S3C64XX_PA_IIS0,
		.end	= S3C64XX_PA_IIS0 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DMACH_I2S0_OUT,
		.end	= DMACH_I2S0_OUT,
		.flags	= IORESOURCE_DMA,
	},
	[2] = {
		.start	= DMACH_I2S0_IN,
		.end	= DMACH_I2S0_IN,
		.flags	= IORESOURCE_DMA,
	},
};

/* Shared platform data for both I2Sv3 controllers (iis0/iis1). */
static struct s3c_audio_pdata i2sv3_pdata = {
	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
	.type = {
		.i2s = {
			.src_clk = rclksrc,
		},
	},
};

struct platform_device s3c64xx_device_iis0 = {
	.name		= "samsung-i2s",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(s3c64xx_iis0_resource),
	.resource	= s3c64xx_iis0_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_iis0);
/* I2S controller 1: same layout as iis0, shares i2sv3_pdata. */
static struct resource s3c64xx_iis1_resource[] = {
	[0] = {
		.start	= S3C64XX_PA_IIS1,
		.end	= S3C64XX_PA_IIS1 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DMACH_I2S1_OUT,
		.end	= DMACH_I2S1_OUT,
		.flags	= IORESOURCE_DMA,
	},
	[2] = {
		.start	= DMACH_I2S1_IN,
		.end	= DMACH_I2S1_IN,
		.flags	= IORESOURCE_DMA,
	},
};

struct platform_device s3c64xx_device_iis1 = {
	.name		= "samsung-i2s",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(s3c64xx_iis1_resource),
	.resource	= s3c64xx_iis1_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_iis1);
/* I2Sv4 controller: uses the HSI DMA channels and has 6-channel
 * primary playback (QUIRK_PRI_6CHAN). */
static struct resource s3c64xx_iisv4_resource[] = {
	[0] = {
		.start	= S3C64XX_PA_IISV4,
		.end	= S3C64XX_PA_IISV4 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DMACH_HSI_I2SV40_TX,
		.end	= DMACH_HSI_I2SV40_TX,
		.flags	= IORESOURCE_DMA,
	},
	[2] = {
		.start	= DMACH_HSI_I2SV40_RX,
		.end	= DMACH_HSI_I2SV40_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct s3c_audio_pdata i2sv4_pdata = {
	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
	.type = {
		.i2s = {
			.quirks = QUIRK_PRI_6CHAN,
			.src_clk = rclksrc,
		},
	},
};

struct platform_device s3c64xx_device_iisv4 = {
	.name		= "samsung-i2s",
	.id		= 2,
	.num_resources	= ARRAY_SIZE(s3c64xx_iisv4_resource),
	.resource	= s3c64xx_iisv4_resource,
	.dev = {
		.platform_data = &i2sv4_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_iisv4);
/* PCM Controller platform_devices */
/*
 * s3c64xx_pcm_cfg_gpio - route the pins for one of the PCM blocks.
 *
 * PCM0 lives on bank GPD and PCM1 on bank GPE; both use five
 * consecutive pins on special function 2.
 *
 * Returns 0 on success or -EINVAL for an unknown controller id.
 */
static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
{
	unsigned int base;

	if (pdev->id == 0)
		base = S3C64XX_GPD(0);
	else if (pdev->id == 1)
		base = S3C64XX_GPE(0);
	else {
		printk(KERN_DEBUG "Invalid PCM Controller number: %d\n",
			pdev->id);
		return -EINVAL;
	}

	s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));

	return 0;
}
/* PCM controller 0: register window plus TX/RX DMA channels. */
static struct resource s3c64xx_pcm0_resource[] = {
	[0] = {
		.start	= S3C64XX_PA_PCM0,
		.end	= S3C64XX_PA_PCM0 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DMACH_PCM0_TX,
		.end	= DMACH_PCM0_TX,
		.flags	= IORESOURCE_DMA,
	},
	[2] = {
		.start	= DMACH_PCM0_RX,
		.end	= DMACH_PCM0_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct s3c_audio_pdata s3c_pcm0_pdata = {
	.cfg_gpio = s3c64xx_pcm_cfg_gpio,
};

struct platform_device s3c64xx_device_pcm0 = {
	.name		= "samsung-pcm",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(s3c64xx_pcm0_resource),
	.resource	= s3c64xx_pcm0_resource,
	.dev = {
		.platform_data = &s3c_pcm0_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_pcm0);
/* PCM controller 1: same layout as pcm0. */
static struct resource s3c64xx_pcm1_resource[] = {
	[0] = {
		.start	= S3C64XX_PA_PCM1,
		.end	= S3C64XX_PA_PCM1 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DMACH_PCM1_TX,
		.end	= DMACH_PCM1_TX,
		.flags	= IORESOURCE_DMA,
	},
	[2] = {
		.start	= DMACH_PCM1_RX,
		.end	= DMACH_PCM1_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct s3c_audio_pdata s3c_pcm1_pdata = {
	.cfg_gpio = s3c64xx_pcm_cfg_gpio,
};

struct platform_device s3c64xx_device_pcm1 = {
	.name		= "samsung-pcm",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(s3c64xx_pcm1_resource),
	.resource	= s3c64xx_pcm1_resource,
	.dev = {
		.platform_data = &s3c_pcm1_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_pcm1);
/* AC97 Controller platform devices */
/* Route the five AC97 lines to bank GPD, special function 4. */
static int s3c64xx_ac97_cfg_gpd(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(S3C64XX_GPD(0), 5, S3C_GPIO_SFN(4));
}

/* Route the five AC97 lines to bank GPE, special function 4. */
static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(S3C64XX_GPE(0), 5, S3C_GPIO_SFN(4));
}
/* AC97 controller: register window, three DMA channels (PCM out/in
 * and mic in) and its interrupt line. */
static struct resource s3c64xx_ac97_resource[] = {
	[0] = {
		.start	= S3C64XX_PA_AC97,
		.end	= S3C64XX_PA_AC97 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DMACH_AC97_PCMOUT,
		.end	= DMACH_AC97_PCMOUT,
		.flags	= IORESOURCE_DMA,
	},
	[2] = {
		.start	= DMACH_AC97_PCMIN,
		.end	= DMACH_AC97_PCMIN,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= DMACH_AC97_MICIN,
		.end	= DMACH_AC97_MICIN,
		.flags	= IORESOURCE_DMA,
	},
	[4] = {
		.start	= IRQ_AC97,
		.end	= IRQ_AC97,
		.flags	= IORESOURCE_IRQ,
	},
};

/* cfg_gpio is filled in at board setup time, see
 * s3c64xx_ac97_setup_gpio(). */
static struct s3c_audio_pdata s3c_ac97_pdata;

static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);

struct platform_device s3c64xx_device_ac97 = {
	.name		= "samsung-ac97",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(s3c64xx_ac97_resource),
	.resource	= s3c64xx_ac97_resource,
	.dev = {
		.platform_data		= &s3c_ac97_pdata,
		.dma_mask		= &s3c64xx_ac97_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
EXPORT_SYMBOL(s3c64xx_device_ac97);
/*
 * s3c64xx_ac97_setup_gpio - select which GPIO bank carries the AC97
 * lines.  Called from board setup code; GPD is used when @num is
 * S3C64XX_AC97_GPD, GPE otherwise.
 */
void __init s3c64xx_ac97_setup_gpio(int num)
{
	s3c_ac97_pdata.cfg_gpio = (num == S3C64XX_AC97_GPD) ?
			s3c64xx_ac97_cfg_gpd : s3c64xx_ac97_cfg_gpe;
}
| gpl-2.0 |
JerryScript/VaeVictus | drivers/scsi/device_handler/scsi_dh_emc.c | 2933 | 18707 | /*
* Target driver for EMC CLARiiON AX/CX-series hardware.
* Based on code from Lars Marowsky-Bree <lmb@suse.de>
* and Ed Goggin <egoggin@emc.com>.
*
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* Copyright (C) 2006 Mike Christie
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_device.h>
#define CLARIION_NAME "emc"
#define CLARIION_TRESPASS_PAGE 0x22
#define CLARIION_BUFFER_SIZE 0xFC
#define CLARIION_TIMEOUT (60 * HZ)
#define CLARIION_RETRIES 3
#define CLARIION_UNBOUND_LU -1
#define CLARIION_SP_A 0
#define CLARIION_SP_B 1
/* Flags */
#define CLARIION_SHORT_TRESPASS 1
#define CLARIION_HONOR_RESERVATIONS 2
/* LUN states */
#define CLARIION_LUN_UNINITIALIZED -1
#define CLARIION_LUN_UNBOUND 0
#define CLARIION_LUN_BOUND 1
#define CLARIION_LUN_OWNED 2
/*
 * MODE SELECT page 0x22 payload templates used to trespass a LUN to
 * this storage processor.  Bit 7 of the trespass code byte (byte 10 of
 * the long form, byte 6 of the short form) is adjusted at runtime --
 * see send_trespass_cmd().
 */
static unsigned char long_trespass[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	CLARIION_TRESPASS_PAGE,	/* Page code */
	0x09,			/* Page length - 2 */
	0x01,			/* Trespass code */
	0xff, 0xff,		/* Trespass target */
	0, 0, 0, 0, 0, 0	/* Reserved bytes / unknown */
};

static unsigned char short_trespass[] = {
	0, 0, 0, 0,
	CLARIION_TRESPASS_PAGE,	/* Page code */
	0x02,			/* Page length - 2 */
	0x01,			/* Trespass code */
	0xff,			/* Trespass target */
};

/* Human readable names for clariion_dh_data::lun_state values 0..2. */
static const char * lun_state[] =
{
	"not bound",
	"bound",
	"owned",
};
/*
 * Per-device handler state, stored in the scsi_dh_data buffer attached
 * to the scsi_device.
 */
struct clariion_dh_data {
	/*
	 * Flags:
	 * CLARIION_SHORT_TRESPASS
	 * Use short trespass command (FC-series) or the long version
	 * (default for AX/CX CLARiiON arrays).
	 *
	 * CLARIION_HONOR_RESERVATIONS
	 * Whether or not (default) to honor SCSI reservations when
	 * initiating a switch-over.
	 */
	unsigned flags;
	/*
	 * I/O buffer for both MODE_SELECT and INQUIRY commands.
	 */
	unsigned char buffer[CLARIION_BUFFER_SIZE];
	/*
	 * SCSI sense buffer for commands -- assumes serial issuance
	 * and completion sequence of all commands for same multipath.
	 */
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	unsigned int senselen;	/* valid bytes in sense[] after a command */
	/*
	 * LUN state (CLARIION_LUN_*)
	 */
	int lun_state;
	/*
	 * SP Port number
	 */
	int port;
	/*
	 * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
	 * path's mapped LUN
	 */
	int default_sp;
	/*
	 * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
	 * path's mapped LUN
	 */
	int current_sp;
};
/* Fetch the handler state hanging off the scsi_device; the handler
 * must be attached (state non-NULL) or this BUGs. */
static inline struct clariion_dh_data
			*get_clariion_data(struct scsi_device *sdev)
{
	BUG_ON(!sdev->scsi_dh_data);
	return (struct clariion_dh_data *)sdev->scsi_dh_data->buf;
}
/*
 * trespass_endio - decode the sense data returned for a failed trespass
 * MODE SELECT and map it to a SCSI_DH_* status.
 *
 * Returns SCSI_DH_DEV_TEMP_BUSY for transient conditions (array based
 * copy in progress, in-progress ucode upgrade/NDU),
 * SCSI_DH_DEV_FAILED for any other decodable sense, and SCSI_DH_IO
 * when no sense data is available.
 *
 * Fix: scsi_normalize_sense() returns non-zero on success, so the
 * original "if (!scsi_normalize_sense(...))" entered the decode branch
 * only when the sense buffer could NOT be parsed (printing zeroed
 * sshdr fields), while valid sense fell through to the "no sense
 * available" message.  The test is now the right way around.
 */
static int trespass_endio(struct scsi_device *sdev, char *sense)
{
	int err = SCSI_DH_IO;
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: failed to send MODE SELECT, no sense available\n",
			    CLARIION_NAME);
		return err;
	}

	sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
		    "0x%2x, 0x%2x while sending CLARiiON trespass "
		    "command.\n", CLARIION_NAME, sshdr.sense_key,
		    sshdr.asc, sshdr.ascq);

	if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
	    (sshdr.ascq == 0x00)) {
		/*
		 * Array based copy in progress -- do not send
		 * mode_select or copy will be aborted mid-stream.
		 */
		sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
			    "progress while sending CLARiiON trespass "
			    "command.\n", CLARIION_NAME);
		err = SCSI_DH_DEV_TEMP_BUSY;
	} else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
		   (sshdr.ascq == 0x03)) {
		/*
		 * LUN Not Ready - Manual Intervention Required
		 * indicates in-progress ucode upgrade (NDU).
		 */
		sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
			    "ucode upgrade NDU operation while sending "
			    "CLARiiON trespass command.\n", CLARIION_NAME);
		err = SCSI_DH_DEV_TEMP_BUSY;
	} else
		err = SCSI_DH_DEV_FAILED;

	return err;
}
/*
 * parse_sp_info_reply - decode the EVPD page 0xC0 response left in
 * csdev->buffer and cache the LUN/SP state.
 *
 * Fills csdev->default_sp, lun_state, current_sp and port.  Returns
 * SCSI_DH_OK on success, SCSI_DH_DEV_TEMP_BUSY while a firmware
 * upgrade (NDU) is in progress, or SCSI_DH_NOSYS for a bad page
 * format or an unsupported failover mode.
 */
static int parse_sp_info_reply(struct scsi_device *sdev,
			       struct clariion_dh_data *csdev)
{
	int err = SCSI_DH_OK;

	/* check for in-progress ucode upgrade (NDU) */
	if (csdev->buffer[48] != 0) {
		sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
			    "ucode upgrade NDU operation while finding "
			    "current active SP.", CLARIION_NAME);
		err = SCSI_DH_DEV_TEMP_BUSY;
		goto out;
	}
	/* Byte 4 is the LUN state; anything above CLARIION_LUN_OWNED (2)
	 * means the page layout is not what we expect. */
	if (csdev->buffer[4] > 2) {
		/* Invalid buffer format */
		sdev_printk(KERN_NOTICE, sdev,
			    "%s: invalid VPD page 0xC0 format\n",
			    CLARIION_NAME);
		err = SCSI_DH_NOSYS;
		goto out;
	}
	/* Low nibble of byte 28 encodes the configured failover mode. */
	switch (csdev->buffer[28] & 0x0f) {
	case 6:
		sdev_printk(KERN_NOTICE, sdev,
			    "%s: ALUA failover mode detected\n",
			    CLARIION_NAME);
		break;
	case 4:
		/* Linux failover */
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "%s: Invalid failover mode %d\n",
			    CLARIION_NAME, csdev->buffer[28] & 0x0f);
		err = SCSI_DH_NOSYS;
		goto out;
	}

	/* Cache the interesting fields of the page. */
	csdev->default_sp = csdev->buffer[5];
	csdev->lun_state = csdev->buffer[4];
	csdev->current_sp = csdev->buffer[8];
	csdev->port = csdev->buffer[7];

out:
	return err;
}
#define emc_default_str "FC (Legacy)"

/*
 * parse_sp_model - extract the storage-processor model string from a
 * standard INQUIRY response.
 *
 * @sdev:   device the INQUIRY was sent to (used for logging only)
 * @buffer: raw INQUIRY data
 *
 * Returns a pointer to a NUL-terminated model string inside @buffer
 * (terminated in place), the static "FC (Legacy)" string for old FC
 * arrays without extended information, or NULL if the data is
 * malformed.
 */
static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
{
	/*
	 * Total INQUIRY length = "additional length" byte + 5 header
	 * bytes.  Fix: this was 'unsigned char', so buffer[4] >= 251
	 * wrapped the sum past 255 and could make a long reply look
	 * shorter than 160 bytes; use a full unsigned int instead.
	 */
	unsigned int len = buffer[4] + 5;
	char *sp_model = NULL;
	unsigned char sp_len, serial_len;

	if (len < 160) {
		sdev_printk(KERN_WARNING, sdev,
			    "%s: Invalid information section length %d\n",
			    CLARIION_NAME, len);
		/* Check for old FC arrays */
		if (!strncmp(buffer + 8, "DGC", 3)) {
			/* Old FC array, not supporting extended information */
			sp_model = emc_default_str;
		}
		goto out;
	}

	/*
	 * Parse extended information for SP model number
	 */
	serial_len = buffer[160];
	if (serial_len == 0 || serial_len + 161 > len) {
		sdev_printk(KERN_WARNING, sdev,
			    "%s: Invalid array serial number length %d\n",
			    CLARIION_NAME, serial_len);
		goto out;
	}
	sp_len = buffer[99];
	if (sp_len == 0 || serial_len + sp_len + 161 > len) {
		sdev_printk(KERN_WARNING, sdev,
			    "%s: Invalid model number length %d\n",
			    CLARIION_NAME, sp_len);
		goto out;
	}
	sp_model = &buffer[serial_len + 161];
	/* Strip whitespace at the end */
	while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
		sp_len--;

	sp_model[sp_len] = '\0';

out:
	return sp_model;
}
/*
 * get_req - build a REQ_TYPE_BLOCK_PC request for the handler's
 * internal commands.  Currently limited to MODE_SELECT /
 * MODE_SELECT_10 (trespass) and INQUIRY (VPD page 0xC0) commands.
 *
 * Uses data and sense buffers in hardware handler context structure
 * and assumes serial servicing of commands, both issuance and
 * completion.  Returns NULL if the request or its data mapping cannot
 * be allocated.
 */
static struct request *get_req(struct scsi_device *sdev, int cmd,
			unsigned char *buffer)
{
	struct request *rq;
	int len = 0;

	/* Trespass commands write to the device, INQUIRY reads from it. */
	rq = blk_get_request(sdev->request_queue,
			(cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
	if (!rq) {
		sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
		return NULL;
	}

	rq->cmd_len = COMMAND_SIZE(cmd);
	rq->cmd[0] = cmd;

	switch (cmd) {
	case MODE_SELECT:
		len = sizeof(short_trespass);
		rq->cmd[1] = 0x10;	/* PF (page format) bit */
		rq->cmd[4] = len;	/* parameter list length */
		break;
	case MODE_SELECT_10:
		len = sizeof(long_trespass);
		rq->cmd[1] = 0x10;	/* PF (page format) bit */
		rq->cmd[8] = len;	/* parameter list length */
		break;
	case INQUIRY:
		len = CLARIION_BUFFER_SIZE;
		rq->cmd[4] = len;	/* allocation length */
		memset(buffer, 0, len);
		break;
	default:
		BUG_ON(1);
		break;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* Fail fast; retries are handled by this device handler itself. */
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->timeout = CLARIION_TIMEOUT;
	rq->retries = CLARIION_RETRIES;

	if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
		blk_put_request(rq);
		return NULL;
	}

	return rq;
}
/*
 * send_inquiry_cmd - issue a standard (@page == 0) or EVPD INQUIRY and
 * leave the response in csdev->buffer.
 *
 * On -EIO the amount of sense data is recorded in csdev->senselen so
 * callers can decode csdev->sense.  Returns a SCSI_DH_* status.
 */
static int send_inquiry_cmd(struct scsi_device *sdev, int page,
			struct clariion_dh_data *csdev)
{
	struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
	int err;

	if (!rq)
		return SCSI_DH_RES_TEMP_UNAVAIL;

	rq->sense = csdev->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = csdev->senselen = 0;

	rq->cmd[0] = INQUIRY;
	if (page != 0) {
		rq->cmd[1] = 1;		/* EVPD bit */
		rq->cmd[2] = page;	/* VPD page code */
	}
	err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
	if (err == -EIO) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: failed to send %s INQUIRY: %x\n",
			    CLARIION_NAME, page?"EVPD":"standard",
			    rq->errors);
		csdev->senselen = rq->sense_len;
		err = SCSI_DH_IO;
	}

	blk_put_request(rq);

	return err;
}
/*
 * send_trespass_cmd - move LUN ownership to this storage processor.
 *
 * Sends the short (MODE SELECT 6, FC-series) or long (MODE SELECT 10)
 * trespass page depending on CLARIION_SHORT_TRESPASS.  On -EIO with
 * sense data the result is decoded by trespass_endio().  Returns a
 * SCSI_DH_* status.
 *
 * Fix: the Honor-Reservations bit used to be OR-ed directly into the
 * static short_trespass/long_trespass templates, so once any device
 * without CLARIION_HONOR_RESERVATIONS sent a trespass the bit stayed
 * set for every device sharing the template.  The bit is now set only
 * in this device's private buffer copy.
 */
static int send_trespass_cmd(struct scsi_device *sdev,
			    struct clariion_dh_data *csdev)
{
	struct request *rq;
	unsigned char *page22;
	int err, len, cmd, hr_off;

	if (csdev->flags & CLARIION_SHORT_TRESPASS) {
		page22 = short_trespass;
		len = sizeof(short_trespass);
		hr_off = 6;		/* trespass code byte */
		cmd = MODE_SELECT;
	} else {
		page22 = long_trespass;
		len = sizeof(long_trespass);
		hr_off = 10;		/* trespass code byte */
		cmd = MODE_SELECT_10;
	}

	BUG_ON((len > CLARIION_BUFFER_SIZE));
	memcpy(csdev->buffer, page22, len);
	/* Set Honor Reservations bit -- in our private copy only. */
	if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
		csdev->buffer[hr_off] |= 0x80;

	rq = get_req(sdev, cmd, csdev->buffer);
	if (!rq)
		return SCSI_DH_RES_TEMP_UNAVAIL;

	rq->sense = csdev->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = csdev->senselen = 0;

	err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
	if (err == -EIO) {
		if (rq->sense_len) {
			err = trespass_endio(sdev, csdev->sense);
		} else {
			sdev_printk(KERN_INFO, sdev,
				    "%s: failed to send MODE SELECT: %x\n",
				    CLARIION_NAME, rq->errors);
		}
	}

	blk_put_request(rq);

	return err;
}
static int clariion_check_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
/*
* LUN Not Ready - Manual Intervention Required
* indicates this is a passive path.
*
* FIXME: However, if this is seen and EVPD C0
* indicates that this is due to a NDU in
* progress, we should set FAIL_PATH too.
* This indicates we might have to do a SCSI
* inquiry in the end_io path. Ugh.
*
* Can return FAILED only when we want the error
* recovery process to kick in.
*/
return SUCCESS;
break;
case ILLEGAL_REQUEST:
if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
/*
* An array based copy is in progress. Do not
* fail the path, do not bypass to another PG,
* do not retry. Fail the IO immediately.
* (Actually this is the same conclusion as in
* the default handler, but lets make sure.)
*
* Can return FAILED only when we want the error
* recovery process to kick in.
*/
return SUCCESS;
break;
case UNIT_ATTENTION:
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
/*
* Unit Attention Code. This is the first IO
* to the new path, so just retry.
*/
return ADD_TO_MLQUEUE;
break;
}
return SCSI_RETURN_NOT_HANDLED;
}
/*
 * clariion_prep_fn - request preparation gate.
 *
 * Lets requests through only while this SP owns the LUN; everything
 * else is killed quietly (REQ_QUIET suppresses error logging).
 */
static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct clariion_dh_data *h = get_clariion_data(sdev);

	if (h->lun_state == CLARIION_LUN_OWNED)
		return BLKPREP_OK;

	req->cmd_flags |= REQ_QUIET;
	return BLKPREP_KILL;
}
/*
 * clariion_std_inquiry - issue a standard INQUIRY and detect the array
 * model so the right trespass variant can be chosen.
 *
 * Old FC-series arrays only understand the short trespass form, so
 * CLARIION_SHORT_TRESPASS is set for them.  Returns a SCSI_DH_*
 * status; SCSI_DH_DEV_UNSUPP when no model can be parsed.
 */
static int clariion_std_inquiry(struct scsi_device *sdev,
				struct clariion_dh_data *csdev)
{
	int err;
	char *sp_model;

	err = send_inquiry_cmd(sdev, 0, csdev);
	if (err != SCSI_DH_OK && csdev->senselen) {
		struct scsi_sense_hdr sshdr;

		if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
					 &sshdr)) {
			sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
				    "%02x/%02x/%02x\n", CLARIION_NAME,
				    sshdr.sense_key, sshdr.asc, sshdr.ascq);
		}
		err = SCSI_DH_IO;
		goto out;
	}

	sp_model = parse_sp_model(sdev, csdev->buffer);
	if (!sp_model) {
		err = SCSI_DH_DEV_UNSUPP;
		goto out;
	}

	/*
	 * FC Series arrays do not support long trespass
	 */
	if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
		csdev->flags |= CLARIION_SHORT_TRESPASS;

	sdev_printk(KERN_INFO, sdev,
		    "%s: detected Clariion %s, flags %x\n",
		    CLARIION_NAME, sp_model, csdev->flags);
out:
	return err;
}
/*
 * clariion_send_inquiry - fetch EVPD page 0xC0 and parse it into the
 * cached LUN/SP state.
 *
 * A unit attention reported in the sense data (e.g. power-on/reset on
 * a fresh path) is retried up to CLARIION_RETRIES times.  Returns a
 * SCSI_DH_* status.
 */
static int clariion_send_inquiry(struct scsi_device *sdev,
				 struct clariion_dh_data *csdev)
{
	int err, retry = CLARIION_RETRIES;

retry:
	err = send_inquiry_cmd(sdev, 0xC0, csdev);
	if (err != SCSI_DH_OK && csdev->senselen) {
		struct scsi_sense_hdr sshdr;

		err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
					   &sshdr);
		if (!err)
			return SCSI_DH_IO;

		err = clariion_check_sense(sdev, &sshdr);
		if (retry > 0 && err == ADD_TO_MLQUEUE) {
			retry--;
			goto retry;
		}
		sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
			    "%02x/%02x/%02x\n", CLARIION_NAME,
			    sshdr.sense_key, sshdr.asc, sshdr.ascq);
		err = SCSI_DH_IO;
	} else {
		err = parse_sp_info_reply(sdev, csdev);
	}
	return err;
}
/*
 * clariion_activate - make this path usable for I/O.
 *
 * Reads the current LUN state; if this SP does not already own the
 * LUN, a trespass command is sent to move ownership here and the state
 * is re-read.  The outcome is reported via @fn(@data, result); the
 * function itself always returns 0.
 */
static int clariion_activate(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct clariion_dh_data *csdev = get_clariion_data(sdev);
	int result;

	result = clariion_send_inquiry(sdev, csdev);
	if (result != SCSI_DH_OK)
		goto done;

	/* Already owned: nothing to do. */
	if (csdev->lun_state == CLARIION_LUN_OWNED)
		goto done;

	result = send_trespass_cmd(sdev, csdev);
	if (result != SCSI_DH_OK)
		goto done;
	sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
		    CLARIION_NAME,
		    csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );

	/* Update status */
	result = clariion_send_inquiry(sdev, csdev);
	if (result != SCSI_DH_OK)
		goto done;

done:
	sdev_printk(KERN_INFO, sdev,
		    "%s: at SP %c Port %d (%s, default SP %c)\n",
		    CLARIION_NAME, csdev->current_sp + 'A',
		    csdev->port, lun_state[csdev->lun_state],
		    csdev->default_sp + 'A');

	if (fn)
		fn(data, result);
	return 0;
}
/*
 * clariion_set_params - update handler flags from user-supplied
 * parameters.
 *
 * @params holds counted NUL-separated decimal tokens in the format
 * "no_of_params\0param1\0param2\0param3\0...\0"; for example, a
 * string for 2 parameters with value 10 and 21 is specified as
 * "2\010\021\0".  Exactly two parameters are accepted here:
 * short-trespass (0/1) and honor-reservations (0/1).  If the LUN is
 * currently owned, a fresh trespass is sent so the new flags take
 * effect immediately.
 *
 * Returns a SCSI_DH_* status, or -EINVAL for malformed input.
 */
static int clariion_set_params(struct scsi_device *sdev, const char *params)
{
	struct clariion_dh_data *csdev = get_clariion_data(sdev);
	unsigned int hr = 0, st = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;

	if ((sscanf(params, "%u", &argc) != 1) || (argc != 2))
		return -EINVAL;

	/* Skip past the argument count to the first parameter. */
	while (*p++)
		;
	if ((sscanf(p, "%u", &st) != 1) || (st > 1))
		return -EINVAL;

	/* Skip to the second parameter. */
	while (*p++)
		;
	if ((sscanf(p, "%u", &hr) != 1) || (hr > 1))
		return -EINVAL;

	if (st)
		csdev->flags |= CLARIION_SHORT_TRESPASS;
	else
		csdev->flags &= ~CLARIION_SHORT_TRESPASS;

	if (hr)
		csdev->flags |= CLARIION_HONOR_RESERVATIONS;
	else
		csdev->flags &= ~CLARIION_HONOR_RESERVATIONS;

	/*
	 * If this path is owned, we have to send a trespass command
	 * with the new parameters. If not, simply return. Next trespass
	 * command would use the parameters.
	 */
	if (csdev->lun_state != CLARIION_LUN_OWNED)
		goto done;

	csdev->lun_state = CLARIION_LUN_UNINITIALIZED;
	result = send_trespass_cmd(sdev, csdev);
	if (result != SCSI_DH_OK)
		goto done;

	/* Update status */
	result = clariion_send_inquiry(sdev, csdev);

done:
	return result;
}
/* Devices this handler attaches to (vendor/product INQUIRY match). */
static const struct scsi_dh_devlist clariion_dev_list[] = {
	{"DGC", "RAID"},
	{"DGC", "DISK"},
	{"DGC", "VRAID"},
	{NULL, NULL},
};

static int clariion_bus_attach(struct scsi_device *sdev);
static void clariion_bus_detach(struct scsi_device *sdev);

/* scsi_dh entry points for the CLARiiON handler. */
static struct scsi_device_handler clariion_dh = {
	.name		= CLARIION_NAME,
	.module		= THIS_MODULE,
	.devlist	= clariion_dev_list,
	.attach		= clariion_bus_attach,
	.detach		= clariion_bus_detach,
	.check_sense	= clariion_check_sense,
	.activate	= clariion_activate,
	.prep_fn	= clariion_prep_fn,
	.set_params	= clariion_set_params,
};
/*
 * clariion_bus_attach - allocate per-device handler state and probe
 * the array.
 *
 * Attaches only if both the standard INQUIRY (model detection) and the
 * EVPD page 0xC0 query (LUN/SP state) succeed.  The state pointer is
 * published under the request queue lock.  Returns 0 on success,
 * -ENOMEM or -EINVAL on failure.
 */
static int clariion_bus_attach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct clariion_dh_data *h;
	unsigned long flags;
	int err;

	/* One allocation: scsi_dh_data with a clariion_dh_data in buf. */
	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
			       + sizeof(*h) , GFP_KERNEL);
	if (!scsi_dh_data) {
		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
			    CLARIION_NAME);
		return -ENOMEM;
	}

	scsi_dh_data->scsi_dh = &clariion_dh;
	h = (struct clariion_dh_data *) scsi_dh_data->buf;
	h->lun_state = CLARIION_LUN_UNINITIALIZED;
	h->default_sp = CLARIION_UNBOUND_LU;
	h->current_sp = CLARIION_UNBOUND_LU;

	err = clariion_std_inquiry(sdev, h);
	if (err != SCSI_DH_OK)
		goto failed;

	err = clariion_send_inquiry(sdev, h);
	if (err != SCSI_DH_OK)
		goto failed;

	if (!try_module_get(THIS_MODULE))
		goto failed;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->scsi_dh_data = scsi_dh_data;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_INFO, sdev,
		    "%s: connected to SP %c Port %d (%s, default SP %c)\n",
		    CLARIION_NAME, h->current_sp + 'A',
		    h->port, lun_state[h->lun_state],
		    h->default_sp + 'A');

	return 0;

failed:
	kfree(scsi_dh_data);
	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
		    CLARIION_NAME);
	return -EINVAL;
}
/*
 * clariion_bus_detach - detach the handler: unpublish the state
 * pointer under the queue lock, free it and drop the module reference
 * taken in clariion_bus_attach().
 */
static void clariion_bus_detach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	scsi_dh_data = sdev->scsi_dh_data;
	sdev->scsi_dh_data = NULL;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
		    CLARIION_NAME);

	kfree(scsi_dh_data);
	module_put(THIS_MODULE);
}
/* Module entry point: register the handler with the scsi_dh core. */
static int __init clariion_init(void)
{
	int ret = scsi_register_device_handler(&clariion_dh);

	if (ret)
		printk(KERN_ERR "%s: Failed to register scsi device handler.",
			CLARIION_NAME);
	return ret;
}
/* Module teardown: unregister the handler from the scsi_dh core. */
static void __exit clariion_exit(void)
{
	scsi_unregister_device_handler(&clariion_dh);
}

module_init(clariion_init);
module_exit(clariion_exit);

MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
miamo/miamOv | drivers/media/video/noon010pc30.c | 2933 | 19536 | /*
* Driver for SiliconFile NOON010PC30 CIF (1/11") Image Sensor with ISP
*
* Copyright (C) 2010 Samsung Electronics
* Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* Initial register configuration based on a driver authored by
* HeungJun Kim <riverful.kim@samsung.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <media/noon010pc30.h>
#include <media/v4l2-chip-ident.h>
#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable module debug trace. Set to 1 to enable.");
#define MODULE_NAME "NOON010PC30"
/*
* Register offsets within a page
* b15..b8 - page id, b7..b0 - register address
*/
#define POWER_CTRL_REG 0x0001
#define PAGEMODE_REG 0x03
#define DEVICE_ID_REG 0x0004
#define NOON010PC30_ID 0x86
#define VDO_CTL_REG(n) (0x0010 + (n))
#define SYNC_CTL_REG 0x0012
/* Window size and position */
#define WIN_ROWH_REG 0x0013
#define WIN_ROWL_REG 0x0014
#define WIN_COLH_REG 0x0015
#define WIN_COLL_REG 0x0016
#define WIN_HEIGHTH_REG 0x0017
#define WIN_HEIGHTL_REG 0x0018
#define WIN_WIDTHH_REG 0x0019
#define WIN_WIDTHL_REG 0x001A
#define HBLANKH_REG 0x001B
#define HBLANKL_REG 0x001C
#define VSYNCH_REG 0x001D
#define VSYNCL_REG 0x001E
/* VSYNC control */
#define VS_CTL_REG(n) (0x00A1 + (n))
/* page 1 */
#define ISP_CTL_REG(n) (0x0110 + (n))
#define YOFS_REG 0x0119
#define DARK_YOFS_REG 0x011A
#define SAT_CTL_REG 0x0120
#define BSAT_REG 0x0121
#define RSAT_REG 0x0122
/* Color correction */
#define CMC_CTL_REG 0x0130
#define CMC_OFSGH_REG 0x0133
#define CMC_OFSGL_REG 0x0135
#define CMC_SIGN_REG 0x0136
#define CMC_GOFS_REG 0x0137
#define CMC_COEF_REG(n) (0x0138 + (n))
#define CMC_OFS_REG(n) (0x0141 + (n))
/* Gamma correction */
#define GMA_CTL_REG 0x0160
#define GMA_COEF_REG(n) (0x0161 + (n))
/* Lens Shading */
#define LENS_CTRL_REG 0x01D0
#define LENS_XCEN_REG 0x01D1
#define LENS_YCEN_REG 0x01D2
#define LENS_RC_REG 0x01D3
#define LENS_GC_REG 0x01D4
#define LENS_BC_REG 0x01D5
#define L_AGON_REG 0x01D6
#define L_AGOFF_REG 0x01D7
/* Page 3 - Auto Exposure */
#define AE_CTL_REG(n) (0x0310 + (n))
#define AE_CTL9_REG 0x032C
#define AE_CTL10_REG 0x032D
#define AE_YLVL_REG 0x031C
#define AE_YTH_REG(n) (0x031D + (n))
#define AE_WGT_REG 0x0326
#define EXP_TIMEH_REG 0x0333
#define EXP_TIMEM_REG 0x0334
#define EXP_TIMEL_REG 0x0335
#define EXP_MMINH_REG 0x0336
#define EXP_MMINL_REG 0x0337
#define EXP_MMAXH_REG 0x0338
#define EXP_MMAXM_REG 0x0339
#define EXP_MMAXL_REG 0x033A
/* Page 4 - Auto White Balance */
#define AWB_CTL_REG(n) (0x0410 + (n))
#define AWB_ENABE 0x80
#define AWB_WGHT_REG 0x0419
#define BGAIN_PAR_REG(n) (0x044F + (n))
/* Manual white balance, when AWB_CTL2[0]=1 */
#define MWB_RGAIN_REG 0x0466
#define MWB_BGAIN_REG 0x0467
/* The token to mark an array end */
#define REG_TERM 0xFFFF
/*
 * Media bus format descriptor: maps a mediabus pixel code to the
 * ISP control register value that selects it on the sensor.
 */
struct noon010_format {
	enum v4l2_mbus_pixelcode code;
	enum v4l2_colorspace colorspace;
	u16 ispctl1_reg;	/* value written to ISP_CTL_REG(0) for this format */
};
/*
 * Frame size descriptor: a supported pixel resolution plus the video
 * control register value that configures it.
 */
struct noon010_frmsize {
	u16 width;
	u16 height;
	int vid_ctl1;	/* value written to VDO_CTL_REG(0) for this window */
};
/* Names of the regulator supplies the sensor needs, bulk-acquired in probe(). */
static const char * const noon010_supply_name[] = {
	"vdd_core", "vddio", "vdda"
};

#define NOON010_NUM_SUPPLIES ARRAY_SIZE(noon010_supply_name)
/* Per-device driver state, wrapped around the v4l2 subdev. */
struct noon010_info {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	const struct noon010pc30_platform_data *pdata;
	const struct noon010_format *curr_fmt;	/* NULL until format configured */
	const struct noon010_frmsize *curr_win;	/* NULL until window configured */
	unsigned int hflip:1;
	unsigned int vflip:1;
	unsigned int power:1;	/* 1 when supplies/GPIOs are powered up */
	u8 i2c_reg_page;	/* last page latched in the sensor; -1 (0xff) = unknown */
	struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
	u32 gpio_nreset;	/* reset GPIO number, or -EINVAL if unused */
	u32 gpio_nstby;		/* standby GPIO number, or -EINVAL if unused */
};
/* One address/value pair for bulk register initialization. */
struct i2c_regval {
	u16 addr;	/* b15..b8 - page id, b7..b0 - register offset */
	u16 val;
};
/* Supported resolutions. */
/* Supported resolutions. */
static const struct noon010_frmsize noon010_sizes[] = {
	{	/* CIF */
		.width		= 352,
		.height		= 288,
		.vid_ctl1	= 0,
	}, {	/* QCIF */
		.width		= 176,
		.height		= 144,
		.vid_ctl1	= 0x10,
	}, {	/* quarter QCIF */
		.width		= 88,
		.height		= 72,
		.vid_ctl1	= 0x20,
	},
};
/* Supported pixel formats. */
/* Supported pixel formats; index 0 is the default applied in base config. */
static const struct noon010_format noon010_formats[] = {
	{
		.code		= V4L2_MBUS_FMT_YUYV8_2X8,
		.colorspace	= V4L2_COLORSPACE_JPEG,
		.ispctl1_reg	= 0x03,
	}, {
		.code		= V4L2_MBUS_FMT_YVYU8_2X8,
		.colorspace	= V4L2_COLORSPACE_JPEG,
		.ispctl1_reg	= 0x02,
	}, {
		.code		= V4L2_MBUS_FMT_VYUY8_2X8,
		.colorspace	= V4L2_COLORSPACE_JPEG,
		.ispctl1_reg	= 0,
	}, {
		.code		= V4L2_MBUS_FMT_UYVY8_2X8,
		.colorspace	= V4L2_COLORSPACE_JPEG,
		.ispctl1_reg	= 0x01,
	}, {
		.code		= V4L2_MBUS_FMT_RGB565_2X8_BE,
		.colorspace	= V4L2_COLORSPACE_JPEG,
		.ispctl1_reg	= 0x40,
	},
};
/* Initial register settings applied on each power-up, REG_TERM-terminated. */
static const struct i2c_regval noon010_base_regs[] = {
	{ WIN_COLL_REG, 0x06 }, { HBLANKL_REG, 0x7C },
	/* Color correction and saturation */
	{ ISP_CTL_REG(0), 0x30 }, { ISP_CTL_REG(2), 0x30 },
	{ YOFS_REG, 0x80 }, { DARK_YOFS_REG, 0x04 },
	{ SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 },
	{ CMC_CTL_REG, 0x0F }, { CMC_OFSGH_REG, 0x3C },
	{ CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x3F },
	{ CMC_COEF_REG(0), 0x79 }, { CMC_OFS_REG(0), 0x00 },
	{ CMC_COEF_REG(1), 0x39 }, { CMC_OFS_REG(1), 0x00 },
	{ CMC_COEF_REG(2), 0x00 }, { CMC_OFS_REG(2), 0x00 },
	{ CMC_COEF_REG(3), 0x11 }, { CMC_OFS_REG(3), 0x8B },
	{ CMC_COEF_REG(4), 0x65 }, { CMC_OFS_REG(4), 0x07 },
	{ CMC_COEF_REG(5), 0x14 }, { CMC_OFS_REG(5), 0x04 },
	{ CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x9C },
	{ CMC_COEF_REG(7), 0x33 }, { CMC_OFS_REG(7), 0x89 },
	{ CMC_COEF_REG(8), 0x74 }, { CMC_OFS_REG(8), 0x25 },
	/* Automatic white balance */
	{ AWB_CTL_REG(0), 0x78 }, { AWB_CTL_REG(1), 0x2E },
	{ AWB_CTL_REG(2), 0x20 }, { AWB_CTL_REG(3), 0x85 },
	/* Auto exposure */
	{ AE_CTL_REG(0), 0xDC }, { AE_CTL_REG(1), 0x81 },
	{ AE_CTL_REG(2), 0x30 }, { AE_CTL_REG(3), 0xA5 },
	{ AE_CTL_REG(4), 0x40 }, { AE_CTL_REG(5), 0x51 },
	{ AE_CTL_REG(6), 0x33 }, { AE_CTL_REG(7), 0x7E },
	{ AE_CTL9_REG, 0x00 }, { AE_CTL10_REG, 0x02 },
	{ AE_YLVL_REG, 0x44 }, { AE_YTH_REG(0), 0x34 },
	{ AE_YTH_REG(1), 0x30 }, { AE_WGT_REG, 0xD5 },
	/* Lens shading compensation */
	{ LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 },
	{ LENS_YCEN_REG, 0x70 }, { LENS_RC_REG, 0x53 },
	{ LENS_GC_REG, 0x40 }, { LENS_BC_REG, 0x3E },
	{ REG_TERM, 0 },
};
/* Return the driver state embedding subdev @sd. */
static inline struct noon010_info *to_noon010(struct v4l2_subdev *sd)
{
	return container_of(sd, struct noon010_info, sd);
}
/* Return the subdev owning control @ctrl (via the embedded ctrl handler). */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct noon010_info, hdl)->sd;
}
/*
 * Latch the register page addressed by @reg in the sensor, unless it is
 * already selected.  Accesses to the page select register itself
 * (offset 0x03) never need a page switch.  Returns 0 or an I2C error.
 */
static inline int set_i2c_page(struct noon010_info *info,
			       struct i2c_client *client, unsigned int reg)
{
	u32 new_page = (reg >> 8) & 0xFF;
	int err;

	if ((reg & 0xFF) == 0x03 || info->i2c_reg_page == new_page)
		return 0;

	err = i2c_smbus_write_byte_data(client, PAGEMODE_REG, new_page);
	if (!err)
		info->i2c_reg_page = new_page;
	return err;
}
/*
 * Read one sensor register: select its page, then read the byte.
 * Returns the register value (>= 0) or a negative error code.
 */
static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr)
{
	struct noon010_info *info = to_noon010(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int err;

	err = set_i2c_page(info, client, reg_addr);
	if (err)
		return err;
	return i2c_smbus_read_byte_data(client, reg_addr & 0xFF);
}
/*
 * Write one sensor register: select its page, then write the byte.
 * Returns 0 or a negative error code.
 */
static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val)
{
	struct noon010_info *info = to_noon010(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int err;

	err = set_i2c_page(info, client, reg_addr);
	if (err)
		return err;
	return i2c_smbus_write_byte_data(client, reg_addr & 0xFF, val);
}
/*
 * Write a REG_TERM-terminated array of register/value pairs, stopping
 * at the first I2C error.  Returns 0 on success or the error code.
 */
static inline int noon010_bulk_write_reg(struct v4l2_subdev *sd,
					 const struct i2c_regval *msg)
{
	int err = 0;

	for (; msg->addr != REG_TERM; msg++) {
		err = cam_i2c_write(sd, msg->addr, msg->val);
		if (err)
			break;
	}
	return err;
}
/* Device reset and sleep mode control */
/*
 * Write the sensor's power control register: optionally pulse the soft
 * reset bit (bit 1) first, then leave the chip in sleep (0xF1) or
 * active (0xF0) mode.  After a reset the page cache is invalidated
 * since the hardware register page returns to its default.
 */
static int noon010_power_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep)
{
	struct noon010_info *info = to_noon010(sd);
	u8 reg = sleep ? 0xF1 : 0xF0;
	int ret = 0;

	if (reset)
		ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
	if (!ret) {
		ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
		if (reset && !ret)
			info->i2c_reg_page = -1;	/* page selection lost by reset */
	}
	return ret;
}
/* Automatic white balance control: enable or disable AWB via the two
 * AWB control registers; returns 0 or the first I2C error. */
static int noon010_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
{
	int err = cam_i2c_write(sd, AWB_CTL_REG(1), on ? 0x2E : 0x2F);

	if (err)
		return err;
	return cam_i2c_write(sd, AWB_CTL_REG(0), on ? 0xFB : 0x7B);
}
/*
 * Program horizontal/vertical mirroring via a read-modify-write of the
 * video control register (bit 7 is set on write-back, as the original
 * sequence does).  The cached flip state is updated only on success.
 */
static int noon010_set_flip(struct v4l2_subdev *sd, int hflip, int vflip)
{
	struct noon010_info *info = to_noon010(sd);
	int val, err;

	val = cam_i2c_read(sd, VDO_CTL_REG(1));
	if (val < 0)
		return val;

	val = (val & 0x7C) | (hflip ? 0x01 : 0) | (vflip ? 0x02 : 0);

	err = cam_i2c_write(sd, VDO_CTL_REG(1), val | 0x80);
	if (err)
		return err;

	info->hflip = hflip;
	info->vflip = vflip;
	return 0;
}
/* Configure resolution and color format */
/*
 * Apply the cached window (required) and format (optional) to the
 * hardware.  Returns -EINVAL when no window has been chosen yet,
 * otherwise 0 or an I2C error code.
 */
static int noon010_set_params(struct v4l2_subdev *sd)
{
	struct noon010_info *info = to_noon010(sd);
	int err;

	if (!info->curr_win)
		return -EINVAL;

	err = cam_i2c_write(sd, VDO_CTL_REG(0), info->curr_win->vid_ctl1);
	if (err || !info->curr_fmt)
		return err;

	return cam_i2c_write(sd, ISP_CTL_REG(0), info->curr_fmt->ispctl1_reg);
}
/* Find nearest matching image pixel size. */
static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf)
{
unsigned int min_err = ~0;
int i = ARRAY_SIZE(noon010_sizes);
const struct noon010_frmsize *fsize = &noon010_sizes[0],
*match = NULL;
while (i--) {
int err = abs(fsize->width - mf->width)
+ abs(fsize->height - mf->height);
if (err < min_err) {
min_err = err;
match = fsize;
}
fsize++;
}
if (match) {
mf->width = match->width;
mf->height = match->height;
return 0;
}
return -EINVAL;
}
/*
 * Power the sensor up: park both GPIOs low, enable the regulators,
 * then run the reset/standby timing sequence.  The delays and the
 * nreset low->high->low->high toggling follow the sensor's power-up
 * timing; do not reorder.  No-op (returns 0) if already powered.
 */
static int power_enable(struct noon010_info *info)
{
	int ret;

	if (info->power) {
		v4l2_info(&info->sd, "%s: sensor is already on\n", __func__);
		return 0;
	}

	/* Hold the chip in standby/reset while supplies ramp up */
	if (gpio_is_valid(info->gpio_nstby))
		gpio_set_value(info->gpio_nstby, 0);

	if (gpio_is_valid(info->gpio_nreset))
		gpio_set_value(info->gpio_nreset, 0);

	ret = regulator_bulk_enable(NOON010_NUM_SUPPLIES, info->supply);
	if (ret)
		return ret;

	if (gpio_is_valid(info->gpio_nreset)) {
		msleep(50);
		gpio_set_value(info->gpio_nreset, 1);
	}
	if (gpio_is_valid(info->gpio_nstby)) {
		udelay(1000);
		gpio_set_value(info->gpio_nstby, 1);
	}
	/* Final reset pulse after leaving standby */
	if (gpio_is_valid(info->gpio_nreset)) {
		udelay(1000);
		gpio_set_value(info->gpio_nreset, 0);
		msleep(100);
		gpio_set_value(info->gpio_nreset, 1);
		msleep(20);
	}
	info->power = 1;

	v4l2_dbg(1, debug, &info->sd,  "%s: sensor is on\n", __func__);
	return 0;
}
/*
 * Power the sensor down: disable the regulators, then drive the
 * standby and reset GPIOs low.  No-op (returns 0) if already off.
 */
static int power_disable(struct noon010_info *info)
{
	int ret;

	if (!info->power) {
		v4l2_info(&info->sd, "%s: sensor is already off\n", __func__);
		return 0;
	}

	ret = regulator_bulk_disable(NOON010_NUM_SUPPLIES, info->supply);
	if (ret)
		return ret;

	if (gpio_is_valid(info->gpio_nstby))
		gpio_set_value(info->gpio_nstby, 0);

	if (gpio_is_valid(info->gpio_nreset))
		gpio_set_value(info->gpio_nreset, 0);

	info->power = 0;

	v4l2_dbg(1, debug, &info->sd,  "%s: sensor is off\n", __func__);

	return 0;
}
/*
 * V4L2 control handler: AWB on/off plus manual red/blue gain.
 * Unknown control ids are rejected with -EINVAL.
 */
static int noon010_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	int ret;

	v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
		 __func__, ctrl->id, ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_AUTO_WHITE_BALANCE:
		ret = noon010_enable_autowhitebalance(sd, ctrl->val);
		break;
	case V4L2_CID_BLUE_BALANCE:
		ret = cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
		break;
	case V4L2_CID_RED_BALANCE:
		ret = cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int noon010_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
if (!code || index >= ARRAY_SIZE(noon010_formats))
return -EINVAL;
*code = noon010_formats[index].code;
return 0;
}
/*
 * Return the currently configured frame format.  If no window/format
 * has been set yet, try to program the defaults first.
 */
static int noon010_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	struct noon010_info *info = to_noon010(sd);
	int ret;

	if (!mf)
		return -EINVAL;

	if (!info->curr_win || !info->curr_fmt) {
		ret = noon010_set_params(sd);
		if (ret)
			return ret;
		/*
		 * noon010_set_params() only fails when curr_win is unset;
		 * it can succeed while curr_fmt is still NULL, so re-check
		 * before dereferencing (previously a NULL pointer
		 * dereference in that case).
		 */
		if (!info->curr_fmt)
			return -EINVAL;
	}
	mf->width	= info->curr_win->width;
	mf->height	= info->curr_win->height;
	mf->code	= info->curr_fmt->code;
	mf->colorspace	= info->curr_fmt->colorspace;
	mf->field	= V4L2_FIELD_NONE;

	return 0;
}
/* Return nearest media bus frame format. */
/*
 * Clamp @mf to the nearest supported frame size and pixel code,
 * and return the matching format table entry.
 */
static const struct noon010_format *try_fmt(struct v4l2_subdev *sd,
					    struct v4l2_mbus_framefmt *mf)
{
	int i = ARRAY_SIZE(noon010_formats);

	noon010_try_frame_size(mf);

	while (i--)
		if (mf->code == noon010_formats[i].code)
			break;
	/*
	 * If no code matched, the post-decrement loop leaves i == -1 and
	 * indexing with it read out of array bounds; fall back to the
	 * first (default) format instead.
	 */
	if (i < 0)
		i = 0;

	mf->code = noon010_formats[i].code;

	return &noon010_formats[i];
}
static int noon010_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
if (!sd || !mf)
return -EINVAL;
try_fmt(sd, mf);
return 0;
}
static int noon010_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct noon010_info *info = to_noon010(sd);
if (!sd || !mf)
return -EINVAL;
info->curr_fmt = try_fmt(sd, mf);
return noon010_set_params(sd);
}
/*
 * Bring the sensor to a known state after power-up: load the base
 * register table, select the default format/window, apply the default
 * horizontal flip and wake the chip, then resync the control handler
 * with the hardware.
 */
static int noon010_base_config(struct v4l2_subdev *sd)
{
	struct noon010_info *info = to_noon010(sd);
	int ret;

	ret = noon010_bulk_write_reg(sd, noon010_base_regs);
	if (!ret) {
		info->curr_fmt = &noon010_formats[0];
		info->curr_win = &noon010_sizes[0];
		ret = noon010_set_params(sd);
	}
	if (!ret)
		ret = noon010_set_flip(sd, 1, 0);
	if (!ret)
		ret = noon010_power_ctrl(sd, false, false);	/* wake, no reset */

	/* sync the handler and the registers state */
	v4l2_ctrl_handler_setup(&to_noon010(sd)->hdl);

	return ret;
}
static int noon010_s_power(struct v4l2_subdev *sd, int on)
{
struct noon010_info *info = to_noon010(sd);
const struct noon010pc30_platform_data *pdata = info->pdata;
int ret = 0;
if (WARN(pdata == NULL, "No platform data!\n"))
return -ENOMEM;
if (on) {
ret = power_enable(info);
if (ret)
return ret;
ret = noon010_base_config(sd);
} else {
noon010_power_ctrl(sd, false, true);
ret = power_disable(info);
info->curr_win = NULL;
info->curr_fmt = NULL;
}
return ret;
}
/* g_chip_ident op: report the chip as V4L2_IDENT_NOON010PC30, rev 0. */
static int noon010_g_chip_ident(struct v4l2_subdev *sd,
				struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip,
					  V4L2_IDENT_NOON010PC30, 0);
}
/* log_status op: dump the current control values to the kernel log. */
static int noon010_log_status(struct v4l2_subdev *sd)
{
	struct noon010_info *info = to_noon010(sd);

	v4l2_ctrl_handler_log_status(&info->hdl, sd->name);
	return 0;
}
/* All three supported controls funnel through noon010_s_ctrl(). */
static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
	.s_ctrl = noon010_s_ctrl,
};

/* Core ops: power management plus the generic ctrl-handler helpers. */
static const struct v4l2_subdev_core_ops noon010_core_ops = {
	.g_chip_ident	= noon010_g_chip_ident,
	.s_power	= noon010_s_power,
	.g_ctrl		= v4l2_subdev_g_ctrl,
	.s_ctrl		= v4l2_subdev_s_ctrl,
	.queryctrl	= v4l2_subdev_queryctrl,
	.querymenu	= v4l2_subdev_querymenu,
	.g_ext_ctrls	= v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls	= v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls	= v4l2_subdev_s_ext_ctrls,
	.log_status	= noon010_log_status,
};

/* Video ops: media bus format get/set/try/enumerate. */
static const struct v4l2_subdev_video_ops noon010_video_ops = {
	.g_mbus_fmt	= noon010_g_fmt,
	.s_mbus_fmt	= noon010_s_fmt,
	.try_mbus_fmt	= noon010_try_fmt,
	.enum_mbus_fmt	= noon010_enum_fmt,
};

static const struct v4l2_subdev_ops noon010_ops = {
	.core	= &noon010_core_ops,
	.video	= &noon010_video_ops,
};
/* Return 0 if NOON010PC30L sensor type was detected or -ENODEV otherwise. */
static int noon010_detect(struct i2c_client *client, struct noon010_info *info)
{
	int id;

	id = power_enable(info);
	if (id)
		return id;

	id = i2c_smbus_read_byte_data(client, DEVICE_ID_REG);
	if (id < 0)
		dev_err(&client->dev, "I2C read failed: 0x%X\n", id);

	power_disable(info);

	return (id == NOON010PC30_ID) ? 0 : -ENODEV;
}
/*
 * Probe: register the subdev and its controls, claim the optional
 * reset/standby GPIOs and the regulator supplies, then verify the chip
 * id.  Resources are released in reverse order through the chained
 * goto labels on any failure.
 */
static int noon010_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct noon010_info *info;
	struct v4l2_subdev *sd;
	const struct noon010pc30_platform_data *pdata
		= client->dev.platform_data;
	int ret;
	int i;

	if (!pdata) {
		dev_err(&client->dev, "No platform data!\n");
		return -EIO;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	sd = &info->sd;
	strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
	v4l2_i2c_subdev_init(sd, client, &noon010_ops);

	/* Three controls: AWB on/off plus manual red/blue gain */
	v4l2_ctrl_handler_init(&info->hdl, 3);
	v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
			  V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
	v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
			  V4L2_CID_RED_BALANCE, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
			  V4L2_CID_BLUE_BALANCE, 0, 127, 1, 64);
	sd->ctrl_handler = &info->hdl;

	ret = info->hdl.error;
	if (ret)
		goto np_err;

	info->pdata = client->dev.platform_data;
	info->i2c_reg_page = -1;	/* register page unknown until first access */
	info->gpio_nreset = -EINVAL;	/* marks GPIO as unused */
	info->gpio_nstby = -EINVAL;

	if (gpio_is_valid(pdata->gpio_nreset)) {
		ret = gpio_request(pdata->gpio_nreset, "NOON010PC30 NRST");
		if (ret) {
			dev_err(&client->dev, "GPIO request error: %d\n", ret);
			goto np_err;
		}
		info->gpio_nreset = pdata->gpio_nreset;
		gpio_direction_output(info->gpio_nreset, 0);
		gpio_export(info->gpio_nreset, 0);
	}

	if (gpio_is_valid(pdata->gpio_nstby)) {
		ret = gpio_request(pdata->gpio_nstby, "NOON010PC30 NSTBY");
		if (ret) {
			dev_err(&client->dev, "GPIO request error: %d\n", ret);
			goto np_gpio_err;
		}
		info->gpio_nstby = pdata->gpio_nstby;
		gpio_direction_output(info->gpio_nstby, 0);
		gpio_export(info->gpio_nstby, 0);
	}

	for (i = 0; i < NOON010_NUM_SUPPLIES; i++)
		info->supply[i].supply = noon010_supply_name[i];

	ret = regulator_bulk_get(&client->dev, NOON010_NUM_SUPPLIES,
				 info->supply);
	if (ret)
		goto np_reg_err;

	ret = noon010_detect(client, info);
	if (!ret)
		return 0;

	/* the sensor detection failed */
	regulator_bulk_free(NOON010_NUM_SUPPLIES, info->supply);
np_reg_err:
	if (gpio_is_valid(info->gpio_nstby))
		gpio_free(info->gpio_nstby);
np_gpio_err:
	if (gpio_is_valid(info->gpio_nreset))
		gpio_free(info->gpio_nreset);
np_err:
	v4l2_ctrl_handler_free(&info->hdl);
	v4l2_device_unregister_subdev(sd);
	kfree(info);
	return ret;
}
/*
 * Remove: unregister the subdev first so no new callbacks arrive,
 * then release controls, regulators and GPIOs before freeing state.
 */
static int noon010_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct noon010_info *info = to_noon010(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&info->hdl);
	regulator_bulk_free(NOON010_NUM_SUPPLIES, info->supply);

	if (gpio_is_valid(info->gpio_nreset))
		gpio_free(info->gpio_nreset);

	if (gpio_is_valid(info->gpio_nstby))
		gpio_free(info->gpio_nstby);

	kfree(info);
	return 0;
}
/* Matched by I2C device name "NOON010PC30". */
static const struct i2c_device_id noon010_id[] = {
	{ MODULE_NAME, 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, noon010_id);

static struct i2c_driver noon010_i2c_driver = {
	.driver = {
		.name = MODULE_NAME
	},
	.probe		= noon010_probe,
	.remove		= noon010_remove,
	.id_table	= noon010_id,
};
/* Module plumbing: register/unregister the I2C driver. */
static int __init noon010_init(void)
{
	return i2c_add_driver(&noon010_i2c_driver);
}

static void __exit noon010_exit(void)
{
	i2c_del_driver(&noon010_i2c_driver);
}

module_init(noon010_init);
module_exit(noon010_exit);
MODULE_DESCRIPTION("Siliconfile NOON010PC30 camera driver");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sgs3/SGH-I747M_Kernel | drivers/mfd/twl4030-irq.c | 2933 | 22035 | /*
* twl4030-irq.c - TWL4030/TPS659x0 irq support
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* Modifications to defer interrupt handling to a kernel thread:
* Copyright (C) 2006 MontaVista Software, Inc.
*
* Based on tlv320aic23.c:
* Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
*
* Code cleanup and modifications to IRQ handler.
* by syed khasim <x0khasim@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/i2c/twl.h>
#include "twl-core.h"
/*
* TWL4030 IRQ handling has two stages in hardware, and thus in software.
* The Primary Interrupt Handler (PIH) stage exposes status bits saying
* which Secondary Interrupt Handler (SIH) stage is raising an interrupt.
* SIH modules are more traditional IRQ components, which support per-IRQ
* enable/disable and trigger controls; they do most of the work.
*
* These chips are designed to support IRQ handling from two different
* I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status
* and mask registers in the PIH and SIH modules.
*
* We set up IRQs starting at a platform-specified base, always starting
* with PIH and the SIH for PWR_INT and then usually adding GPIO:
* base + 0 .. base + 7 PIH
* base + 8 .. base + 15 SIH for PWR_INT
* base + 16 .. base + 33 SIH for GPIO
*/
/* PIH register offsets */
#define REG_PIH_ISR_P1 0x01
#define REG_PIH_ISR_P2 0x02
#define REG_PIH_SIR 0x03 /* for testing */
/* Linux could (eventually) use either IRQ line */
static int irq_line;
/*
 * Static description of one Secondary Interrupt Handler (SIH) module:
 * where its status (ISR), mask (IMR) and edge (EDR) registers live for
 * each of the two possible IRQ lines.
 */
struct sih {
	char	name[8];
	u8	module;			/* module id */
	u8	control_offset;		/* for SIH_CTRL */
	bool	set_cor;		/* module supports clear-on-read ISRs */

	u8	bits;			/* valid in isr/imr */
	u8	bytes_ixr;		/* bytelen of ISR/IMR/SIR */

	u8	edr_offset;
	u8	bytes_edr;		/* bytelen of EDR */

	u8	irq_lines;		/* number of supported irq lines */

	/* SIR ignored -- set interrupt, for testing only */
	struct sih_irq_data {
		u8	isr_offset;
		u8	imr_offset;
	} mask[2];
	/* + 2 bytes padding */
};
static const struct sih *sih_modules;
static int nr_sih_modules;
#define SIH_INITIALIZER(modname, nbits) \
.module = TWL4030_MODULE_ ## modname, \
.control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
.bits = nbits, \
.bytes_ixr = DIV_ROUND_UP(nbits, 8), \
.edr_offset = TWL4030_ ## modname ## _EDR, \
.bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \
.irq_lines = 2, \
.mask = { { \
.isr_offset = TWL4030_ ## modname ## _ISR1, \
.imr_offset = TWL4030_ ## modname ## _IMR1, \
}, \
{ \
.isr_offset = TWL4030_ ## modname ## _ISR2, \
.imr_offset = TWL4030_ ## modname ## _IMR2, \
}, },
/* register naming policies are inconsistent ... */
#define TWL4030_INT_PWR_EDR TWL4030_INT_PWR_EDR1
#define TWL4030_MODULE_KEYPAD_KEYP TWL4030_MODULE_KEYPAD
#define TWL4030_MODULE_INT_PWR TWL4030_MODULE_INT
/* Order in this table matches order in PIH_ISR. That is,
* BIT(n) in PIH_ISR is sih_modules[n].
*/
/* sih_modules_twl4030 is used both in twl4030 and twl5030 */
/* SIH register layout for TWL4030/TWL5030; indexed by PIH_ISR bit. */
static const struct sih sih_modules_twl4030[6] = {
	[0] = {
		.name		= "gpio",
		.module		= TWL4030_MODULE_GPIO,
		.control_offset	= REG_GPIO_SIH_CTRL,
		.set_cor	= true,
		.bits		= TWL4030_GPIO_MAX,
		.bytes_ixr	= 3,
		/* Note: *all* of these IRQs default to no-trigger */
		.edr_offset	= REG_GPIO_EDR1,
		.bytes_edr	= 5,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= REG_GPIO_ISR1A,
			.imr_offset	= REG_GPIO_IMR1A,
		}, {
			.isr_offset	= REG_GPIO_ISR1B,
			.imr_offset	= REG_GPIO_IMR1B,
		}, },
	},
	[1] = {
		.name		= "keypad",
		.set_cor	= true,
		SIH_INITIALIZER(KEYPAD_KEYP, 4)
	},
	[2] = {
		.name		= "bci",
		.module		= TWL4030_MODULE_INTERRUPTS,
		.control_offset	= TWL4030_INTERRUPTS_BCISIHCTRL,
		.set_cor	= true,
		.bits		= 12,
		.bytes_ixr	= 2,
		.edr_offset	= TWL4030_INTERRUPTS_BCIEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 3,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1A,
			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1A,
		}, {
			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1B,
			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1B,
		}, },
	},
	[3] = {
		.name		= "madc",
		SIH_INITIALIZER(MADC, 4)
	},
	[4] = {
		/* USB doesn't use the same SIH organization */
		.name		= "usb",
	},
	[5] = {
		.name		= "power",
		.set_cor	= true,
		SIH_INITIALIZER(INT_PWR, 8)
	},
	/* there are no SIH modules #6 or #7 ... */
};
/*
 * SIH register layout for TWL5031: same first six modules as TWL4030,
 * plus ECI/DBI and audio accessory modules at indices 6 and 7.
 */
static const struct sih sih_modules_twl5031[8] = {
	[0] = {
		.name		= "gpio",
		.module		= TWL4030_MODULE_GPIO,
		.control_offset	= REG_GPIO_SIH_CTRL,
		.set_cor	= true,
		.bits		= TWL4030_GPIO_MAX,
		.bytes_ixr	= 3,
		/* Note: *all* of these IRQs default to no-trigger */
		.edr_offset	= REG_GPIO_EDR1,
		.bytes_edr	= 5,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= REG_GPIO_ISR1A,
			.imr_offset	= REG_GPIO_IMR1A,
		}, {
			.isr_offset	= REG_GPIO_ISR1B,
			.imr_offset	= REG_GPIO_IMR1B,
		}, },
	},
	[1] = {
		.name		= "keypad",
		.set_cor	= true,
		SIH_INITIALIZER(KEYPAD_KEYP, 4)
	},
	[2] = {
		.name		= "bci",
		.module		= TWL5031_MODULE_INTERRUPTS,
		.control_offset	= TWL5031_INTERRUPTS_BCISIHCTRL,
		.bits		= 7,
		.bytes_ixr	= 1,
		.edr_offset	= TWL5031_INTERRUPTS_BCIEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 2,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= TWL5031_INTERRUPTS_BCIISR1,
			.imr_offset	= TWL5031_INTERRUPTS_BCIIMR1,
		}, {
			.isr_offset	= TWL5031_INTERRUPTS_BCIISR2,
			.imr_offset	= TWL5031_INTERRUPTS_BCIIMR2,
		}, },
	},
	[3] = {
		.name		= "madc",
		SIH_INITIALIZER(MADC, 4)
	},
	[4] = {
		/* USB doesn't use the same SIH organization */
		.name		= "usb",
	},
	[5] = {
		.name		= "power",
		.set_cor	= true,
		SIH_INITIALIZER(INT_PWR, 8)
	},
	[6] = {
		/*
		 * ECI/DBI doesn't use the same SIH organization.
		 * For example, it supports only one interrupt output line.
		 * That is, the interrupts are seen on both INT1 and INT2 lines.
		 */
		.name		= "eci_dbi",
		.module		= TWL5031_MODULE_ACCESSORY,
		.bits		= 9,
		.bytes_ixr	= 2,
		.irq_lines	= 1,
		.mask = { {
			.isr_offset	= TWL5031_ACIIDR_LSB,
			.imr_offset	= TWL5031_ACIIMR_LSB,
		}, },
	},
	[7] = {
		/* Audio accessory */
		.name		= "audio",
		.module		= TWL5031_MODULE_ACCESSORY,
		.control_offset	= TWL5031_ACCSIHCTRL,
		.bits		= 2,
		.bytes_ixr	= 1,
		.edr_offset	= TWL5031_ACCEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 1,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= TWL5031_ACCISR1,
			.imr_offset	= TWL5031_ACCIMR1,
		}, {
			.isr_offset	= TWL5031_ACCISR2,
			.imr_offset	= TWL5031_ACCIMR2,
		}, },
	},
};
#undef TWL4030_MODULE_KEYPAD_KEYP
#undef TWL4030_MODULE_INT_PWR
#undef TWL4030_INT_PWR_EDR
/*----------------------------------------------------------------------*/
static unsigned twl4030_irq_base;
static struct completion irq_event;
/*
 * This thread processes interrupts reported by the Primary Interrupt Handler.
 */
/*
 * Loop: wait for handle_twl4030_pih() to signal irq_event, read the
 * PIH status over I2C (which cannot be done in hard-irq context),
 * dispatch one virtual IRQ per set status bit, then re-enable the
 * hardware line that the PIH handler disabled.  Bails out permanently
 * after max_i2c_errors consecutive read failures.
 */
static int twl4030_irq_thread(void *data)
{
	long irq = (long)data;
	static unsigned i2c_errors;
	static const unsigned max_i2c_errors = 100;

	/* this thread must never be frozen by suspend */
	current->flags |= PF_NOFREEZE;

	while (!kthread_should_stop()) {
		int ret;
		int module_irq;
		u8 pih_isr;

		/* Wait for IRQ, then read PIH irq status (also blocking) */
		wait_for_completion_interruptible(&irq_event);

		ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
					  REG_PIH_ISR_P1);
		if (ret) {
			pr_warning("twl4030: I2C error %d reading PIH ISR\n",
					ret);
			if (++i2c_errors >= max_i2c_errors) {
				printk(KERN_ERR "Maximum I2C error count"
						" exceeded.  Terminating %s.\n",
						__func__);
				break;
			}
			/* retry: re-arm ourselves and try the read again */
			complete(&irq_event);
			continue;
		}

		/* these handlers deal with the relevant SIH irq status */
		local_irq_disable();
		for (module_irq = twl4030_irq_base;
				pih_isr;
				pih_isr >>= 1, module_irq++) {
			if (pih_isr & 0x1)
				generic_handle_irq(module_irq);
		}
		local_irq_enable();

		/* re-enable the line masked by handle_twl4030_pih() */
		enable_irq(irq);
	}

	return 0;
}
/*
 * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
 * This is a chained interrupt, so there is no desc->action method for it.
 * Now we need to query the interrupt controller in the twl4030 to determine
 * which module is generating the interrupt request.  However, we can't do i2c
 * transactions in interrupt context, so we must defer that work to a kernel
 * thread.  All we do here is acknowledge and mask the interrupt and wakeup
 * the kernel thread.
 */
/* @devid is the &irq_event completion the worker thread waits on. */
static irqreturn_t handle_twl4030_pih(int irq, void *devid)
{
	/* Acknowledge, clear *AND* mask the interrupt... */
	disable_irq_nosync(irq);
	complete(devid);
	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
/*
 * twl4030_init_sih_modules() ... start from a known state where no
 * IRQs will be coming in, and where we can quickly enable them then
 * handle them as they arrive.  Mask all IRQs: maybe init SIH_CTRL.
 *
 * NOTE:  we don't touch EDR registers here; they stay with hardware
 * defaults or whatever the last value was.  Note that when both EDR
 * bits for an IRQ are clear, that's as if its IMR bit is set...
 *
 * Two passes over the module table: first mask everything (and set
 * clear-on-read where supported), then flush any pending status so no
 * stale interrupt fires once the lines are unmasked.
 */
static int twl4030_init_sih_modules(unsigned line)
{
	const struct sih *sih;
	u8 buf[4];
	int i;
	int status;

	/* line 0 == int1_n signal; line 1 == int2_n signal */
	if (line > 1)
		return -EINVAL;

	irq_line = line;

	/* disable all interrupts on our line */
	memset(buf, 0xff, sizeof buf);
	sih = sih_modules;
	for (i = 0; i < nr_sih_modules; i++, sih++) {

		/* skip USB -- it's funky */
		if (!sih->bytes_ixr)
			continue;

		/* Not all the SIH modules support multiple interrupt lines */
		if (sih->irq_lines <= line)
			continue;

		status = twl_i2c_write(sih->module, buf,
				sih->mask[line].imr_offset, sih->bytes_ixr);
		if (status < 0)
			pr_err("twl4030: err %d initializing %s %s\n",
					status, sih->name, "IMR");

		/* Maybe disable "exclusive" mode; buffer second pending irq;
		 * set Clear-On-Read (COR) bit.
		 *
		 * NOTE that sometimes COR polarity is documented as being
		 * inverted:  for MADC, COR=1 means "clear on write".
		 * And for PWR_INT it's not documented...
		 */
		if (sih->set_cor) {
			status = twl_i2c_write_u8(sih->module,
					TWL4030_SIH_CTRL_COR_MASK,
					sih->control_offset);
			if (status < 0)
				pr_err("twl4030: err %d initializing %s %s\n",
						status, sih->name, "SIH_CTRL");
		}
	}

	sih = sih_modules;
	for (i = 0; i < nr_sih_modules; i++, sih++) {
		u8 rxbuf[4];
		int j;

		/* skip USB */
		if (!sih->bytes_ixr)
			continue;

		/* Not all the SIH modules support multiple interrupt lines */
		if (sih->irq_lines <= line)
			continue;

		/* Clear pending interrupt status.  Either the read was
		 * enough, or we need to write those bits.  Repeat, in
		 * case an IRQ is pending (PENDDIS=0) ... that's not
		 * uncommon with PWR_INT.PWRON.
		 */
		for (j = 0; j < 2; j++) {
			status = twl_i2c_read(sih->module, rxbuf,
				sih->mask[line].isr_offset, sih->bytes_ixr);
			if (status < 0)
				pr_err("twl4030: err %d initializing %s %s\n",
					status, sih->name, "ISR");

			if (!sih->set_cor)
				/* write 1s to clear; buf is still all 0xff */
				status = twl_i2c_write(sih->module, buf,
					sih->mask[line].isr_offset,
					sih->bytes_ixr);
			/* else COR=1 means read sufficed.
			 * (for most SIH modules...)
			 */
		}
	}

	return 0;
}
/* Make a freshly configured virtual IRQ requestable by drivers. */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* same effect on other architectures */
	irq_set_noprobe(irq);
#endif
}
/*----------------------------------------------------------------------*/
static DEFINE_SPINLOCK(sih_agent_lock);
static struct workqueue_struct *wq;
/*
 * Per-SIH runtime state.  irq_chip callbacks can't do I2C (they run
 * with the irq descriptor lock held), so mask and edge changes are
 * recorded here under sih_agent_lock and flushed from a workqueue.
 */
struct sih_agent {
	int			irq_base;	/* first Linux IRQ of this bank */
	const struct sih	*sih;

	u32			imr;		/* shadow interrupt mask */
	bool			imr_change_pending;
	struct work_struct	mask_work;

	u32			edge_change;	/* IRQs whose trigger type changed */
	struct work_struct	edge_work;
};
/*
 * Workqueue body: push the shadow IMR to the chip.  The snapshot is
 * taken under sih_agent_lock; the I2C write happens unlocked.
 */
static void twl4030_sih_do_mask(struct work_struct *work)
{
	struct sih_agent *agent;
	const struct sih *sih;
	union {
		u8	bytes[4];
		u32	word;
	} imr;
	int status;

	agent = container_of(work, struct sih_agent, mask_work);

	/* see what work we have */
	spin_lock_irq(&sih_agent_lock);
	if (agent->imr_change_pending) {
		sih = agent->sih;
		/* byte[0] gets overwritten as we write ... */
		imr.word = cpu_to_le32(agent->imr << 8);
		agent->imr_change_pending = false;
	} else
		sih = NULL;
	spin_unlock_irq(&sih_agent_lock);
	if (!sih)
		return;

	/* write the whole mask ... simpler than subsetting it */
	status = twl_i2c_write(sih->module, imr.bytes,
			sih->mask[irq_line].imr_offset, sih->bytes_ixr);
	if (status)
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"write", status);
}
/*
 * Workqueue body: read-modify-write the EDR (edge detect) registers
 * for every IRQ whose trigger type changed.  Each IRQ owns a 2-bit
 * field: bit(off) = falling edge, bit(off+1) = rising edge.
 */
static void twl4030_sih_do_edge(struct work_struct *work)
{
	struct sih_agent *agent;
	const struct sih *sih;
	u8 bytes[6];
	u32 edge_change;
	int status;

	agent = container_of(work, struct sih_agent, edge_work);

	/* see what work we have */
	spin_lock_irq(&sih_agent_lock);
	edge_change = agent->edge_change;
	agent->edge_change = 0;
	sih = edge_change ? agent->sih : NULL;
	spin_unlock_irq(&sih_agent_lock);
	if (!sih)
		return;

	/* Read, reserving first byte for write scratch.  Yes, this
	 * could be cached for some speedup ... but be careful about
	 * any processor on the other IRQ line, EDR registers are
	 * shared.
	 */
	status = twl_i2c_read(sih->module, bytes + 1,
			sih->edr_offset, sih->bytes_edr);
	if (status) {
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"read", status);
		return;
	}

	/* Modify only the bits we know must change */
	while (edge_change) {
		int		i = fls(edge_change) - 1;
		struct irq_data	*idata = irq_get_irq_data(i + agent->irq_base);
		int		byte = 1 + (i >> 2);	/* 4 IRQs per EDR byte */
		int		off = (i & 0x3) * 2;	/* 2 bits per IRQ */
		unsigned int	type;

		bytes[byte] &= ~(0x03 << off);

		type = irqd_get_trigger_type(idata);
		if (type & IRQ_TYPE_EDGE_RISING)
			bytes[byte] |= BIT(off + 1);
		if (type & IRQ_TYPE_EDGE_FALLING)
			bytes[byte] |= BIT(off + 0);

		edge_change &= ~BIT(i);
	}

	/* Write */
	status = twl_i2c_write(sih->module, bytes,
			sih->edr_offset, sih->bytes_edr);
	if (status)
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"write", status);
}
/*----------------------------------------------------------------------*/
/*
* All irq_chip methods get issued from code holding irq_desc[irq].lock,
* which can't perform the underlying I2C operations (because they sleep).
* So we must hand them off to a thread (workqueue) and cope with asynch
* completion, potentially including some re-ordering, of these requests.
*/
/*
 * irq_chip .irq_mask: set the bit in the shadow IMR and queue the
 * deferred I2C write (I2C can't run under the irq descriptor lock).
 */
static void twl4030_sih_mask(struct irq_data *data)
{
	struct sih_agent *agent = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	spin_lock_irqsave(&sih_agent_lock, flags);
	agent->imr |= BIT(data->irq - agent->irq_base);
	agent->imr_change_pending = true;
	queue_work(wq, &agent->mask_work);
	spin_unlock_irqrestore(&sih_agent_lock, flags);
}
/*
 * irq_chip .irq_unmask: clear the bit in the shadow IMR and queue the
 * deferred I2C write.
 */
static void twl4030_sih_unmask(struct irq_data *data)
{
	struct sih_agent *agent = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	spin_lock_irqsave(&sih_agent_lock, flags);
	agent->imr &= ~BIT(data->irq - agent->irq_base);
	agent->imr_change_pending = true;
	queue_work(wq, &agent->mask_work);
	spin_unlock_irqrestore(&sih_agent_lock, flags);
}
/*
 * irq_chip .irq_set_type: only edge triggers are supported.  If the
 * trigger actually changes, mark the IRQ and queue the deferred EDR
 * update; the genirq core records the new type in the irq_data.
 */
static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
{
	struct sih_agent *agent = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		return -EINVAL;

	spin_lock_irqsave(&sih_agent_lock, flags);
	if (irqd_get_trigger_type(data) != trigger) {
		agent->edge_change |= BIT(data->irq - agent->irq_base);
		queue_work(wq, &agent->edge_work);
	}
	spin_unlock_irqrestore(&sih_agent_lock, flags);

	return 0;
}
/* Shared irq_chip for all SIH-based IRQs; no wake support is set up here. */
static struct irq_chip twl4030_sih_irq_chip = {
	.name		= "twl4030",
	.irq_mask	= twl4030_sih_mask,
	.irq_unmask	= twl4030_sih_unmask,
	.irq_set_type	= twl4030_sih_set_type,
};
/*----------------------------------------------------------------------*/
static inline int sih_read_isr(const struct sih *sih)
{
int status;
union {
u8 bytes[4];
u32 word;
} isr;
/* FIXME need retry-on-error ... */
isr.word = 0;
status = twl_i2c_read(sih->module, isr.bytes,
sih->mask[irq_line].isr_offset, sih->bytes_ixr);
return (status < 0) ? status : le32_to_cpu(isr.word);
}
/*
 * Generic handler for SIH interrupts ... we "know" this is called
 * in task context, with IRQs enabled.
 */
static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
{
	struct sih_agent *agent = irq_get_handler_data(irq);
	const struct sih *sih = agent->sih;
	int isr;

	/* reading ISR acks the IRQs, using clear-on-read mode */
	local_irq_enable();
	isr = sih_read_isr(sih);	/* sleeps on I2C; IRQs must be on */
	local_irq_disable();

	if (isr < 0) {
		pr_err("twl4030: %s SIH, read ISR error %d\n",
			sih->name, isr);
		/* REVISIT: recover; eventually mask it all, etc */
		return;
	}

	/* dispatch each set status bit, highest bit first */
	while (isr) {
		irq = fls(isr);
		irq--;			/* fls() is 1-based */
		isr &= ~BIT(irq);

		if (irq < sih->bits)
			generic_handle_irq(agent->irq_base + irq);
		else
			pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
				sih->name, irq);
	}
}
/* next free virtual IRQ number handed out to SIH banks */
static unsigned twl4030_irq_next;

/* returns the first IRQ used by this SIH bank,
 * or negative errno
 */
int twl4030_sih_setup(int module)
{
	int			sih_mod;
	const struct sih	*sih = NULL;
	struct sih_agent	*agent;
	int			i, irq;
	int			status = -EINVAL;
	unsigned		irq_base = twl4030_irq_next;

	/* only support modules with standard clear-on-read for now */
	for (sih_mod = 0, sih = sih_modules;
			sih_mod < nr_sih_modules;
			sih_mod++, sih++) {
		if (sih->module == module && sih->set_cor) {
			/* refuse the bank if it would overflow NR_IRQS */
			if (!WARN((irq_base + sih->bits) > NR_IRQS,
					"irq %d for %s too big\n",
					irq_base + sih->bits,
					sih->name))
				status = 0;
			break;
		}
	}
	if (status < 0)
		return status;

	agent = kzalloc(sizeof *agent, GFP_KERNEL);
	if (!agent)
		return -ENOMEM;

	status = 0;

	agent->irq_base = irq_base;
	agent->sih = sih;
	agent->imr = ~0;		/* everything masked until unmask */
	INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
	INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);

	/* install the edge-triggered chip on every IRQ of this bank */
	for (i = 0; i < sih->bits; i++) {
		irq = irq_base + i;
		irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
					 handle_edge_irq);
		irq_set_chip_data(irq, agent);
		activate_irq(irq);
	}

	status = irq_base;
	twl4030_irq_next += i;

	/* replace generic PIH handler (handle_simple_irq) */
	irq = sih_mod + twl4030_irq_base;
	irq_set_handler_data(irq, agent);
	irq_set_chained_handler(irq, handle_twl4030_sih);

	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
			irq, irq_base, twl4030_irq_next - 1);

	return status;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
/*----------------------------------------------------------------------*/
/* FIXME pass in which interrupt line we'll use ... */
#define twl_irq_line 0
/*
 * Set up the whole TWL4030 interrupt hierarchy: mask/clear all SIH
 * modules, install a dummy PIH irq_chip on [irq_base, irq_end), chain
 * the PWR_INT SIH, and start the demux kthread for the hardware line.
 * Returns 0 on success or a negative errno; undoes partial setup on
 * failure (except the SIH bank — see the FIXME label below).
 */
int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{
	static struct irq_chip	twl4030_irq_chip;
	int			status;
	int			i;
	struct task_struct	*task;

	/*
	 * Mask and clear all TWL4030 interrupts since initially we do
	 * not have any TWL4030 module interrupt handlers present
	 */
	status = twl4030_init_sih_modules(twl_irq_line);
	if (status < 0)
		return status;

	wq = create_singlethread_workqueue("twl4030-irqchip");
	if (!wq) {
		pr_err("twl4030: workqueue FAIL\n");
		return -ESRCH;
	}

	twl4030_irq_base = irq_base;

	/* install an irq handler for each of the SIH modules;
	 * clone dummy irq_chip since PIH can't *do* anything
	 */
	twl4030_irq_chip = dummy_irq_chip;
	twl4030_irq_chip.name = "twl4030";

	/* SIH chip inherits the dummy ack (nothing to ack at PIH level) */
	twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;

	for (i = irq_base; i < irq_end; i++) {
		irq_set_chip_and_handler(i, &twl4030_irq_chip,
					 handle_simple_irq);
		activate_irq(i);
	}
	twl4030_irq_next = i;
	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
			irq_num, irq_base, twl4030_irq_next - 1);

	/* ... and the PWR_INT module ... */
	status = twl4030_sih_setup(TWL4030_MODULE_INT);
	if (status < 0) {
		pr_err("twl4030: sih_setup PWR INT --> %d\n", status);
		goto fail;
	}

	/* install an irq handler to demultiplex the TWL4030 interrupt */
	init_completion(&irq_event);
	/* NOTE(review): IRQF_DISABLED is deprecated/no-op in later kernels */
	status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED,
			     "TWL4030-PIH", &irq_event);
	if (status < 0) {
		pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
		goto fail_rqirq;
	}

	task = kthread_run(twl4030_irq_thread, (void *)(long)irq_num,
			   "twl4030-irq");
	if (IS_ERR(task)) {
		pr_err("twl4030: could not create irq %d thread!\n", irq_num);
		status = PTR_ERR(task);
		goto fail_kthread;
	}
	return status;

	/* unwind in reverse order of acquisition */
fail_kthread:
	free_irq(irq_num, &irq_event);
fail_rqirq:
	/* clean up twl4030_sih_setup */
fail:
	for (i = irq_base; i < irq_end; i++)
		irq_set_chip_and_handler(i, NULL, NULL);
	destroy_workqueue(wq);
	wq = NULL;
	return status;
}
/*
 * Tear down what twl4030_init_irq() set up.  Not implemented yet:
 * once IRQs have been installed we refuse to unwind them.
 */
int twl4030_exit_irq(void)
{
	/* FIXME undo twl_init_irq() */
	if (!twl4030_irq_base)
		return 0;

	pr_err("twl4030: can't yet clean up IRQs?\n");
	return -ENOSYS;
}
int twl4030_init_chip_irq(const char *chip)
{
if (!strcmp(chip, "twl5031")) {
sih_modules = sih_modules_twl5031;
nr_sih_modules = ARRAY_SIZE(sih_modules_twl5031);
} else {
sih_modules = sih_modules_twl4030;
nr_sih_modules = ARRAY_SIZE(sih_modules_twl4030);
}
return 0;
}
| gpl-2.0 |
hafidzduddin/codina | arch/arm/mach-pxa/clock-pxa2xx.c | 2933 | 1055 | /*
* linux/arch/arm/mach-pxa/clock-pxa2xx.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <mach/pxa2xx-regs.h>
#include "clock.h"
/* Gate a peripheral clock on: set its bit in the CKEN register. */
void clk_pxa2xx_cken_enable(struct clk *clk)
{
	CKEN |= 1 << clk->cken;
}

/* Gate a peripheral clock off: clear its bit in CKEN. */
void clk_pxa2xx_cken_disable(struct clk *clk)
{
	CKEN &= ~(1 << clk->cken);
}

/* clkops shared by every PXA2xx clock controlled through a CKEN bit. */
const struct clkops clk_pxa2xx_cken_ops = {
	.enable		= clk_pxa2xx_cken_enable,
	.disable	= clk_pxa2xx_cken_disable,
};
#ifdef CONFIG_PM
/* CKEN snapshot taken at suspend, restored verbatim on resume */
static uint32_t saved_cken;

static int pxa2xx_clock_suspend(void)
{
	saved_cken = CKEN;
	return 0;
}

static void pxa2xx_clock_resume(void)
{
	CKEN = saved_cken;
}
#else
#define pxa2xx_clock_suspend	NULL
#define pxa2xx_clock_resume	NULL
#endif

/* syscore hooks so clock gating state survives suspend/resume */
struct syscore_ops pxa2xx_clock_syscore_ops = {
	.suspend = pxa2xx_clock_suspend,
	.resume = pxa2xx_clock_resume,
};
| gpl-2.0 |
varunchitre15/android_kernel_sony_msm8930 | arch/mips/kernel/module.c | 4725 | 9794 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2005 Thiemo Seufer
*/
#undef DEBUG
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/jump_label.h>
#include <asm/pgtable.h> /* MODULE_START */
/*
 * Pending R_MIPS_HI16 relocation: HI16 can only be resolved once the
 * paired LO16 is seen (the carry depends on the LO16 addend), so HI16s
 * are queued on a singly-linked list until then.
 */
struct mips_hi16 {
	struct mips_hi16 *next;	/* next pending HI16 */
	Elf_Addr *addr;		/* instruction to patch */
	Elf_Addr value;		/* symbol value for this reloc */
};

static struct mips_hi16 *mips_hi16_list;

/* modules that registered a __dbe_table (bus-error exception table) */
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);

#ifdef MODULE_START
/* Place module text in the dedicated KSEG module area when one exists. */
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
				GFP_KERNEL, PAGE_KERNEL, -1,
				__builtin_return_address(0));
}
#endif
/* R_MIPS_NONE: nothing to patch. */
static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
	return 0;
}

/* R_MIPS_32 (REL): the addend lives in *location, so add v to it. */
static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
{
	*location += v;
	return 0;
}

/* R_MIPS_32 (RELA): the addend is already folded into v, so store it. */
static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
	*location = v;
	return 0;
}
/*
 * R_MIPS_26 (REL): patch the 26-bit jump target field of a j/jal.
 * The target must be word aligned and in the same 256MB segment as
 * the delay-slot address (the instruction only encodes 28 low bits).
 */
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
	if (v % 4) {
		pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
		       me->name);
		return -ENOEXEC;
	}

	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
		printk(KERN_ERR
		       "module %s: relocation overflow\n",
		       me->name);
		return -ENOEXEC;
	}

	/* REL form: add the in-place addend to the shifted target */
	*location = (*location & ~0x03ffffff) |
	            ((*location + (v >> 2)) & 0x03ffffff);

	return 0;
}
/*
 * R_MIPS_26 (RELA): patch the 26-bit jump target field of a j/jal.
 * v already includes the addend.  Same alignment and same-256MB-segment
 * constraints as the REL variant above.
 *
 * Fixes: the "dangerous" error message was missing the space before
 * "relocation" ("RELArelocation"); also uses pr_err consistently for
 * both error paths.
 */
static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
{
	if (v % 4) {
		pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n",
		       me->name);
		return -ENOEXEC;
	}

	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
		pr_err("module %s: relocation overflow\n", me->name);
		return -ENOEXEC;
	}

	/* RELA form: the target comes entirely from v */
	*location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);

	return 0;
}
/*
 * R_MIPS_HI16 (REL): cannot be resolved yet — the carry into the high
 * half depends on the paired LO16's addend.  Queue it; the LO16 handler
 * performs the actual patch.
 */
static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
{
	struct mips_hi16 *n;

	/*
	 * We cannot relocate this one now because we don't know the value of
	 * the carry we need to add.  Save the information, and let LO16 do the
	 * actual relocation.
	 */
	n = kmalloc(sizeof *n, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->addr = (Elf_Addr *)location;
	n->value = v;
	n->next = mips_hi16_list;
	mips_hi16_list = n;

	return 0;
}

/*
 * R_MIPS_HI16 (RELA): addend is explicit, so patch immediately.
 * The +0x8000 rounds so the sign-extended LO16 lands on the right value.
 */
static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
{
	*location = (*location & 0xffff0000) |
		    ((((long long) v + 0x8000LL) >> 16) & 0xffff);

	return 0;
}
/*
 * R_MIPS_LO16 (REL): resolve this LO16 and flush every queued HI16
 * that pairs with it.  All queued HI16s must carry the same symbol
 * value; otherwise the pairing is broken and we bail out.
 */
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
	unsigned long insnlo = *location;
	Elf_Addr val, vallo;

	/* Sign extend the addend we extract from the lo insn.  */
	vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;

	if (mips_hi16_list != NULL) {
		struct mips_hi16 *l;

		l = mips_hi16_list;
		while (l != NULL) {
			struct mips_hi16 *next;
			unsigned long insn;

			/*
			 * The value for the HI16 had best be the same.
			 */
			if (v != l->value)
				goto out_danger;

			/*
			 * Do the HI16 relocation.  Note that we actually don't
			 * need to know anything about the LO16 itself, except
			 * where to find the low 16 bits of the addend needed
			 * by the LO16.
			 */
			insn = *l->addr;
			val = ((insn & 0xffff) << 16) + vallo;
			val += v;

			/*
			 * Account for the sign extension that will happen in
			 * the low bits.
			 */
			val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;

			insn = (insn & ~0xffff) | val;
			*l->addr = insn;

			next = l->next;
			kfree(l);
			l = next;
		}

		mips_hi16_list = NULL;
	}

	/*
	 * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
	 */
	val = v + vallo;
	insnlo = (insnlo & ~0xffff) | (val & 0xffff);
	*location = insnlo;

	return 0;

out_danger:
	pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);

	return -ENOEXEC;
}

/* R_MIPS_LO16 (RELA): addend already in v; just store the low half. */
static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
{
	*location = (*location & 0xffff0000) | (v & 0xffff);

	return 0;
}
/* R_MIPS_64 (RELA): store a full native-width address. */
static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
{
	*(Elf_Addr *)location = v;

	return 0;
}

/* R_MIPS_HIGHER (RELA): bits 32..47 of v, with rounding carry. */
static int apply_r_mips_higher_rela(struct module *me, u32 *location,
				    Elf_Addr v)
{
	*location = (*location & 0xffff0000) |
		    ((((long long) v + 0x80008000LL) >> 32) & 0xffff);

	return 0;
}

/* R_MIPS_HIGHEST (RELA): bits 48..63 of v, with rounding carry. */
static int apply_r_mips_highest_rela(struct module *me, u32 *location,
				     Elf_Addr v)
{
	*location = (*location & 0xffff0000) |
		    ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);

	return 0;
}

/*
 * Dispatch tables indexed by ELF relocation type.  NOTE: designated
 * initializers leave gaps (e.g. R_MIPS_16, R_MIPS_REL32) as NULL, and
 * types above the last entry are out of bounds — callers must validate
 * the type before indexing.
 */
static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
				Elf_Addr v) = {
	[R_MIPS_NONE]		= apply_r_mips_none,
	[R_MIPS_32]		= apply_r_mips_32_rel,
	[R_MIPS_26]		= apply_r_mips_26_rel,
	[R_MIPS_HI16]		= apply_r_mips_hi16_rel,
	[R_MIPS_LO16]		= apply_r_mips_lo16_rel
};

static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
				Elf_Addr v) = {
	[R_MIPS_NONE]		= apply_r_mips_none,
	[R_MIPS_32]		= apply_r_mips_32_rela,
	[R_MIPS_26]		= apply_r_mips_26_rela,
	[R_MIPS_HI16]		= apply_r_mips_hi16_rela,
	[R_MIPS_LO16]		= apply_r_mips_lo16_rela,
	[R_MIPS_64]		= apply_r_mips_64_rela,
	[R_MIPS_HIGHER]		= apply_r_mips_higher_rela,
	[R_MIPS_HIGHEST]	= apply_r_mips_highest_rela
};
/*
 * Apply a REL relocation section to a module being loaded.
 * Returns 0 on success, -ENOENT for an unresolved non-weak symbol,
 * or a handler's error code.
 *
 * Fixes: the relocation type from the (untrusted) module image was
 * used to index reloc_handlers_rel[] unchecked.  The table is sparse
 * (R_MIPS_16 and R_MIPS_REL32 slots are NULL) and only covers types up
 * to R_MIPS_LO16, so a malformed module could trigger a NULL call or
 * an out-of-bounds read.  Validate the type first.
 */
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
		   unsigned int symindex, unsigned int relsec,
		   struct module *me)
{
	Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u32 *location;
	unsigned int i;
	unsigned int type;
	Elf_Addr v;
	int res;

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_MIPS_R_SYM(rel[i]);
		if (IS_ERR_VALUE(sym->st_value)) {
			/* Ignore unresolved weak symbol */
			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
				continue;
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       me->name, strtab + sym->st_name);
			return -ENOENT;
		}

		v = sym->st_value;

		/* reject relocation types we have no handler for */
		type = ELF_MIPS_R_TYPE(rel[i]);
		if (type >= ARRAY_SIZE(reloc_handlers_rel) ||
		    !reloc_handlers_rel[type]) {
			pr_err("%s: Unsupported relocation type %u\n",
			       me->name, type);
			return -ENOEXEC;
		}

		res = reloc_handlers_rel[type](me, location, v);
		if (res)
			return res;
	}

	return 0;
}
/*
 * Apply a RELA relocation section to a module being loaded.  Same as
 * apply_relocate() but the addend comes from the relocation record.
 *
 * Fixes: validate the relocation type before indexing the (sparse)
 * reloc_handlers_rela[] table, so a malformed module cannot cause a
 * NULL function call or an out-of-bounds table read.
 */
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u32 *location;
	unsigned int i;
	unsigned int type;
	Elf_Addr v;
	int res;

	pr_debug("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_MIPS_R_SYM(rel[i]);
		if (IS_ERR_VALUE(sym->st_value)) {
			/* Ignore unresolved weak symbol */
			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
				continue;
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       me->name, strtab + sym->st_name);
			return -ENOENT;
		}

		v = sym->st_value + rel[i].r_addend;

		/* reject relocation types we have no handler for */
		type = ELF_MIPS_R_TYPE(rel[i]);
		if (type >= ARRAY_SIZE(reloc_handlers_rela) ||
		    !reloc_handlers_rela[type]) {
			pr_err("%s: Unsupported relocation type %u\n",
			       me->name, type);
			return -ENOEXEC;
		}

		res = reloc_handlers_rela[type](me, location, v);
		if (res)
			return res;
	}

	return 0;
}
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
	unsigned long flags;
	const struct exception_table_entry *e = NULL;
	struct mod_arch_specific *dbe;

	spin_lock_irqsave(&dbe_lock, flags);
	list_for_each_entry(dbe, &dbe_list, dbe_list) {
		e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
		if (e)
			break;
	}
	spin_unlock_irqrestore(&dbe_lock, flags);

	/* Now, if we found one, we are running inside it now, hence
	   we cannot unload the module, hence no refcnt needed. */
	return e;
}
/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	/* Make jump label nops. */
	jump_label_apply_nops(me);

	INIT_LIST_HEAD(&me->arch.dbe_list);

	/* register this module's __dbe_table (bus-error fixups), if any */
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
			continue;
		me->arch.dbe_start = (void *)s->sh_addr;
		me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
		spin_lock_irq(&dbe_lock);
		list_add(&me->arch.dbe_list, &dbe_list);
		spin_unlock_irq(&dbe_lock);
	}
	return 0;
}

/* Drop this module's dbe table from the global list on unload. */
void module_arch_cleanup(struct module *mod)
{
	spin_lock_irq(&dbe_lock);
	list_del(&mod->arch.dbe_list);
	spin_unlock_irq(&dbe_lock);
}
| gpl-2.0 |
Albinoman887/Linux-3.4.x | drivers/net/sungem_phy.c | 5237 | 29304 | /*
* PHY drivers for the sungem ethernet driver.
*
* This file could be shared with other drivers.
*
* (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org)
*
* TODO:
* - Add support for PHYs that provide an IRQ line
* - Eventually moved the entire polling state machine in
* there (out of the eth driver), so that it can easily be
* skipped on PHYs that implement it in hardware.
* - On LXT971 & BCM5201, Apple uses some chip specific regs
* to read the link status. Figure out why and if it makes
* sense to do the same (magic aneg ?)
* - Apple has some additional power management code for some
* Broadcom PHYs that they "hide" from the OpenSource version
* of darwin, still need to reverse engineer that
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#endif
#include <linux/sungem_phy.h>
/* Link modes of the BCM5400 PHY */
static const int phy_BCM5400_link_table[8][3] = {
{ 0, 0, 0 }, /* No link */
{ 0, 0, 0 }, /* 10BT Half Duplex */
{ 1, 0, 0 }, /* 10BT Full Duplex */
{ 0, 1, 0 }, /* 100BT Half Duplex */
{ 0, 1, 0 }, /* 100BT Half Duplex */
{ 1, 1, 0 }, /* 100BT Full Duplex*/
{ 1, 0, 1 }, /* 1000BT */
{ 1, 0, 1 }, /* 1000BT */
};
/* MII register access at an explicit PHY address (for cascaded PHYs). */
static inline int __phy_read(struct mii_phy* phy, int id, int reg)
{
	return phy->mdio_read(phy->dev, id, reg);
}

static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val)
{
	phy->mdio_write(phy->dev, id, reg, val);
}

/* MII register access at the PHY's own address. */
static inline int phy_read(struct mii_phy* phy, int reg)
{
	return phy->mdio_read(phy->dev, phy->mii_id, reg);
}

static inline void phy_write(struct mii_phy* phy, int reg, int val)
{
	phy->mdio_write(phy->dev, phy->mii_id, reg, val);
}
/*
 * Soft-reset the PHY at @phy_id and wait (up to ~100ms of polling) for
 * the reset bit to self-clear, then de-isolate it.  Returns nonzero if
 * the reset never completed.
 */
static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
{
	u16 val;
	int limit = 10000;

	val = __phy_read(phy, phy_id, MII_BMCR);
	val &= ~(BMCR_ISOLATE | BMCR_PDOWN);
	val |= BMCR_RESET;
	__phy_write(phy, phy_id, MII_BMCR, val);

	udelay(100);

	while (--limit) {
		val = __phy_read(phy, phy_id, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	if ((val & BMCR_ISOLATE) && limit > 0)
		__phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);

	return limit <= 0;
}
/* Bring the BCM5201 out of super-isolate mode and mask its interrupts. */
static int bcm5201_init(struct mii_phy* phy)
{
	u16 data;

	data = phy_read(phy, MII_BCM5201_MULTIPHY);
	data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
	phy_write(phy, MII_BCM5201_MULTIPHY, data);

	phy_write(phy, MII_BCM5201_INTERRUPT, 0);

	return 0;
}

/* Put the BCM5201 into super-isolate (low power) for suspend. */
static int bcm5201_suspend(struct mii_phy* phy)
{
	phy_write(phy, MII_BCM5201_INTERRUPT, 0);
	phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);

	return 0;
}

/*
 * BCM5221 init: via the shadow register window, enable auto power-down
 * and the low-power clock, then close the window again.
 */
static int bcm5221_init(struct mii_phy* phy)
{
	u16 data;

	data = phy_read(phy, MII_BCM5221_TEST);
	phy_write(phy, MII_BCM5221_TEST,
		data | MII_BCM5221_TEST_ENABLE_SHADOWS);

	data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
	phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
		data | MII_BCM5221_SHDOW_AUX_STAT2_APD);

	data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
	phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
		data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR);

	data = phy_read(phy, MII_BCM5221_TEST);
	phy_write(phy, MII_BCM5221_TEST,
		data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);

	return 0;
}

/* BCM5221 suspend: enter IDDQ low-power mode via the shadow window.
 * The window is deliberately left open; IDDQ halts the PHY anyway.
 */
static int bcm5221_suspend(struct mii_phy* phy)
{
	u16 data;

	data = phy_read(phy, MII_BCM5221_TEST);
	phy_write(phy, MII_BCM5221_TEST,
		  data | MII_BCM5221_TEST_ENABLE_SHADOWS);

	data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
	phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
		  data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE);

	return 0;
}

/* BCM5241 init: like BCM5221 but clears the standby-power bit. */
static int bcm5241_init(struct mii_phy* phy)
{
	u16 data;

	data = phy_read(phy, MII_BCM5221_TEST);
	phy_write(phy, MII_BCM5221_TEST,
		data | MII_BCM5221_TEST_ENABLE_SHADOWS);

	data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
	phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
		data | MII_BCM5221_SHDOW_AUX_STAT2_APD);

	data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
	phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
		data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);

	data = phy_read(phy, MII_BCM5221_TEST);
	phy_write(phy, MII_BCM5221_TEST,
		data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);

	return 0;
}

/* BCM5241 suspend: set the standby-power bit via the shadow window. */
static int bcm5241_suspend(struct mii_phy* phy)
{
	u16 data;

	data = phy_read(phy, MII_BCM5221_TEST);
	phy_write(phy, MII_BCM5221_TEST,
		  data | MII_BCM5221_TEST_ENABLE_SHADOWS);

	data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
	phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
		  data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);

	return 0;
}
/*
 * BCM5400 init: advertise gigabit full duplex and reconfigure the
 * cascaded 10/100 companion PHY (at address 0x1f) into serial mode.
 */
static int bcm5400_init(struct mii_phy* phy)
{
	u16 data;

	/* Configure for gigabit full duplex */
	data = phy_read(phy, MII_BCM5400_AUXCONTROL);
	data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
	phy_write(phy, MII_BCM5400_AUXCONTROL, data);

	data = phy_read(phy, MII_BCM5400_GB_CONTROL);
	data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
	phy_write(phy, MII_BCM5400_GB_CONTROL, data);

	udelay(100);

	/* Reset and configure cascaded 10/100 PHY */
	(void)reset_one_mii_phy(phy, 0x1f);

	data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
	data |= MII_BCM5201_MULTIPHY_SERIALMODE;
	__phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);

	data = phy_read(phy, MII_BCM5400_AUXCONTROL);
	data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
	phy_write(phy, MII_BCM5400_AUXCONTROL, data);

	return 0;
}

static int bcm5400_suspend(struct mii_phy* phy)
{
#if 0 /* Commented out in Darwin... someone has those dawn docs ? */
	phy_write(phy, MII_BMCR, BMCR_PDOWN);
#endif
	return 0;
}

/*
 * BCM5401 init: apply the rev-0/3 DSP workaround sequence (disables
 * "tap power management"), enable gigabit full duplex, and put the
 * cascaded 10/100 PHY into serial mode.
 */
static int bcm5401_init(struct mii_phy* phy)
{
	u16 data;
	int rev;

	rev = phy_read(phy, MII_PHYSID2) & 0x000f;
	if (rev == 0 || rev == 3) {
		/* Some revisions of 5401 appear to need this
		 * initialisation sequence to disable, according
		 * to OF, "tap power management"
		 *
		 * WARNING ! OF and Darwin don't agree on the
		 * register addresses. OF seem to interpret the
		 * register numbers below as decimal
		 *
		 * Note: This should (and does) match tg3_init_5401phy_dsp
		 *       in the tg3.c driver. -DaveM
		 */
		phy_write(phy, 0x18, 0x0c20);
		phy_write(phy, 0x17, 0x0012);
		phy_write(phy, 0x15, 0x1804);
		phy_write(phy, 0x17, 0x0013);
		phy_write(phy, 0x15, 0x1204);
		phy_write(phy, 0x17, 0x8006);
		phy_write(phy, 0x15, 0x0132);
		phy_write(phy, 0x17, 0x8006);
		phy_write(phy, 0x15, 0x0232);
		phy_write(phy, 0x17, 0x201f);
		phy_write(phy, 0x15, 0x0a20);
	}

	/* Configure for gigabit full duplex */
	data = phy_read(phy, MII_BCM5400_GB_CONTROL);
	data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
	phy_write(phy, MII_BCM5400_GB_CONTROL, data);

	udelay(10);

	/* Reset and configure cascaded 10/100 PHY */
	(void)reset_one_mii_phy(phy, 0x1f);

	data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
	data |= MII_BCM5201_MULTIPHY_SERIALMODE;
	__phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);

	return 0;
}

static int bcm5401_suspend(struct mii_phy* phy)
{
#if 0 /* Commented out in Darwin... someone has those dawn docs ? */
	phy_write(phy, MII_BMCR, BMCR_PDOWN);
#endif
	return 0;
}

/*
 * BCM5411 init: Apple's voltage setup magic, a reset, then gigabit
 * full duplex, then reset the cascaded 10/100 PHY.
 */
static int bcm5411_init(struct mii_phy* phy)
{
	u16 data;

	/* Here's some more Apple black magic to setup
	 * some voltage stuffs.
	 */
	phy_write(phy, 0x1c, 0x8c23);
	phy_write(phy, 0x1c, 0x8ca3);
	phy_write(phy, 0x1c, 0x8c23);

	/* Here, Apple seems to want to reset it, do
	 * it as well
	 */
	phy_write(phy, MII_BMCR, BMCR_RESET);
	phy_write(phy, MII_BMCR, 0x1340);

	data = phy_read(phy, MII_BCM5400_GB_CONTROL);
	data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
	phy_write(phy, MII_BCM5400_GB_CONTROL, data);

	udelay(10);

	/* Reset and configure cascaded 10/100 PHY */
	(void)reset_one_mii_phy(phy, 0x1f);

	return 0;
}
/*
 * Generic MII: program the 10/100 advertisement register from
 * @advertise (ethtool ADVERTISED_* flags) and kick off autonegotiation.
 */
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
{
	u16 ctl, adv;

	phy->autoneg = 1;
	phy->speed = SPEED_10;
	phy->duplex = DUPLEX_HALF;
	phy->pause = 0;
	phy->advertising = advertise;

	/* Setup standard advertise */
	adv = phy_read(phy, MII_ADVERTISE);
	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	if (advertise & ADVERTISED_10baseT_Half)
		adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		adv |= ADVERTISE_100FULL;
	phy_write(phy, MII_ADVERTISE, adv);

	/* Start/Restart aneg */
	ctl = phy_read(phy, MII_BMCR);
	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
/*
 * Generic MII: force a fixed speed/duplex (autoneg off).  Only 10 and
 * 100 Mbit are supported here; SPEED_1000 returns -EINVAL.
 */
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
{
	u16 ctl;

	phy->autoneg = 0;
	phy->speed = speed;
	phy->duplex = fd;
	phy->pause = 0;

	ctl = phy_read(phy, MII_BMCR);
	ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);

	/* First reset the PHY */
	phy_write(phy, MII_BMCR, ctl | BMCR_RESET);

	/* Select speed & duplex */
	switch(speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		ctl |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		return -EINVAL;
	}
	if (fd == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;
	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
/*
 * Generic MII link poll: returns 1 when the link is up (and, under
 * autoneg, negotiation has completed), otherwise 0.
 */
static int genmii_poll_link(struct mii_phy *phy)
{
	u16 bmsr;

	/* BMSR latches link-down; discard the first read to get current state */
	phy_read(phy, MII_BMSR);
	bmsr = phy_read(phy, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS))
		return 0;
	if (phy->autoneg && !(bmsr & BMSR_ANEGCOMPLETE))
		return 0;
	return 1;
}
/*
 * Generic MII: derive speed/duplex from the link-partner ability
 * register after autonegotiation.  In forced mode the values already
 * set by genmii_setup_forced() are left untouched.
 */
static int genmii_read_link(struct mii_phy *phy)
{
	u16 lpa;

	if (phy->autoneg) {
		lpa = phy_read(phy, MII_LPA);

		if (lpa & (LPA_10FULL | LPA_100FULL))
			phy->duplex = DUPLEX_FULL;
		else
			phy->duplex = DUPLEX_HALF;
		if (lpa & (LPA_100FULL | LPA_100HALF))
			phy->speed = SPEED_100;
		else
			phy->speed = SPEED_10;
		phy->pause = 0;
	}
	/* On non-aneg, we assume what we put in BMCR is the speed,
	 * though magic-aneg shouldn't prevent this case from occurring
	 */

	return 0;
}

/* Power the PHY down via the standard BMCR power-down bit. */
static int generic_suspend(struct mii_phy* phy)
{
	phy_write(phy, MII_BMCR, BMCR_PDOWN);

	return 0;
}
/*
 * BCM5421 init: revision-specific fixups (rev 0 magic, K2 variant OF
 * init), plus optional automatic low-power mode unless the device tree
 * parent carries "no-autolowpower".
 */
static int bcm5421_init(struct mii_phy* phy)
{
	u16 data;
	unsigned int id;

	id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));

	/* Revision 0 of 5421 needs some fixups */
	if (id == 0x002060e0) {
		/* This is borrowed from MacOS
		 */
		phy_write(phy, 0x18, 0x1007);
		data = phy_read(phy, 0x18);
		phy_write(phy, 0x18, data | 0x0400);
		phy_write(phy, 0x18, 0x0007);
		data = phy_read(phy, 0x18);
		phy_write(phy, 0x18, data | 0x0800);
		phy_write(phy, 0x17, 0x000a);
		data = phy_read(phy, 0x15);
		phy_write(phy, 0x15, data | 0x0200);
	}

	/* Pick up some init code from OF for K2 version */
	if ((id & 0xfffffff0) == 0x002062e0) {
		phy_write(phy, 4, 0x01e1);
		phy_write(phy, 9, 0x0300);
	}

	/* Check if we can enable automatic low power */
#ifdef CONFIG_PPC_PMAC
	if (phy->platform_data) {
		struct device_node *np = of_get_parent(phy->platform_data);
		int can_low_power = 1;
		if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
			can_low_power = 0;
		if (can_low_power) {
			/* Enable automatic low-power */
			phy_write(phy, 0x1c, 0x9002);
			phy_write(phy, 0x1c, 0xa821);
			phy_write(phy, 0x1c, 0x941d);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	return 0;
}
/*
 * BCM54xx: like genmii_setup_aneg() but also advertises pause frames
 * and 1000BaseT capabilities before restarting autonegotiation.
 */
static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
{
	u16 ctl, adv;

	phy->autoneg = 1;
	phy->speed = SPEED_10;
	phy->duplex = DUPLEX_HALF;
	phy->pause = 0;
	phy->advertising = advertise;

	/* Setup standard advertise */
	adv = phy_read(phy, MII_ADVERTISE);
	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	if (advertise & ADVERTISED_10baseT_Half)
		adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		adv |= ADVERTISE_100FULL;
	if (advertise & ADVERTISED_Pause)
		adv |= ADVERTISE_PAUSE_CAP;
	if (advertise & ADVERTISED_Asym_Pause)
		adv |= ADVERTISE_PAUSE_ASYM;
	phy_write(phy, MII_ADVERTISE, adv);

	/* Setup 1000BT advertise */
	adv = phy_read(phy, MII_1000BASETCONTROL);
	adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP);
	if (advertise & SUPPORTED_1000baseT_Half)
		adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
	if (advertise & SUPPORTED_1000baseT_Full)
		adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
	phy_write(phy, MII_1000BASETCONTROL, adv);

	/* Start/Restart aneg */
	ctl = phy_read(phy, MII_BMCR);
	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
/*
 * BCM54xx: force a fixed speed/duplex.  Unlike the generic helper,
 * SPEED_1000 is supported here via BMCR_SPD2.
 */
static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
{
	u16 ctl;

	phy->autoneg = 0;
	phy->speed = speed;
	phy->duplex = fd;
	phy->pause = 0;

	ctl = phy_read(phy, MII_BMCR);
	ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);

	/* First reset the PHY */
	phy_write(phy, MII_BMCR, ctl | BMCR_RESET);

	/* Select speed & duplex */
	switch(speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		ctl |= BMCR_SPEED100;
		break;
	case SPEED_1000:
		ctl |= BMCR_SPD2;
	}
	if (fd == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;

	// XXX Should we set the sungem to GII now on 1000BT ?

	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
/*
 * BCM54xx: decode speed/duplex from the AUXSTATUS link-mode field via
 * phy_BCM5400_link_table, and pause capability from the LPA register.
 */
static int bcm54xx_read_link(struct mii_phy *phy)
{
	int link_mode;
	u16 val;

	if (phy->autoneg) {
		val = phy_read(phy, MII_BCM5400_AUXSTATUS);
		link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
			     MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
		phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
			DUPLEX_FULL : DUPLEX_HALF;
		phy->speed = phy_BCM5400_link_table[link_mode][2] ?
				SPEED_1000 :
				(phy_BCM5400_link_table[link_mode][1] ?
				 SPEED_100 : SPEED_10);
		val = phy_read(phy, MII_LPA);
		phy->pause = (phy->duplex == DUPLEX_FULL) &&
			((val & LPA_PAUSE) != 0);
	}
	/* On non-aneg, we assume what we put in BMCR is the speed,
	 * though magic-aneg shouldn't prevent this case from occurring
	 */

	return 0;
}
/* Marvell 88E1111: apply the rev-0 magic init sequence (paged writes
 * through regs 0x1d/0x1e); later revisions need nothing.
 */
static int marvell88e1111_init(struct mii_phy* phy)
{
	u16 rev;

	/* magic init sequence for rev 0 */
	rev = phy_read(phy, MII_PHYSID2) & 0x000f;
	if (rev == 0) {
		phy_write(phy, 0x1d, 0x000a);
		phy_write(phy, 0x1e, 0x0821);

		phy_write(phy, 0x1d, 0x0006);
		phy_write(phy, 0x1e, 0x8600);

		phy_write(phy, 0x1d, 0x000b);
		phy_write(phy, 0x1e, 0x0100);

		phy_write(phy, 0x1d, 0x0004);
		phy_write(phy, 0x1e, 0x4850);
	}
	return 0;
}
#define BCM5421_MODE_MASK	(1 << 5)

/*
 * BCM5421 link poll: in copper mode defer to the generic MII helper;
 * in fiber mode read link state through the NCONFIG shadow registers.
 */
static int bcm5421_poll_link(struct mii_phy* phy)
{
	u32 phy_reg;
	int mode;

	/* find out in what mode we are */
	phy_write(phy, MII_NCONFIG, 0x1000);
	phy_reg = phy_read(phy, MII_NCONFIG);

	mode = (phy_reg & BCM5421_MODE_MASK) >> 5;

	if (mode == BCM54XX_COPPER)
		return genmii_poll_link(phy);

	/* try to find out whether we have a link */
	phy_write(phy, MII_NCONFIG, 0x2000);
	phy_reg = phy_read(phy, MII_NCONFIG);

	if (phy_reg & 0x0020)
		return 0;
	else
		return 1;
}

/*
 * BCM5421 link read: copper mode defers to bcm54xx_read_link();
 * fiber mode is always 1000Mbit, duplex read from NCONFIG.
 */
static int bcm5421_read_link(struct mii_phy* phy)
{
	u32 phy_reg;
	int mode;

	/* find out in what mode we are */
	phy_write(phy, MII_NCONFIG, 0x1000);
	phy_reg = phy_read(phy, MII_NCONFIG);

	mode = (phy_reg & BCM5421_MODE_MASK) >> 5;

	if (mode == BCM54XX_COPPER)
		return bcm54xx_read_link(phy);

	phy->speed = SPEED_1000;

	/* find out whether we are running half- or full duplex */
	phy_write(phy, MII_NCONFIG, 0x2000);
	phy_reg = phy_read(phy, MII_NCONFIG);

	/* NOTE(review): DUPLEX_HALF is normally 0, so the |= in the
	 * half-duplex arm is effectively a no-op — confirm intended. */
	if ((phy_reg & 0x0080) >> 7)
		phy->duplex |= DUPLEX_HALF;
	else
		phy->duplex |= DUPLEX_FULL;

	return 0;
}

/* Switch the BCM5421 into fiber mode, optionally without autoneg. */
static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
{
	/* enable fiber mode */
	phy_write(phy, MII_NCONFIG, 0x9020);
	/* LEDs active in both modes, autosense prio = fiber */
	phy_write(phy, MII_NCONFIG, 0x945f);

	if (!autoneg) {
		/* switch off fibre autoneg */
		phy_write(phy, MII_NCONFIG, 0xfc01);
		phy_write(phy, 0x0b, 0x0004);
	}

	phy->autoneg = autoneg;

	return 0;
}
#define BCM5461_FIBER_LINK	(1 << 2)
#define BCM5461_MODE_MASK	(3 << 1)

/*
 * BCM5461 link poll: copper mode defers to the generic MII helper;
 * fiber link state is read through the NCONFIG shadow registers.
 */
static int bcm5461_poll_link(struct mii_phy* phy)
{
	u32 phy_reg;
	int mode;

	/* find out in what mode we are */
	phy_write(phy, MII_NCONFIG, 0x7c00);
	phy_reg = phy_read(phy, MII_NCONFIG);

	mode = (phy_reg & BCM5461_MODE_MASK) >> 1;

	if (mode == BCM54XX_COPPER)
		return genmii_poll_link(phy);

	/* find out whether we have a link */
	phy_write(phy, MII_NCONFIG, 0x7000);
	phy_reg = phy_read(phy, MII_NCONFIG);

	if (phy_reg & BCM5461_FIBER_LINK)
		return 1;
	else
		return 0;
}

#define BCM5461_FIBER_DUPLEX	(1 << 3)

/*
 * BCM5461 link read: copper mode defers to bcm54xx_read_link();
 * fiber mode is always 1000Mbit, duplex read from NCONFIG.
 */
static int bcm5461_read_link(struct mii_phy* phy)
{
	u32 phy_reg;
	int mode;

	/* find out in what mode we are */
	phy_write(phy, MII_NCONFIG, 0x7c00);
	phy_reg = phy_read(phy, MII_NCONFIG);

	mode = (phy_reg & BCM5461_MODE_MASK) >> 1;

	if (mode == BCM54XX_COPPER) {
		return bcm54xx_read_link(phy);
	}

	phy->speed = SPEED_1000;

	/* find out whether we are running half- or full duplex */
	phy_write(phy, MII_NCONFIG, 0x7000);
	phy_reg = phy_read(phy, MII_NCONFIG);

	if (phy_reg & BCM5461_FIBER_DUPLEX)
		phy->duplex |= DUPLEX_FULL;
	else
		phy->duplex |= DUPLEX_HALF;

	return 0;
}
/*
 * Switch the BCM5461 into 1000Base-X fiber mode.
 * NOTE(review): the two branch comments were swapped in the original —
 * BMCR 0x1140 sets BMCR_ANENABLE, so the @autoneg branch is the one
 * that enables autonegotiation; corrected below (code unchanged).
 */
static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
{
	/* select fiber mode, enable 1000 base-X registers */
	phy_write(phy, MII_NCONFIG, 0xfc0b);

	if (autoneg) {
		/* enable fiber with autonegotiation */
		phy_write(phy, MII_ADVERTISE, 0x01e0);
		phy_write(phy, MII_BMCR, 0x1140);
	} else {
		/* enable fiber with no autonegotiation */
		phy_write(phy, MII_BMCR, 0x0140);
	}

	phy->autoneg = autoneg;

	return 0;
}
/* Start autonegotiation on a Marvell 88E1101/88E1111.
 *
 * @phy:       PHY to configure
 * @advertise: ADVERTISED_* bitmask of link modes to offer
 *
 * Programs the standard and 1000BASE-T advertisement registers,
 * enables automatic MDI/MDI-X crossover detection, then restarts
 * autonegotiation.  Always returns 0.
 */
static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
{
	u16 ctl, adv;

	phy->autoneg = 1;
	phy->speed = SPEED_10;
	phy->duplex = DUPLEX_HALF;
	phy->pause = 0;
	phy->advertising = advertise;

	/* Setup standard advertise */
	adv = phy_read(phy, MII_ADVERTISE);
	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	if (advertise & ADVERTISED_10baseT_Half)
		adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		adv |= ADVERTISE_100FULL;
	if (advertise & ADVERTISED_Pause)
		adv |= ADVERTISE_PAUSE_CAP;
	if (advertise & ADVERTISED_Asym_Pause)
		adv |= ADVERTISE_PAUSE_ASYM;
	phy_write(phy, MII_ADVERTISE, adv);

	/* Setup 1000BT advertise & enable crossover detect
	 * XXX How do we advertise 1000BT ? Darwin source is
	 * confusing here, they read from specific control and
	 * write to control... Someone has specs for those
	 * beasts ?
	 */
	adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
	adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX;
	adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
		 MII_1000BASETCONTROL_HALFDUPLEXCAP);
	/* 'advertise' carries ADVERTISED_* bits, so test the ADVERTISED_
	 * constants here.  (The original tested SUPPORTED_1000baseT_*,
	 * which happen to have the same values, so behavior is unchanged
	 * — this is a consistency fix only.)
	 */
	if (advertise & ADVERTISED_1000baseT_Half)
		adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
	if (advertise & ADVERTISED_1000baseT_Full)
		adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
	phy_write(phy, MII_1000BASETCONTROL, adv);

	/* Start/Restart aneg */
	ctl = phy_read(phy, MII_BMCR);
	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
/* Force a Marvell PHY to a fixed @speed / @fd (duplex), autoneg off.
 * Also disables MDI/MDI-X crossover and programs the 1000BASE-T
 * duplex-capability bits when forcing gigabit.  Always returns 0.
 */
static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
{
	u16 ctl, ctl2;

	phy->autoneg = 0;
	phy->speed = speed;
	phy->duplex = fd;
	phy->pause = 0;

	ctl = phy_read(phy, MII_BMCR);
	ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
	ctl |= BMCR_RESET;	/* reset bit stays set for the final write below */

	/* Select speed & duplex */
	switch(speed) {
	case SPEED_10:
		break;	/* 10Mbit: all speed bits clear */
	case SPEED_100:
		ctl |= BMCR_SPEED100;
		break;
	/* I'm not sure about the one below, again, Darwin source is
	 * quite confusing and I lack chip specs
	 */
	case SPEED_1000:
		ctl |= BMCR_SPD2;
	}
	if (fd == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;

	/* Disable crossover. Again, the way Apple does it is strange,
	 * though I don't assume they are wrong ;)
	 */
	/* NOTE(review): reads from the PHY-specific control register but
	 * writes to MII_1000BASETCONTROL — taken from Darwin, as the
	 * comment above says; confirm against Marvell docs if touched. */
	ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
	ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX |
		  MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX |
		  MII_1000BASETCONTROL_FULLDUPLEXCAP |
		  MII_1000BASETCONTROL_HALFDUPLEXCAP);
	if (speed == SPEED_1000)
		ctl2 |= (fd == DUPLEX_FULL) ?
			MII_1000BASETCONTROL_FULLDUPLEXCAP :
			MII_1000BASETCONTROL_HALFDUPLEXCAP;
	phy_write(phy, MII_1000BASETCONTROL, ctl2);

	// XXX Should we set the sungem to GII now on 1000BT ?

	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
/* Read back the negotiated link parameters from a Marvell PHY.
 * Returns 0 on success, -EAGAIN while autonegotiation is unresolved.
 */
static int marvell_read_link(struct mii_phy *phy)
{
	u16 status, pmask;

	if (!phy->autoneg) {
		/* On non-aneg, we assume what we put in BMCR is the speed,
		 * though magic-aneg shouldn't prevent this case from occurring
		 */
		return 0;
	}

	status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
	if (!(status & MII_M1011_PHY_SPEC_STATUS_RESOLVED))
		return -EAGAIN;

	if (status & MII_M1011_PHY_SPEC_STATUS_1000)
		phy->speed = SPEED_1000;
	else if (status & MII_M1011_PHY_SPEC_STATUS_100)
		phy->speed = SPEED_100;
	else
		phy->speed = SPEED_10;

	phy->duplex = (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;

	/* pause is on only if both TX and RX pause bits resolved */
	pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
		MII_M1011_PHY_SPEC_STATUS_RX_PAUSE;
	phy->pause = (status & pmask) == pmask;

	return 0;
}
#define MII_BASIC_FEATURES \
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \
SUPPORTED_Pause)
/* On gigabit capable PHYs, we advertise Pause support but not asym pause
* support for now as I'm not sure it's supported and Darwin doesn't do
* it neither. --BenH.
*/
#define MII_GBIT_FEATURES \
(MII_BASIC_FEATURES | \
SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
/* Broadcom BCM 5201 */
static struct mii_phy_ops bcm5201_phy_ops = {
.init = bcm5201_init,
.suspend = bcm5201_suspend,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
static struct mii_phy_def bcm5201_phy_def = {
.phy_id = 0x00406210,
.phy_id_mask = 0xfffffff0,
.name = "BCM5201",
.features = MII_BASIC_FEATURES,
.magic_aneg = 1,
.ops = &bcm5201_phy_ops
};
/* Broadcom BCM 5221 */
static struct mii_phy_ops bcm5221_phy_ops = {
.suspend = bcm5221_suspend,
.init = bcm5221_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
static struct mii_phy_def bcm5221_phy_def = {
.phy_id = 0x004061e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5221",
.features = MII_BASIC_FEATURES,
.magic_aneg = 1,
.ops = &bcm5221_phy_ops
};
/* Broadcom BCM 5241 */
static struct mii_phy_ops bcm5241_phy_ops = {
.suspend = bcm5241_suspend,
.init = bcm5241_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
static struct mii_phy_def bcm5241_phy_def = {
.phy_id = 0x0143bc30,
.phy_id_mask = 0xfffffff0,
.name = "BCM5241",
.features = MII_BASIC_FEATURES,
.magic_aneg = 1,
.ops = &bcm5241_phy_ops
};
/* Broadcom BCM 5400 */
static struct mii_phy_ops bcm5400_phy_ops = {
.init = bcm5400_init,
.suspend = bcm5400_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static struct mii_phy_def bcm5400_phy_def = {
.phy_id = 0x00206040,
.phy_id_mask = 0xfffffff0,
.name = "BCM5400",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5400_phy_ops
};
/* Broadcom BCM 5401 */
static struct mii_phy_ops bcm5401_phy_ops = {
.init = bcm5401_init,
.suspend = bcm5401_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static struct mii_phy_def bcm5401_phy_def = {
.phy_id = 0x00206050,
.phy_id_mask = 0xfffffff0,
.name = "BCM5401",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5401_phy_ops
};
/* Broadcom BCM 5411 */
static struct mii_phy_ops bcm5411_phy_ops = {
.init = bcm5411_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static struct mii_phy_def bcm5411_phy_def = {
.phy_id = 0x00206070,
.phy_id_mask = 0xfffffff0,
.name = "BCM5411",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5411_phy_ops
};
/* Broadcom BCM 5421 */
static struct mii_phy_ops bcm5421_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = bcm5421_poll_link,
.read_link = bcm5421_read_link,
.enable_fiber = bcm5421_enable_fiber,
};
static struct mii_phy_def bcm5421_phy_def = {
.phy_id = 0x002060e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5421_phy_ops
};
/* Broadcom BCM 5421 built-in K2 */
static struct mii_phy_ops bcm5421k2_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static struct mii_phy_def bcm5421k2_phy_def = {
.phy_id = 0x002062e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421-K2",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5421k2_phy_ops
};
static struct mii_phy_ops bcm5461_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = bcm5461_poll_link,
.read_link = bcm5461_read_link,
.enable_fiber = bcm5461_enable_fiber,
};
static struct mii_phy_def bcm5461_phy_def = {
.phy_id = 0x002060c0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5461",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5461_phy_ops
};
/* Broadcom BCM 5462 built-in Vesta */
static struct mii_phy_ops bcm5462V_phy_ops = {
.init = bcm5421_init,
.suspend = generic_suspend,
.setup_aneg = bcm54xx_setup_aneg,
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
};
static struct mii_phy_def bcm5462V_phy_def = {
.phy_id = 0x002060d0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5462-Vesta",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &bcm5462V_phy_ops
};
/* Marvell 88E1101 amd 88E1111 */
static struct mii_phy_ops marvell88e1101_phy_ops = {
.suspend = generic_suspend,
.setup_aneg = marvell_setup_aneg,
.setup_forced = marvell_setup_forced,
.poll_link = genmii_poll_link,
.read_link = marvell_read_link
};
static struct mii_phy_ops marvell88e1111_phy_ops = {
.init = marvell88e1111_init,
.suspend = generic_suspend,
.setup_aneg = marvell_setup_aneg,
.setup_forced = marvell_setup_forced,
.poll_link = genmii_poll_link,
.read_link = marvell_read_link
};
/* two revs in darwin for the 88e1101 ... I could use a datasheet
* to get the proper names...
*/
static struct mii_phy_def marvell88e1101v1_phy_def = {
.phy_id = 0x01410c20,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v1",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
static struct mii_phy_def marvell88e1101v2_phy_def = {
.phy_id = 0x01410c60,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v2",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
static struct mii_phy_def marvell88e1111_phy_def = {
.phy_id = 0x01410cc0,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1111",
.features = MII_GBIT_FEATURES,
.magic_aneg = 1,
.ops = &marvell88e1111_phy_ops
};
/* Generic implementation for most 10/100 PHYs */
static struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
.features = MII_BASIC_FEATURES,
.magic_aneg = 0,
.ops = &generic_phy_ops
};
/* Probe table, scanned in order by sungem_phy_probe().  The all-zero
 * generic entry just before the NULL sentinel matches any PHY ID, so
 * the lookup never comes up empty.
 */
static struct mii_phy_def* mii_phy_table[] = {
	&bcm5201_phy_def,
	&bcm5221_phy_def,
	&bcm5241_phy_def,
	&bcm5400_phy_def,
	&bcm5401_phy_def,
	&bcm5411_phy_def,
	&bcm5421_phy_def,
	&bcm5421k2_phy_def,
	&bcm5461_phy_def,
	&bcm5462V_phy_def,
	&marvell88e1101v1_phy_def,
	&marvell88e1101v2_phy_def,
	&marvell88e1111_phy_def,
	&genmii_phy_def,
	NULL
};
/* Identify the PHY at MII address @mii_id and bind the matching
 * mii_phy_def from mii_phy_table.  Returns 0 on success, -ENODEV if
 * the reset fails or (should-never-happen) no table entry matches.
 */
int sungem_phy_probe(struct mii_phy *phy, int mii_id)
{
	int rc;
	u32 id;
	struct mii_phy_def* def;
	int i;

	/* We do not reset the mii_phy structure as the driver
	 * may re-probe the PHY regularly
	 */
	phy->mii_id = mii_id;

	/* Take PHY out of isolate mode and reset it. */
	rc = reset_one_mii_phy(phy, mii_id);
	if (rc)
		goto fail;

	/* Read ID and find matching entry */
	id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
	printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
	       id, mii_id);
	for (i=0; (def = mii_phy_table[i]) != NULL; i++)
		if ((id & def->phy_id_mask) == def->phy_id)
			break;
	/* Should never be NULL (we have a generic entry), but... */
	if (def == NULL)
		goto fail;

	phy->def = def;

	return 0;
fail:
	/* leave the structure in a well-defined "no link" state */
	phy->speed = 0;
	phy->duplex = 0;
	phy->pause = 0;
	phy->advertising = 0;
	return -ENODEV;
}
EXPORT_SYMBOL(sungem_phy_probe);
MODULE_LICENSE("GPL");
| gpl-2.0 |
GustavoRD78/78Kernel-5.1.1-23.4.A.1.200 | drivers/crypto/padlock-sha.c | 7029 | 16251 | /*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
struct padlock_sha_desc {
struct shash_desc fallback;
};
struct padlock_sha_ctx {
struct crypto_shash *fallback;
};
/* Start a hash request by delegating to the software fallback; the
 * PadLock hardware is only invoked in the finup paths.
 */
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}
/* Feed more data into the software fallback transform. */
static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}
/* Export the partial-hash state of the fallback transform into @out. */
static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}
/* Import previously exported state into the fallback transform. */
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}
/* Copy @count 32-bit words from @src to @dst, byte-swapping each one
 * (callers use this to convert the digest words the PadLock unit
 * leaves in the result buffer into the expected output byte order).
 */
static inline void padlock_output_block(uint32_t *src,
		uint32_t *dst, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		dst[i] = swab32(src[i]);
}
/* Finish a SHA-1 with the PadLock "rep xsha1" instruction.  The
 * fallback's exported state supplies the already-hashed prefix; the
 * hardware then consumes the remaining input.  Operand meanings
 * ("c" = total length, "a" = bytes already hashed) follow the PadLock
 * programming model — confirm against VIA PHE documentation.
 */
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 * PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	/* If the total length does not fit in an unsigned long (only
	 * possible on 32-bit), let the software fallback finish it. */
	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	/* bytes buffered in the current partial block (1..64) */
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			/* top up the partial block in software, re-export */
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			/* all remaining input fits in the state buffer:
			 * hash from there and rewind count to the last
			 * block boundary */
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}
/* Final with no trailing data: finup over zero bytes.  The small
 * stack buffer only exists to provide a valid pointer for count==0. */
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}
/* Finish a SHA-256 with the PadLock "rep xsha256" instruction;
 * mirrors padlock_sha1_finup() — see there for the flow.
 *
 * Code change vs. the original: the partial-block rewind now masks
 * with SHA256_BLOCK_SIZE rather than SHA1_BLOCK_SIZE.  Both are 64,
 * so behavior is identical — the constant was simply wrong.
 */
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 * PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	/* Total length must fit in an unsigned long for the xsha256
	 * operands; otherwise let the software fallback finish it. */
	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	/* bytes buffered in the current partial block (1..64) */
	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			/* top up the partial block in software, re-export */
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			/* all remaining input fits in the state buffer:
			 * hash from there and rewind count to the last
			 * block boundary.  FIX: mask with the SHA-256
			 * block size (original used SHA1_BLOCK_SIZE). */
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}
/* Final with no trailing data: finup over zero bytes.  The small
 * stack buffer only exists to provide a valid pointer for count==0. */
static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}
/* tfm init: allocate the software fallback hash (looked up by our own
 * cra_name, requesting a NEED_FALLBACK-capable implementation) and
 * grow descsize so each request has room for the fallback's state.
 * Returns 0 or the crypto_alloc_shash() error.
 */
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	/* requests need space for our desc plus the fallback's */
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}
/* tfm exit: release the fallback allocated in padlock_cra_init(). */
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}
static struct shash_alg sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = padlock_sha_init,
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
.final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
.cra_init = padlock_cra_init,
.cra_exit = padlock_cra_exit,
}
};
static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = padlock_sha_init,
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
.final = padlock_sha256_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
.descsize = sizeof(struct padlock_sha_desc),
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
.cra_init = padlock_cra_init,
.cra_exit = padlock_cra_exit,
}
};
/* Add two shash_alg instance for hardware-implemented *
* multiple-parts hash supported by VIA Nano Processor.*/
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
/* Multi-part SHA-1 update for the VIA Nano: runs "rep xsha1" once per
 * batch of full blocks and stashes any sub-block remainder in
 * sctx->buffer.  The "a"((long)-1) operand appears to select
 * intermediate (no final padding) operation — confirm against VIA
 * PHE documentation.
 */
static int padlock_sha1_update_nano(struct shash_desc *desc,
			const u8 *data,	unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;	/* bytes already buffered */
	sctx->count += len;
	done = 0;
	src = data;
	/* hardware works on the copy in dst, not on sctx directly */
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Append the bytes in state's buffer to a block to handle */
		if (partial) {
			/* done is negative here so that done +
			 * SHA1_BLOCK_SIZE = bytes taken from data */
			done = -partial;
			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst) \
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the left bytes from the input data */
		if (len - done >= SHA1_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	/* stash the sub-block remainder for the next call */
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}
/* Finalise a multi-part SHA-1: standard MD padding, big-endian bit
 * length appended, then byte-swap the state words into @out.
 */
static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	/* capture the length before padding updates state->count */
	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}
static int padlock_sha256_init_nano(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha256_state){
.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
};
return 0;
}
/* Multi-part SHA-256 update for the VIA Nano ("rep xsha256" per batch
 * of full blocks); mirrors padlock_sha1_update_nano().
 *
 * Code change vs. the original: the bare literal 64 in the bulk-block
 * arithmetic is replaced with SHA256_BLOCK_SIZE (same value), for
 * consistency with the SHA-1 variant.  Behavior is unchanged.
 */
static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;	/* bytes already buffered */
	sctx->count += len;
	done = 0;
	src = data;
	/* hardware works on the copy in dst, not on sctx directly */
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Append the bytes in state's buffer to a block to handle */
		if (partial) {
			/* done is negative so done + SHA256_BLOCK_SIZE
			 * = bytes taken from data */
			done = -partial;
			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the left bytes from input data*/
		if (len - done >= SHA256_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	/* stash the sub-block remainder for the next call */
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}
/* Finalise a multi-part SHA-256: standard MD padding, big-endian bit
 * length appended, then byte-swap the state words into @out.
 */
static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	/* capture the length before padding updates state->count */
	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}
/* Export: the descriptor context IS the raw sha*_state, so exporting
 * is a plain byte copy of statesize bytes into @out.
 */
static int padlock_sha_export_nano(struct shash_desc *desc,
				void *out)
{
	memcpy(out, shash_desc_ctx(desc),
	       crypto_shash_statesize(desc->tfm));
	return 0;
}
/* Import: inverse of padlock_sha_export_nano() — copy statesize bytes
 * from @in straight into the descriptor context.
 */
static int padlock_sha_import_nano(struct shash_desc *desc,
				const void *in)
{
	memcpy(shash_desc_ctx(desc), in,
	       crypto_shash_statesize(desc->tfm));
	return 0;
}
static struct shash_alg sha1_alg_nano = {
.digestsize = SHA1_DIGEST_SIZE,
.init = padlock_sha1_init_nano,
.update = padlock_sha1_update_nano,
.final = padlock_sha1_final_nano,
.export = padlock_sha_export_nano,
.import = padlock_sha_import_nano,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct shash_alg sha256_alg_nano = {
.digestsize = SHA256_DIGEST_SIZE,
.init = padlock_sha256_init_nano,
.update = padlock_sha256_update_nano,
.final = padlock_sha256_final_nano,
.export = padlock_sha_export_nano,
.import = padlock_sha_import_nano,
.descsize = sizeof(struct sha256_state),
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct x86_cpu_id padlock_sha_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_PHE),
{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
/* Module init: pick the single-shot (finup-based) algorithms for
 * pre-Nano CPUs, or the multi-part "nano" ones for x86_model >= 0x0f,
 * then register SHA-1 followed by SHA-256 (unregistering SHA-1 again
 * if SHA-256 registration fails).
 */
static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	/* need the PHE feature present and enabled */
	if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
		return -ENODEV;

	/* Register the newly added algorithm module if on *
	 * VIA Nano processor, or else just do as before */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;

	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}
/* Module exit: mirror the CPU-model test in padlock_init() so that
 * exactly the pair of algorithms that was registered is unregistered.
 */
static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model < 0x0f) {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	} else {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	}
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
| gpl-2.0 |
mifl/android_kernel_pantech_ef45k | net/wimax/op-state-get.c | 11637 | 2495 | /*
* Linux WiMAX
* Implement and export a method for getting a WiMAX device current state
*
* Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* Based on previous WiMAX core work by:
* Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <net/wimax.h>
#include <net/genetlink.h>
#include <linux/wimax.h>
#include <linux/security.h>
#include "wimax-internal.h"
#define D_SUBMODULE op_state_get
#include "debug-levels.h"
static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_STGET_IFIDX] = {
.type = NLA_U32,
},
};
/*
 * Exporting to user space over generic netlink
 *
 * Parse the state-get command from user space and return the device's
 * current state (or a negative errno).  The only attribute consumed
 * is the mandatory interface index.
 */
static
int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
{
	int result, ifindex;
	struct wimax_dev *wimax_dev;

	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
	result = -ENODEV;
	if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
		printk(KERN_ERR "WIMAX_GNL_OP_STATE_GET: can't find IFIDX "
			"attribute\n");
		goto error_no_wimax_dev;
	}
	ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
	/* the dev_put() below releases the reference obtained here —
	 * held on wimax_dev->net_dev */
	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
	if (wimax_dev == NULL)
		goto error_no_wimax_dev;
	/* Execute the operation and send the result back to user space */
	result = wimax_state_get(wimax_dev);
	dev_put(wimax_dev->net_dev);
error_no_wimax_dev:
	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
	return result;
}
struct genl_ops wimax_gnl_state_get = {
.cmd = WIMAX_GNL_OP_STATE_GET,
.flags = GENL_ADMIN_PERM,
.policy = wimax_gnl_state_get_policy,
.doit = wimax_gnl_doit_state_get,
.dumpit = NULL,
};
| gpl-2.0 |
ktoonsez/KTSGS6 | drivers/scsi/qlogicfas408.c | 12917 | 14930 | /*----------------------------------------------------------------*/
/*
Qlogic linux driver - work in progress. No Warranty express or implied.
Use at your own risk. Support Tort Reform so you won't have to read all
these silly disclaimers.
Copyright 1994, Tom Zerucha.
tz@execpc.com
Additional Code, and much appreciated help by
Michael A. Griffith
grif@cs.ucr.edu
Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
help respectively, and for suffering through my foolishness during the
debugging process.
Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
(you can reference it, but it is incomplete and inaccurate in places)
Version 0.46 1/30/97 - kernel 1.2.0+
Functions as standalone, loadable, and PCMCIA driver, the latter from
Dave Hinds' PCMCIA package.
Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
SCSI driver cleanup and audit. This driver still needs work on the
following
- Non terminating hardware waits
- Some layering violations with its pcmcia stub
Redistributable under terms of the GNU General Public License
For the avoidance of doubt the "preferred form" of this code is one which
is in an open non patent encumbered format. Where cryptographic key signing
forms part of the process of creating an executable the information
including keys needed to generate an equivalently functional executable
are deemed to be part of the source code.
*/
#include <linux/module.h>
#include <linux/blkdev.h> /* to get disk capacity */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "qlogicfas408.h"
/*----------------------------------------------------------------*/
static int qlcfg5 = (XTALFREQ << 5); /* 15625/512 */
static int qlcfg6 = SYNCXFRPD;
static int qlcfg7 = SYNCOFFST;
static int qlcfg8 = (SLOWCABLE << 7) | (QL_ENABLE_PARITY << 4);
static int qlcfg9 = ((XTALFREQ + 4) / 5);
static int qlcfgc = (FASTCLK << 3) | (FASTSCSI << 4);
/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/
/* local functions */
/*----------------------------------------------------------------*/
/* error recovery - reset everything: pulse the SCSI-bus reset and the
 * chip reset via register 3, preserving the register-bank selection.
 * NOTE(review): bit 0x80 of the byte at qbase+0xd is treated as the
 * bank-select indicator (REG0/REG1 macros) — confirm against
 * qlogicfas408.h and the FAS408 manual.
 */
static void ql_zap(struct qlogicfas408_priv *priv)
{
	int x;
	int qbase = priv->qbase;
	int int_type = priv->int_type;	/* consumed by the REG0/REG1 macros */

	x = inb(qbase + 0xd);	/* remember current bank before REG0 switches it */
	REG0;
	outb(3, qbase + 3);	/* reset SCSI */
	outb(2, qbase + 3);	/* reset chip */
	if (x & 0x80)
		REG1;		/* restore the other bank if it was selected */
}
/*
* Do a pseudo-dma tranfer
*/
/*
 * ql_pdma - move one buffer between memory and the chip data FIFO
 * using programmed I/O ("pseudo DMA").
 *
 * @priv:    per-host state (supplies the I/O base)
 * @phase:   SCSI phase; bit 0 set = data-in (chip -> memory),
 *           clear = data-out (memory -> chip)
 * @request: buffer to drain or fill
 * @reqlen:  number of bytes to transfer
 *
 * Returns the 0xc0 bits of status register 8; non-zero makes the
 * caller stop walking the scatter list.
 */
static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
{
int j;
int qbase = priv->qbase;
j = 0;
if (phase & 1) { /* in */
#if QL_TURBO_PDMA
rtrc(4)
/* empty fifo in large chunks */
if (reqlen >= 128 && (inb(qbase + 8) & 2)) { /* full */
insl(qbase + 4, request, 32); /* 32 longwords = 128 bytes */
reqlen -= 128;
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */
if ((j = inb(qbase + 8)) & 4)
{
insl(qbase + 4, request, 21); /* 21 longwords = 84 bytes */
reqlen -= 84;
request += 84;
}
if (reqlen >= 44 && (inb(qbase + 8) & 8)) { /* 1/3 */
insl(qbase + 4, request, 11); /* 11 longwords = 44 bytes */
reqlen -= 44;
request += 44;
}
#endif
/* until both empty and int (or until reclen is 0) */
rtrc(7)
j = 0;
/* loop until FIFO empty (0x10) AND an interrupt bit (0xc0) is up,
 * or there is nothing left to receive */
while (reqlen && !((j & 0x10) && (j & 0xc0)))
{
/* while bytes to receive and not empty */
j &= 0xc0;
while (reqlen && !((j = inb(qbase + 8)) & 0x10))
{
*request++ = inb(qbase + 4); /* one byte at a time from the FIFO */
reqlen--;
}
if (j & 0x10)
j = inb(qbase + 8); /* re-sample status after the FIFO drained */
}
} else { /* out */
#if QL_TURBO_PDMA
rtrc(4)
if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */
outsl(qbase + 4, request, 32); /* 32 longwords = 128 bytes */
reqlen -= 128;
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 1/3 */
if (!((j = inb(qbase + 8)) & 8)) {
outsl(qbase + 4, request, 21);
reqlen -= 84;
request += 84;
}
if (reqlen >= 40 && !(inb(qbase + 8) & 4)) { /* 2/3 */
outsl(qbase + 4, request, 10); /* 10 longwords = 40 bytes */
reqlen -= 40;
request += 40;
}
#endif
/* until full and int (or until reclen is 0) */
rtrc(7)
j = 0;
while (reqlen && !((j & 2) && (j & 0xc0))) {
/* while bytes to send and not full */
while (reqlen && !((j = inb(qbase + 8)) & 2))
{
outb(*request++, qbase + 4);
reqlen--;
}
if (j & 2)
j = inb(qbase + 8); /* re-sample status once the FIFO filled */
}
}
/* maybe return reqlen */
return inb(qbase + 8) & 0xc0;
}
/*
* Wait for interrupt flag (polled - not real hardware interrupt)
*/
/*
 * Poll status register 4 until an interrupt condition (0xe0 bits)
 * appears, the WATCHDOG period expires, or an abort/reset was flagged.
 * Returns 0 on a normal interrupt, otherwise a DID_* host code; the
 * error bits (0x60) additionally trigger a chip reset via ql_zap().
 */
static int ql_wai(struct qlogicfas408_priv *priv)
{
int k;
int qbase = priv->qbase;
unsigned long i;
k = 0;
i = jiffies + WATCHDOG; /* deadline for the busy-wait */
while (time_before(jiffies, i) && !priv->qabort &&
!((k = inb(qbase + 4)) & 0xe0)) {
barrier();
cpu_relax();
}
if (time_after_eq(jiffies, i))
return (DID_TIME_OUT);
if (priv->qabort)
return (priv->qabort == 1 ? DID_ABORT : DID_RESET); /* 1 = abort, 2 = reset */
if (k & 0x60) /* error bits: re-initialise the chip before reporting */
ql_zap(priv);
if (k & 0x20)
return (DID_PARITY);
if (k & 0x40)
return (DID_ERROR);
return 0;
}
/*
* Initiate scsi command - queueing handler
* caller must hold host lock
*/
/*
 * Program the chip for a new command: clear stale interrupts and the
 * FIFO, load the configuration registers, write the target id and the
 * CDB, then start the "select and send command" sequence (0x41).
 * Completion is picked up by the interrupt path through priv->qlcmd.
 */
static void ql_icmd(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
int qbase = priv->qbase;
int int_type = priv->int_type; /* consumed by the REG0/REG1 macros */
unsigned int i;
priv->qabort = 0; /* fresh command: clear any stale abort/reset flag */
REG0;
/* clearing of interrupts and the fifo is needed */
inb(qbase + 5); /* clear interrupts */
if (inb(qbase + 5)) /* if still interrupting */
outb(2, qbase + 3); /* reset chip */
else if (inb(qbase + 7) & 0x1f)
outb(1, qbase + 3); /* clear fifo */
while (inb(qbase + 5)); /* clear ints */
REG1;
outb(1, qbase + 8); /* set for PIO pseudo DMA */
outb(0, qbase + 0xb); /* disable ints */
inb(qbase + 8); /* clear int bits */
REG0;
outb(0x40, qbase + 0xb); /* enable features */
/* configurables */
outb(qlcfgc, qbase + 0xc);
/* config: no reset interrupt, (initiator) bus id */
outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
outb(qlcfg7, qbase + 7);
outb(qlcfg6, qbase + 6);
/**/ outb(qlcfg5, qbase + 5); /* select timer */
outb(qlcfg9 & 7, qbase + 9); /* prescaler */
/* outb(0x99, qbase + 5); */
outb(scmd_id(cmd), qbase + 4); /* destination (target) id */
for (i = 0; i < cmd->cmd_len; i++)
outb(cmd->cmnd[i], qbase + 2); /* CDB bytes go into register 2 */
priv->qlcmd = cmd; /* mark command in flight for the irq handler */
outb(0x41, qbase + 3); /* select and send command */
}
/*
* Process scsi command - usually after interrupt
*/
/*
 * ql_pcmd - run a previously-issued command to completion.
 *
 * Called from the interrupt path once selection/command has finished.
 * Performs the data phase via ql_pdma(), then the status/message-in
 * phase, and finally the disconnect sequence.
 *
 * Returns a SCSI result word: (host code << 16) | (message << 8) | status.
 */
static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
{
unsigned int i, j;
unsigned long k;
unsigned int result; /* ultimate return result */
unsigned int status; /* scsi returned status */
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
unsigned int reqlen; /* total length of transfer */
char *buf;
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
int qbase = priv->qbase;
int int_type = priv->int_type; /* consumed by the REG0/REG1 macros */
rtrc(1)
j = inb(qbase + 6); /* sequence step */
i = inb(qbase + 5); /* interrupt status */
if (i == 0x20) { /* selection timed out: nobody home */
return (DID_NO_CONNECT << 16);
}
i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
if (i != 0x18) {
printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
ql_zap(priv);
return (DID_BAD_INTR << 16);
}
j &= 7; /* j = inb( qbase + 7 ) >> 5; */
/* correct status is supposed to be step 4 */
/* it sometimes returns step 3 but with 0 bytes left to send */
/* We can try stuffing the FIFO with the max each time, but we will get a
sequence of 3 if any bytes are left (but we do flush the FIFO anyway */
if (j != 3 && j != 4) {
printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
j, i, inb(qbase + 7) & 0x1f);
ql_zap(priv);
return (DID_ERROR << 16);
}
result = DID_OK;
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
reqlen = scsi_bufflen(cmd);
/* note that it won't work if transfers > 16M are requested */
if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
struct scatterlist *sg;
rtrc(2)
outb(reqlen, qbase); /* low-mid xfer cnt */
outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */
outb(0x90, qbase + 3); /* command do xfer */
/* PIO pseudo DMA to buffer or sglist */
REG1;
/* hand each scatter-gather segment to the pseudo-DMA engine */
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
if (priv->qabort) {
REG0;
return ((priv->qabort == 1 ?
DID_ABORT : DID_RESET) << 16);
}
buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break; /* interrupt/abort bit raised: stop early */
}
REG0;
rtrc(2)
/*
 * Wait for irq (split into second state of irq handler
 * if this can take time)
 */
if ((k = ql_wai(priv)))
return (k << 16);
k = inb(qbase + 5); /* should be 0x10, bus service */
}
/*
 * Enter Status (and Message In) Phase
 */
k = jiffies + WATCHDOG;
while (time_before(jiffies, k) && !priv->qabort &&
!(inb(qbase + 4) & 6))
cpu_relax(); /* wait for status phase */
if (time_after_eq(jiffies, k)) {
ql_zap(priv);
return (DID_TIME_OUT << 16);
}
/* FIXME: timeout ?? */
while (inb(qbase + 5))
cpu_relax(); /* clear pending ints */
if (priv->qabort)
return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
outb(0x11, qbase + 3); /* get status and message */
if ((k = ql_wai(priv)))
return (k << 16);
i = inb(qbase + 5); /* get chip irq stat */
j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
status = inb(qbase + 2);
message = inb(qbase + 2);
/*
 * Should get function complete int if Status and message, else
 * bus serv if only status
 */
if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
result = DID_ERROR;
}
outb(0x12, qbase + 3); /* done, disconnect */
rtrc(1)
if ((k = ql_wai(priv)))
return (k << 16);
/*
 * Should get bus service interrupt and disconnect interrupt
 */
i = inb(qbase + 5); /* should be bus service */
while (!priv->qabort && ((i & 0x20) != 0x20)) {
barrier();
cpu_relax();
i |= inb(qbase + 5); /* accumulate until disconnect (0x20) shows */
}
rtrc(0)
if (priv->qabort)
return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
return (result << 16) | (message << 8) | (status & STATUS_MASK);
}
/*
* Interrupt handler
*/
/*
 * Interrupt bottom half (caller holds the host lock).  Ignores
 * spurious interrupts, drains stale chip interrupts when no command is
 * in flight, otherwise finishes the current command via ql_pcmd() and
 * completes it through scsi_done.
 */
static void ql_ihandl(void *dev_id)
{
struct scsi_cmnd *icmd;
struct Scsi_Host *host = dev_id;
struct qlogicfas408_priv *priv = get_priv_by_host(host);
int qbase = priv->qbase;
REG0;
if (!(inb(qbase + 4) & 0x80)) /* false alarm? */
return;
if (priv->qlcmd == NULL) { /* no command to process? */
int i;
i = 16; /* drain at most 16 stale interrupt reads */
while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */
return;
}
icmd = priv->qlcmd;
icmd->result = ql_pcmd(icmd);
priv->qlcmd = NULL; /* free the single in-flight slot before completion */
/*
 * If result is CHECK CONDITION done calls qcommand to request
 * sense
 */
(icmd->scsi_done) (icmd);
}
/*
 * Hardware interrupt entry point.  Takes the host lock (IRQ-safe) and
 * delegates the real work to ql_ihandl().  Always reports the
 * interrupt as handled, even when it turns out to be a false alarm.
 */
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	unsigned long irq_flags;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	ql_ihandl(dev_id);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);

	return IRQ_HANDLED;
}
/*
* Queued command
*/
/*
 * Queue one SCSI command.  The hardware handles a single command at a
 * time, so we busy-wait until the previous command's interrupt has
 * cleared priv->qlcmd before issuing the new one.  A command addressed
 * to our own initiator id is rejected with DID_BAD_TARGET.
 */
static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
					 void (*done) (struct scsi_cmnd *))
{
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);

	if (scmd_id(cmd) != priv->qinitid) {
		cmd->scsi_done = done;
		/* wait for the last command's interrupt to finish */
		while (priv->qlcmd != NULL) {
			barrier();
			cpu_relax();
		}
		ql_icmd(cmd);
		return 0;
	}

	/* We are the initiator: not a valid target. */
	cmd->result = DID_BAD_TARGET << 16;
	done(cmd);
	return 0;
}
DEF_SCSI_QCMD(qlogicfas408_queuecommand)
/*
* Return bios parameters
*/
/*
 * Report a BIOS-compatible disk geometry, mimicking the DOS Qlogic
 * driver exactly: 64 heads / 32 sectors first, falling back to
 * 255 heads / 63 sectors when that would exceed 1024 cylinders.
 * ip[0] = heads, ip[1] = sectors per track, ip[2] = cylinders.
 */
int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
			   sector_t capacity, int ip[])
{
	ip[0] = 0x40;	/* 64 heads */
	ip[1] = 0x20;	/* 32 sectors/track */
	ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
	if (ip[2] > 1024) {
		/* Large-disk translation, as the DOS driver does. */
		ip[0] = 0xff;	/* 255 heads */
		ip[1] = 0x3f;	/* 63 sectors/track */
		ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
	}
	return 0;
}
/*
* Abort a command in progress
*/
/*
 * Error-handler abort callback: flag the abort for the polling loops
 * (qabort == 1 is mapped to DID_ABORT there) and hard-reset the chip.
 */
int qlogicfas408_abort(struct scsi_cmnd *cmd)
{
	struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);

	priv->qabort = 1;
	ql_zap(priv);

	return SUCCESS;
}
/*
* Reset SCSI bus
* FIXME: This function is invoked with cmd = NULL directly by
* the PCMCIA qlogic_stub code. This wants fixing
*/
/* NOTE(review): per the FIXME above, the PCMCIA stub invokes this with
 * cmd == NULL, which would oops in get_priv_by_cmd()/cmd->device below;
 * a host-based variant is needed -- confirm against the stub code. */
int qlogicfas408_bus_reset(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
unsigned long flags;
priv->qabort = 2; /* 2 = reset (1 would mean abort) for the polling loops */
spin_lock_irqsave(cmd->device->host->host_lock, flags);
ql_zap(priv);
spin_unlock_irqrestore(cmd->device->host->host_lock, flags);
return SUCCESS;
}
/*
* Return info string
*/
const char *qlogicfas408_info(struct Scsi_Host *host)
{
struct qlogicfas408_priv *priv = get_priv_by_host(host);
return priv->qinfo;
}
/*
* Get type of chip
*/
/*
 * Read the chip identification byte from register 0xe (bank 1),
 * masking off the low three bits, which change between reads (see
 * qlogicfas408_detect()).
 */
int qlogicfas408_get_chip_type(int qbase, int int_type)
{
	int id;

	REG1;
	id = inb(qbase + 0xe);

	return id & 0xf8;
}
/*
* Perform initialization tasks
*/
/*
 * One-time per-card hardware initialisation: select PIO pseudo-DMA,
 * program the initiator bus id and timing configuration, and
 * optionally issue a SCSI bus reset (QL_RESET_AT_START).
 */
void qlogicfas408_setup(int qbase, int id, int int_type)
{
outb(1, qbase + 8); /* set for PIO pseudo DMA */
REG0;
outb(0x40 | qlcfg8 | id, qbase + 8); /* (ini) bus id, disable scsi rst */
outb(qlcfg5, qbase + 5); /* select timer */
outb(qlcfg9, qbase + 9); /* prescaler */
#if QL_RESET_AT_START
outb(3, qbase + 3); /* reset SCSI (same command ql_zap() uses) */
REG1;
/* FIXME: timeout */
while (inb(qbase + 0xf) & 4) /* spin until the reset bit clears */
cpu_relax();
REG0;
#endif
}
/*
* Checks if this is a QLogic FAS 408
*/
/*
 * Probe for a QLogic FAS408: register 0xe (bank 1) changes between
 * consecutive reads on this chip, and two read-pairs each XOR-ing to 7
 * are taken as positive identification.  Presumably the low three bits
 * form a counter that advances per read -- TODO confirm against the
 * FAS408 technical manual.
 */
int qlogicfas408_detect(int qbase, int int_type)
{
REG1;
return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
}
/*
* Disable interrupts
*/
/* Mask all chip interrupts (register 0xb in register bank 1). */
void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv)
{
int qbase = priv->qbase;
int int_type = priv->int_type; /* consumed by the REG1 bank-select macro */
REG1;
outb(0, qbase + 0xb); /* disable ints */
}
/*
* Init and exit functions
*/
/* Module load hook: no global setup needed; per-card initialisation is
 * done through qlogicfas408_setup() (presumably by the front-end
 * drivers that import these symbols). */
static int __init qlogicfas408_init(void)
{
return 0;
}
/* Module unload hook: nothing module-wide to tear down. */
static void __exit qlogicfas408_exit(void)
{
}
MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
MODULE_DESCRIPTION("Driver for the Qlogic FAS SCSI controllers");
MODULE_LICENSE("GPL");
module_init(qlogicfas408_init);
module_exit(qlogicfas408_exit);
EXPORT_SYMBOL(qlogicfas408_info);
EXPORT_SYMBOL(qlogicfas408_queuecommand);
EXPORT_SYMBOL(qlogicfas408_abort);
EXPORT_SYMBOL(qlogicfas408_bus_reset);
EXPORT_SYMBOL(qlogicfas408_biosparam);
EXPORT_SYMBOL(qlogicfas408_ihandl);
EXPORT_SYMBOL(qlogicfas408_get_chip_type);
EXPORT_SYMBOL(qlogicfas408_setup);
EXPORT_SYMBOL(qlogicfas408_detect);
EXPORT_SYMBOL(qlogicfas408_disable_ints);
| gpl-2.0 |
regalstreak/android_kernel_samsung_logan2g | drivers/scsi/qlogicfas408.c | 12917 | 14930 | /*----------------------------------------------------------------*/
/*
Qlogic linux driver - work in progress. No Warranty express or implied.
Use at your own risk. Support Tort Reform so you won't have to read all
these silly disclaimers.
Copyright 1994, Tom Zerucha.
tz@execpc.com
Additional Code, and much appreciated help by
Michael A. Griffith
grif@cs.ucr.edu
Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
help respectively, and for suffering through my foolishness during the
debugging process.
Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
(you can reference it, but it is incomplete and inaccurate in places)
Version 0.46 1/30/97 - kernel 1.2.0+
Functions as standalone, loadable, and PCMCIA driver, the latter from
Dave Hinds' PCMCIA package.
Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
SCSI driver cleanup and audit. This driver still needs work on the
following
- Non terminating hardware waits
- Some layering violations with its pcmcia stub
Redistributable under terms of the GNU General Public License
For the avoidance of doubt the "preferred form" of this code is one which
is in an open non patent encumbered format. Where cryptographic key signing
forms part of the process of creating an executable the information
including keys needed to generate an equivalently functional executable
are deemed to be part of the source code.
*/
#include <linux/module.h>
#include <linux/blkdev.h> /* to get disk capacity */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "qlogicfas408.h"
/*----------------------------------------------------------------*/
static int qlcfg5 = (XTALFREQ << 5); /* 15625/512 */
static int qlcfg6 = SYNCXFRPD;
static int qlcfg7 = SYNCOFFST;
static int qlcfg8 = (SLOWCABLE << 7) | (QL_ENABLE_PARITY << 4);
static int qlcfg9 = ((XTALFREQ + 4) / 5);
static int qlcfgc = (FASTCLK << 3) | (FASTSCSI << 4);
/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/
/* local functions */
/*----------------------------------------------------------------*/
/* error recovery - reset everything */
/* Error recovery: hard-reset the SCSI bus and chip, restoring the
 * register bank that was selected on entry (bit 7 of register 0xd). */
static void ql_zap(struct qlogicfas408_priv *priv)
{
int x;
int qbase = priv->qbase;
int int_type = priv->int_type; /* used by the REG0/REG1 macros */
x = inb(qbase + 0xd); /* remember configuration */
REG0;
outb(3, qbase + 3); /* reset SCSI */
outb(2, qbase + 3); /* reset chip */
if (x & 0x80)
REG1;
}
/*
* Do a pseudo-dma tranfer
*/
/* Pseudo-DMA transfer between memory and the chip FIFO; direction is
 * data-in when bit 0 of @phase is set, data-out otherwise.  Returns
 * the 0xc0 interrupt bits of status register 8 (non-zero stops the
 * caller's scatter-list walk). */
static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
{
int j;
int qbase = priv->qbase;
j = 0;
if (phase & 1) { /* in */
#if QL_TURBO_PDMA
rtrc(4)
/* empty fifo in large chunks */
if (reqlen >= 128 && (inb(qbase + 8) & 2)) { /* full */
insl(qbase + 4, request, 32);
reqlen -= 128;
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */
if ((j = inb(qbase + 8)) & 4)
{
insl(qbase + 4, request, 21);
reqlen -= 84;
request += 84;
}
if (reqlen >= 44 && (inb(qbase + 8) & 8)) { /* 1/3 */
insl(qbase + 4, request, 11);
reqlen -= 44;
request += 44;
}
#endif
/* until both empty and int (or until reclen is 0) */
rtrc(7)
j = 0;
while (reqlen && !((j & 0x10) && (j & 0xc0)))
{
/* while bytes to receive and not empty */
j &= 0xc0;
while (reqlen && !((j = inb(qbase + 8)) & 0x10))
{
*request++ = inb(qbase + 4);
reqlen--;
}
if (j & 0x10)
j = inb(qbase + 8);
}
} else { /* out */
#if QL_TURBO_PDMA
rtrc(4)
if (reqlen >= 128 && inb(qbase + 8) & 0x10) { /* empty */
outsl(qbase + 4, request, 32);
reqlen -= 128;
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 1/3 */
if (!((j = inb(qbase + 8)) & 8)) {
outsl(qbase + 4, request, 21);
reqlen -= 84;
request += 84;
}
if (reqlen >= 40 && !(inb(qbase + 8) & 4)) { /* 2/3 */
outsl(qbase + 4, request, 10);
reqlen -= 40;
request += 40;
}
#endif
/* until full and int (or until reclen is 0) */
rtrc(7)
j = 0;
while (reqlen && !((j & 2) && (j & 0xc0))) {
/* while bytes to send and not full */
while (reqlen && !((j = inb(qbase + 8)) & 2))
{
outb(*request++, qbase + 4);
reqlen--;
}
if (j & 2)
j = inb(qbase + 8);
}
}
/* maybe return reqlen */
return inb(qbase + 8) & 0xc0;
}
/*
* Wait for interrupt flag (polled - not real hardware interrupt)
*/
/* Busy-wait for an interrupt condition (0xe0 bits of register 4);
 * returns 0 on success or a DID_* host code on timeout/abort/error. */
static int ql_wai(struct qlogicfas408_priv *priv)
{
int k;
int qbase = priv->qbase;
unsigned long i;
k = 0;
i = jiffies + WATCHDOG;
while (time_before(jiffies, i) && !priv->qabort &&
!((k = inb(qbase + 4)) & 0xe0)) {
barrier();
cpu_relax();
}
if (time_after_eq(jiffies, i))
return (DID_TIME_OUT);
if (priv->qabort)
return (priv->qabort == 1 ? DID_ABORT : DID_RESET);
if (k & 0x60)
ql_zap(priv);
if (k & 0x20)
return (DID_PARITY);
if (k & 0x40)
return (DID_ERROR);
return 0;
}
/*
* Initiate scsi command - queueing handler
* caller must hold host lock
*/
/* Program the chip registers and launch a new command via the
 * "select and send command" sequence (0x41); caller holds host lock. */
static void ql_icmd(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
int qbase = priv->qbase;
int int_type = priv->int_type;
unsigned int i;
priv->qabort = 0;
REG0;
/* clearing of interrupts and the fifo is needed */
inb(qbase + 5); /* clear interrupts */
if (inb(qbase + 5)) /* if still interrupting */
outb(2, qbase + 3); /* reset chip */
else if (inb(qbase + 7) & 0x1f)
outb(1, qbase + 3); /* clear fifo */
while (inb(qbase + 5)); /* clear ints */
REG1;
outb(1, qbase + 8); /* set for PIO pseudo DMA */
outb(0, qbase + 0xb); /* disable ints */
inb(qbase + 8); /* clear int bits */
REG0;
outb(0x40, qbase + 0xb); /* enable features */
/* configurables */
outb(qlcfgc, qbase + 0xc);
/* config: no reset interrupt, (initiator) bus id */
outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
outb(qlcfg7, qbase + 7);
outb(qlcfg6, qbase + 6);
/**/ outb(qlcfg5, qbase + 5); /* select timer */
outb(qlcfg9 & 7, qbase + 9); /* prescaler */
/* outb(0x99, qbase + 5); */
outb(scmd_id(cmd), qbase + 4);
for (i = 0; i < cmd->cmd_len; i++)
outb(cmd->cmnd[i], qbase + 2);
priv->qlcmd = cmd; /* in-flight marker for the irq handler */
outb(0x41, qbase + 3); /* select and send command */
}
/*
* Process scsi command - usually after interrupt
*/
/* Run a previously-issued command to completion: data phase via
 * ql_pdma(), then status/message-in, then disconnect.  Returns
 * (host code << 16) | (message << 8) | status. */
static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
{
unsigned int i, j;
unsigned long k;
unsigned int result; /* ultimate return result */
unsigned int status; /* scsi returned status */
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
unsigned int reqlen; /* total length of transfer */
char *buf;
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
int qbase = priv->qbase;
int int_type = priv->int_type;
rtrc(1)
j = inb(qbase + 6);
i = inb(qbase + 5);
if (i == 0x20) {
return (DID_NO_CONNECT << 16);
}
i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
if (i != 0x18) {
printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
ql_zap(priv);
return (DID_BAD_INTR << 16);
}
j &= 7; /* j = inb( qbase + 7 ) >> 5; */
/* correct status is supposed to be step 4 */
/* it sometimes returns step 3 but with 0 bytes left to send */
/* We can try stuffing the FIFO with the max each time, but we will get a
sequence of 3 if any bytes are left (but we do flush the FIFO anyway */
if (j != 3 && j != 4) {
printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
j, i, inb(qbase + 7) & 0x1f);
ql_zap(priv);
return (DID_ERROR << 16);
}
result = DID_OK;
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
reqlen = scsi_bufflen(cmd);
/* note that it won't work if transfers > 16M are requested */
if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
struct scatterlist *sg;
rtrc(2)
outb(reqlen, qbase); /* low-mid xfer cnt */
outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */
outb(0x90, qbase + 3); /* command do xfer */
/* PIO pseudo DMA to buffer or sglist */
REG1;
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
if (priv->qabort) {
REG0;
return ((priv->qabort == 1 ?
DID_ABORT : DID_RESET) << 16);
}
buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break;
}
REG0;
rtrc(2)
/*
 * Wait for irq (split into second state of irq handler
 * if this can take time)
 */
if ((k = ql_wai(priv)))
return (k << 16);
k = inb(qbase + 5); /* should be 0x10, bus service */
}
/*
 * Enter Status (and Message In) Phase
 */
k = jiffies + WATCHDOG;
while (time_before(jiffies, k) && !priv->qabort &&
!(inb(qbase + 4) & 6))
cpu_relax(); /* wait for status phase */
if (time_after_eq(jiffies, k)) {
ql_zap(priv);
return (DID_TIME_OUT << 16);
}
/* FIXME: timeout ?? */
while (inb(qbase + 5))
cpu_relax(); /* clear pending ints */
if (priv->qabort)
return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
outb(0x11, qbase + 3); /* get status and message */
if ((k = ql_wai(priv)))
return (k << 16);
i = inb(qbase + 5); /* get chip irq stat */
j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
status = inb(qbase + 2);
message = inb(qbase + 2);
/*
 * Should get function complete int if Status and message, else
 * bus serv if only status
 */
if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
result = DID_ERROR;
}
outb(0x12, qbase + 3); /* done, disconnect */
rtrc(1)
if ((k = ql_wai(priv)))
return (k << 16);
/*
 * Should get bus service interrupt and disconnect interrupt
 */
i = inb(qbase + 5); /* should be bus service */
while (!priv->qabort && ((i & 0x20) != 0x20)) {
barrier();
cpu_relax();
i |= inb(qbase + 5);
}
rtrc(0)
if (priv->qabort)
return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
return (result << 16) | (message << 8) | (status & STATUS_MASK);
}
/*
* Interrupt handler
*/
/* Interrupt bottom half (host lock held): completes the in-flight
 * command, or drains stale chip interrupts when none is pending. */
static void ql_ihandl(void *dev_id)
{
struct scsi_cmnd *icmd;
struct Scsi_Host *host = dev_id;
struct qlogicfas408_priv *priv = get_priv_by_host(host);
int qbase = priv->qbase;
REG0;
if (!(inb(qbase + 4) & 0x80)) /* false alarm? */
return;
if (priv->qlcmd == NULL) { /* no command to process? */
int i;
i = 16;
while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */
return;
}
icmd = priv->qlcmd;
icmd->result = ql_pcmd(icmd);
priv->qlcmd = NULL;
/*
 * If result is CHECK CONDITION done calls qcommand to request
 * sense
 */
(icmd->scsi_done) (icmd);
}
/* Hardware interrupt entry point: runs ql_ihandl() under the
 * IRQ-safe host lock and always reports the interrupt handled. */
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id)
{
unsigned long flags;
struct Scsi_Host *host = dev_id;
spin_lock_irqsave(host->host_lock, flags);
ql_ihandl(dev_id);
spin_unlock_irqrestore(host->host_lock, flags);
return IRQ_HANDLED;
}
/*
* Queued command
*/
/* Queue one command: reject the initiator's own id, then serialise on
 * the hardware's single in-flight command slot before issuing. */
static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *))
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
if (scmd_id(cmd) == priv->qinitid) {
cmd->result = DID_BAD_TARGET << 16;
done(cmd);
return 0;
}
cmd->scsi_done = done;
/* wait for the last command's interrupt to finish */
while (priv->qlcmd != NULL) {
barrier();
cpu_relax();
}
ql_icmd(cmd);
return 0;
}
DEF_SCSI_QCMD(qlogicfas408_queuecommand)
/*
* Return bios parameters
*/
/* Report heads/sectors/cylinders in ip[0..2], mimicking the DOS
 * Qlogic driver: 64/32 first, then 255/63 past 1024 cylinders. */
int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
sector_t capacity, int ip[])
{
/* This should mimic the DOS Qlogic driver's behavior exactly */
ip[0] = 0x40;
ip[1] = 0x20;
ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
if (ip[2] > 1024) {
ip[0] = 0xff;
ip[1] = 0x3f;
ip[2] = (unsigned long) capacity / (ip[0] * ip[1]);
#if 0
if (ip[2] > 1023)
ip[2] = 1023;
#endif
}
return 0;
}
/*
* Abort a command in progress
*/
/* Abort callback: flag qabort = 1 (-> DID_ABORT in the polling loops)
 * and hard-reset the chip. */
int qlogicfas408_abort(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
priv->qabort = 1;
ql_zap(priv);
return SUCCESS;
}
/*
 * Reset SCSI bus
 * FIXME: This function is invoked with cmd = NULL directly by
 * the PCMCIA qlogic_stub code. This wants fixing
 */
/* NOTE(review): with cmd == NULL this would oops in
 * get_priv_by_cmd()/cmd->device below -- confirm against the stub. */
int qlogicfas408_bus_reset(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
unsigned long flags;
priv->qabort = 2; /* 2 -> DID_RESET in the polling loops */
spin_lock_irqsave(cmd->device->host->host_lock, flags);
ql_zap(priv);
spin_unlock_irqrestore(cmd->device->host->host_lock, flags);
return SUCCESS;
}
/*
* Return info string
*/
/* Return the adapter description string from the per-host data. */
const char *qlogicfas408_info(struct Scsi_Host *host)
{
struct qlogicfas408_priv *priv = get_priv_by_host(host);
return priv->qinfo;
}
/*
 * Get type of chip
 */
/* Chip id byte from register 0xe (bank 1); low 3 bits masked off. */
int qlogicfas408_get_chip_type(int qbase, int int_type)
{
REG1;
return inb(qbase + 0xe) & 0xf8;
}
/*
 * Perform initialization tasks
 */
/* Per-card setup: PIO pseudo-DMA mode, bus id, timing registers, and
 * an optional SCSI bus reset under QL_RESET_AT_START. */
void qlogicfas408_setup(int qbase, int id, int int_type)
{
outb(1, qbase + 8); /* set for PIO pseudo DMA */
REG0;
outb(0x40 | qlcfg8 | id, qbase + 8); /* (ini) bus id, disable scsi rst */
outb(qlcfg5, qbase + 5); /* select timer */
outb(qlcfg9, qbase + 9); /* prescaler */
#if QL_RESET_AT_START
outb(3, qbase + 3);
REG1;
/* FIXME: timeout */
while (inb(qbase + 0xf) & 4)
cpu_relax();
REG0;
#endif
}
/*
* Checks if this is a QLogic FAS 408
*/
/* Identify a FAS408 by two successive reads of register 0xe XOR-ing
 * to 7, twice -- presumably a counter in the low 3 bits; TODO confirm
 * against the FAS408 manual. */
int qlogicfas408_detect(int qbase, int int_type)
{
REG1;
return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
}
/*
 * Disable interrupts
 */
/* Mask all chip interrupts (register 0xb, bank 1). */
void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv)
{
int qbase = priv->qbase;
int int_type = priv->int_type; /* used by the REG1 macro */
REG1;
outb(0, qbase + 0xb); /* disable ints */
}
/*
* Init and exit functions
*/
/* Module load hook: nothing to do; cards are set up via
 * qlogicfas408_setup() by the importing front-end drivers. */
static int __init qlogicfas408_init(void)
{
return 0;
}
/* Module unload hook: no module-wide state to release. */
static void __exit qlogicfas408_exit(void)
{
}
MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
MODULE_DESCRIPTION("Driver for the Qlogic FAS SCSI controllers");
MODULE_LICENSE("GPL");
module_init(qlogicfas408_init);
module_exit(qlogicfas408_exit);
EXPORT_SYMBOL(qlogicfas408_info);
EXPORT_SYMBOL(qlogicfas408_queuecommand);
EXPORT_SYMBOL(qlogicfas408_abort);
EXPORT_SYMBOL(qlogicfas408_bus_reset);
EXPORT_SYMBOL(qlogicfas408_biosparam);
EXPORT_SYMBOL(qlogicfas408_ihandl);
EXPORT_SYMBOL(qlogicfas408_get_chip_type);
EXPORT_SYMBOL(qlogicfas408_setup);
EXPORT_SYMBOL(qlogicfas408_detect);
EXPORT_SYMBOL(qlogicfas408_disable_ints);
| gpl-2.0 |
andip71/boeffla-kernel-omnirom-s3 | drivers/media/video/samsung/mali_r3p1/linux/mali_profiling_internal.c | 118 | 8106 | /*
* Copyright (C) 2010-2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_osk_mali.h"
#include "mali_ukk.h"
#include "mali_timestamp.h"
#include "mali_osk_profiling.h"
#include "mali_user_settings_db.h"
#include "mali_profiling_internal.h"
/* One record in the in-memory profiling ring buffer. */
typedef struct mali_profiling_entry
{
u64 timestamp;
u32 event_id;
u32 data[5];
} mali_profiling_entry;
/* Lifecycle of the internal profiling engine. */
typedef enum mali_profiling_state
{
MALI_PROFILING_STATE_UNINITIALIZED,
MALI_PROFILING_STATE_IDLE,
MALI_PROFILING_STATE_RUNNING,
MALI_PROFILING_STATE_RETURN, /* stopped; captured events await retrieval */
} mali_profiling_state;
static _mali_osk_lock_t *lock = NULL; /* guards prof_state transitions */
static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
static mali_profiling_entry* profile_entries = NULL; /* ring buffer (power-of-two size) */
static _mali_osk_atomic_t profile_insert_index; /* monotonically growing insert count */
static u32 profile_mask = 0; /* buffer size - 1, used as an index mask */
static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
/* Tracepoint probe: forwards mali_timeline_event tracepoint hits into
 * the internal ring buffer (TP_PROTO expands to the tracepoint's
 * argument list). */
void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
int d2, unsigned int d3, unsigned int d4))
{
add_event(event_id, d0, d1, d2, d3, d4);
}
/*
 * Initialise the internal profiling engine: allocate the state lock
 * and move to IDLE.  When @auto_start is MALI_TRUE, capture is started
 * immediately with the maximum buffer size and SW events are
 * force-enabled in the user-settings DB.
 * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT on failure.
 */
_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
{
profile_entries = NULL;
profile_mask = 0;
_mali_osk_atomic_init(&profile_insert_index, 0);
lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PROFILING);
if (NULL == lock)
{
return _MALI_OSK_ERR_FAULT;
}
prof_state = MALI_PROFILING_STATE_IDLE;
if (MALI_TRUE == auto_start)
{
u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit))
{
return _MALI_OSK_ERR_FAULT;
}
}
return _MALI_OSK_ERR_OK;
}
/*
 * Tear down the profiling engine: stop capture if running, then free
 * the ring buffer and the state lock.  Leaves the engine in the
 * UNINITIALIZED state.
 */
void _mali_internal_profiling_term(void)
{
u32 count;
/* Ensure profiling is stopped */
_mali_internal_profiling_stop(&count);
prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
if (NULL != profile_entries)
{
_mali_osk_vfree(profile_entries);
profile_entries = NULL;
}
if (NULL != lock)
{
_mali_osk_lock_term(lock);
lock = NULL;
}
}
/*
 * Start event capture.  *limit is clamped to
 * MALI_PROFILING_MAX_BUFFER_ENTRIES and rounded down to a power of
 * two; on return it holds the actual ring-buffer size, while
 * profile_mask ends up as size - 1 for cheap index wrapping.
 * Only legal from the IDLE state.
 *
 * NOTE(review): the tracepoint probe is registered even when
 * _mali_timestamp_reset() fails, after profile_entries has been freed
 * -- looks like it should be inside the success path; confirm intended
 * behaviour.
 */
_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
{
_mali_osk_errcode_t ret;
mali_profiling_entry *new_profile_entries;
_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit)
{
*limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
}
profile_mask = 1;
while (profile_mask <= *limit)
{
profile_mask <<= 1;
}
profile_mask >>= 1; /* largest power of two <= *limit */
*limit = profile_mask;
profile_mask--; /* turns the power of two into a mask of one less */
new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
if (NULL == new_profile_entries)
{
_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
return _MALI_OSK_ERR_NOMEM;
}
if (MALI_PROFILING_STATE_IDLE != prof_state)
{
_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
_mali_osk_vfree(new_profile_entries);
return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
}
profile_entries = new_profile_entries;
ret = _mali_timestamp_reset();
if (_MALI_OSK_ERR_OK == ret)
{
prof_state = MALI_PROFILING_STATE_RUNNING;
}
else
{
_mali_osk_vfree(profile_entries);
profile_entries = NULL;
}
register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
return ret;
}
/* Record a single event in the ring buffer (writer side, index is atomic). */
static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
{
	u32 slot = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
	mali_profiling_entry *entry = &profile_entries[slot];

	entry->timestamp = _mali_timestamp_get();
	entry->event_id = event_id;
	entry->data[0] = data0;
	entry->data[1] = data1;
	entry->data[2] = data2;
	entry->data[3] = data3;
	entry->data[4] = data4;

	/* If event is "leave API function", add current memory usage to the event
	 * as data point 4. This is used in timeline profiling to indicate how
	 * much memory was used when leaving a function. */
	if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC))
		entry->data[4] = _mali_ukk_report_memory_usage();
}
/*
 * Stop capturing and move to the RETURN state so user space can retrieve
 * events. *count receives the number of events available, clamped to the
 * buffer size.
 */
_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count)
{
	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);

	if (prof_state != MALI_PROFILING_STATE_RUNNING) {
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	/* No more events will be recorded after this point. */
	prof_state = MALI_PROFILING_STATE_RETURN;

	unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
	tracepoint_synchronize_unregister();

	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);

	*count = _mali_osk_atomic_read(&profile_insert_index);
	if (*count > profile_mask)
		*count = profile_mask;

	return _MALI_OSK_ERR_OK;
}
/* Number of retrievable events; 0 unless profiling is in the RETURN state. */
u32 _mali_internal_profiling_get_count(void)
{
	u32 available = 0;

	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
	if (prof_state == MALI_PROFILING_STATE_RETURN) {
		available = _mali_osk_atomic_read(&profile_insert_index);
		if (available > profile_mask)
			available = profile_mask;
	}
	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);

	return available;
}
/*
 * Copy out event number 'index' (0-based, oldest first) into the caller's
 * buffers. Only valid in the RETURN state. Returns _MALI_OSK_ERR_FAULT
 * for an index outside the recorded range.
 */
_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
{
	u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);

	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);

	if (index >= profile_mask) {
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_FAULT;
	}

	/*
	 * Once the buffer has wrapped (raw_index exceeds the mask), the
	 * logical index must be rotated by the insert position to find
	 * the physical slot of the oldest surviving events.
	 */
	if ((raw_index & ~profile_mask) != 0) {
		index += raw_index;
		index &= profile_mask;
	}

	if (prof_state != MALI_PROFILING_STATE_RETURN) {
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	if (index >= raw_index) {
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_FAULT;
	}

	*timestamp = profile_entries[index].timestamp;
	*event_id = profile_entries[index].event_id;
	data[0] = profile_entries[index].data[0];
	data[1] = profile_entries[index].data[1];
	data[2] = profile_entries[index].data[2];
	data[3] = profile_entries[index].data[3];
	data[4] = profile_entries[index].data[4];

	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
	return _MALI_OSK_ERR_OK;
}
/*
 * Discard retrieved events and return to the IDLE state so a new capture
 * can be started. Only valid in the RETURN state.
 */
_mali_osk_errcode_t _mali_internal_profiling_clear(void)
{
	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);

	if (prof_state != MALI_PROFILING_STATE_RETURN) {
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	prof_state = MALI_PROFILING_STATE_IDLE;
	profile_mask = 0;
	_mali_osk_atomic_init(&profile_insert_index, 0);

	if (profile_entries != NULL) {
		_mali_osk_vfree(profile_entries);
		profile_entries = NULL;
	}

	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
	return _MALI_OSK_ERR_OK;
}
/* MALI_TRUE while a capture is actively running. */
mali_bool _mali_internal_profiling_is_recording(void)
{
	if (prof_state == MALI_PROFILING_STATE_RUNNING)
		return MALI_TRUE;
	return MALI_FALSE;
}
/* MALI_TRUE when a finished capture is waiting to be retrieved. */
mali_bool _mali_internal_profiling_have_recording(void)
{
	if (prof_state == MALI_PROFILING_STATE_RETURN)
		return MALI_TRUE;
	return MALI_FALSE;
}
| gpl-2.0 |
EPDCenter/android_kernel_allwinner_a31_unusual | arch/arm/kernel/signal.c | 118 | 20817 | /*
* linux/arch/arm/kernel/signal.c
*
* Copyright (C) 1995-2009 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
#include "signal.h"
/* Signals that can never be blocked: everything except SIGKILL/SIGSTOP. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
/*
 * For Thumb syscalls, we pass the syscall number via r7. We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
/*
 * Sigreturn trampolines, indexed in setup_return() as
 * idx = (thumb << 1) [+ 3 for SA_SIGINFO handlers]; entries [idx] and
 * [idx+1] are either copied onto the user stack frame or executed from
 * the kernel high page (KERN_SIGRETURN_CODE).
 */
const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
/*
 * Either we support OABI only, or we have EABI with the OABI
 * compat layer enabled. In the later case we don't know if
 * user space is EABI or not, and if not we must not clobber r7.
 * Always using the OABI syscall solves that issue and works for
 * all those cases.
 */
const unsigned long syscall_restart_code[2] = {
	SWI_SYS_RESTART, /* swi __NR_restart_syscall */
	0xe49df004, /* ldr pc, [sp], #4 */
};
/*
* atomically swap in the new signal mask, and wait for a signal.
*/
/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	mask &= _BLOCKABLE;	/* SIGKILL/SIGSTOP can never be blocked */
	spin_lock_irq(&current->sighand->siglock);
	/* Remember the old mask; the signal delivery path restores it. */
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Sleep until a signal wakes us. */
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	/* Tell the signal code to restore saved_sigmask on the way out. */
	set_restore_sigmask();
	/* Always interrupted by the delivered signal, never auto-restarted. */
	return -ERESTARTNOHAND;
}
/*
 * Legacy sigaction syscall: translate the user's old_sigaction layout
 * to/from the kernel k_sigaction and hand off to do_sigaction().
 * Returns 0 on success, -EFAULT on a bad user pointer.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* access_ok() validates the whole struct, so the later
		 * __get_user calls may skip their own range checks. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		/* Old ABI carries only the first word of the mask. */
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
#ifdef CONFIG_CRUNCH
/*
 * Save the Crunch coprocessor state into the user signal frame.
 * The state is first assembled in an on-stack bounce buffer with 8 spare
 * bytes so it can be 64-bit aligned (required by crunch_task_copy), then
 * copied out in one go. Returns non-zero on copy failure.
 */
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
/*
 * Reload the Crunch coprocessor state from the user signal frame via an
 * aligned on-stack bounce buffer. The magic/size fields are validated
 * before the state is applied. Returns 0 on success, -1 on any error.
 */
static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
#ifdef CONFIG_IWMMXT
/*
 * Save the iWMMXt coprocessor state into the user signal frame, staged
 * through a 64-bit-aligned on-stack buffer (same scheme as the Crunch
 * variant above). Returns non-zero on copy failure.
 */
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}
/*
 * Reload the iWMMXt coprocessor state from the user signal frame via an
 * aligned bounce buffer, validating magic/size first.
 * Returns 0 on success, -1 on any error.
 */
static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
#ifdef CONFIG_VFP
/*
 * Save the VFP state into the user signal frame: write the magic/size
 * header, then let the VFP code dump (and clear) the hardware state.
 * Returns 0 on success or a negative errno.
 */
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}
/*
 * Reload the VFP state from the user signal frame after validating the
 * magic/size header. Returns 0 on success or a negative errno.
 */
static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
#endif
/*
 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;		/* saved CPU context + signal mask */
	unsigned long retcode[2];	/* sigreturn trampoline (when no SA_RESTORER) */
};

struct rt_sigframe {
	struct siginfo info;		/* siginfo passed to SA_SIGINFO handlers */
	struct sigframe sig;
};
/*
 * Restore the user context saved by setup_sigframe(): the signal mask,
 * every core register from uc_mcontext, and any coprocessor state stored
 * in uc_regspace. Returns non-zero if the frame is corrupt or the
 * restored registers would be invalid for user mode.
 */
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		sigdelsetmask(&set, ~_BLOCKABLE);	/* never block SIGKILL/SIGSTOP */
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	/* err accumulates across all of the register fetches below. */
	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	/* Reject register values user space is not allowed to set. */
	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}
/*
 * Return from a non-RT signal handler: validate and unwind the sigframe
 * that setup_frame() pushed at regs->ARM_sp. Delivers SIGSEGV on a
 * corrupt frame.
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here. If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	/* Return value is the restored r0, not a syscall status. */
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * Return from an RT (SA_SIGINFO) signal handler: like sys_sigreturn(),
 * but the frame also carries siginfo and the alternate-stack state,
 * which is restored via do_sigaltstack().
 */
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here. If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	/* Restore the alternate signal stack settings saved in the frame. */
	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * Write the user context into a signal frame: all core registers, the
 * fault bookkeeping fields, the signal mask, and any coprocessor state
 * in uc_regspace. Mirror image of restore_sigframe(). Returns non-zero
 * if any user-space write fails.
 */
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	/* Fault details from the last exception, for the handler's benefit. */
	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	/* Terminator so restorers know where the aux records end. */
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
/*
 * Pick the user-space address for a new signal frame: the current stack,
 * or the alternate signal stack for SA_ONSTACK handlers not already on
 * it. The frame is 8-byte aligned per the ATPCS and verified writable;
 * NULL is returned if it is not.
 */
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/* X/Open sanctioned signal stack switching. */
	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
		sp = current->sas_ss_sp + current->sas_ss_size;

	/* ATPCS B01 mandates 8-byte alignment. */
	frame = (void __user *)((sp - framesize) & ~7);

	/* Make sure the frame is actually writable before using it. */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		return NULL;

	return frame;
}
/*
 * Arrange for the task to enter the signal handler on return to user
 * space: choose the sigreturn trampoline (SA_RESTORER, the kernel high
 * page, or code written onto the stack frame), then rewrite
 * r0/sp/lr/pc/cpsr accordingly. Returns non-zero if the trampoline
 * cannot be written to user space.
 */
static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		/* idx selects the ARM/Thumb, sigreturn/rt_sigreturn variant
		 * in the sigreturn_codes[] table. */
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx], rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;		/* first argument to the handler */
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;		/* handler "returns" into the trampoline */
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
/*
 * Build a non-RT signal frame on the user stack and redirect execution
 * to the handler. Returns non-zero on failure (no usable stack, or a
 * user-space write faulted).
 */
static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}
/*
 * Build an RT (SA_SIGINFO) signal frame: siginfo, ucontext with the
 * alternate-stack description, saved registers, and the trampoline.
 * Also loads r1/r2 with the siginfo/ucontext pointers expected by a
 * three-argument handler. Returns non-zero on failure.
 */
static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	/* Snapshot the current alternate-stack state into the frame. */
	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
/*
* OK, we're invoking a handler
*/
/*
 * OK, we're invoking a handler
 */
/*
 * Deliver one signal: translate its number through the exec domain if
 * needed, build the (rt_)sigframe, sanity-check the resulting registers,
 * and update the blocked mask. Returns 0 on successful delivery;
 * otherwise the task is sent SIGSEGV and the error is returned.
 */
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return ret;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

	return 0;
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
/*
 * Deliver any pending signal on the way back to user mode, handling
 * system-call restart semantics: the return value and PC are rewound
 * or adjusted depending on the -ERESTART* code and the handler's
 * SA_RESTART flag. 'syscall' is non-zero when we are returning from a
 * system call (and restart may therefore apply).
 */
static void do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		/* Back up over the SWI instruction (2 bytes in Thumb mode). */
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		case -ERESTART_RESTARTBLOCK:
			regs->ARM_r0 = -EINTR;
			break;
		}
	}

	if (try_to_freeze())
		goto no_signal;

	/*
	 * Get the signal to deliver. When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		sigset_t *oldset;

		/*
		 * Depending on the signal settings we may need to revert the
		 * decision to restart the system call. But skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND
			    || (retval == -ERESTARTSYS
				&& !(ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}

		/* Prefer the mask saved by sigsuspend()/pselect() if any. */
		if (test_thread_flag(TIF_RESTORE_SIGMASK))
			oldset = &current->saved_sigmask;
		else
			oldset = &current->blocked;
		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}
		return;
	}

no_signal:
	if (syscall) {
		/*
		 * Handle restarting a different system call. As above,
		 * if a debugger has chosen to restart at a different PC,
		 * ignore the restart.
		 */
		if (retval == -ERESTART_RESTARTBLOCK
		    && regs->ARM_pc == continue_addr) {
			if (thumb_mode(regs)) {
				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
				regs->ARM_pc -= 2;
			} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 4;
#else
				/* OABI (or unknown ABI): push the return
				 * address and jump to the in-kernel restart
				 * stub instead of clobbering r7. */
				u32 __user *usp;

				regs->ARM_sp -= 4;
				usp = (u32 __user *)regs->ARM_sp;

				if (put_user(regs->ARM_pc, usp) == 0) {
					regs->ARM_pc = KERN_RESTART_CODE;
				} else {
					regs->ARM_sp += 4;
					force_sigsegv(0, current);
				}
#endif
			}
		}

		/* If there's no signal to deliver, we just put the saved sigmask
		 * back.
		 */
		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
			clear_thread_flag(TIF_RESTORE_SIGMASK);
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
	}
}
/*
 * Entry from the return-to-user path: deliver pending signals and run
 * any deferred TIF_NOTIFY_RESUME work (tracehook, key replacement).
 */
asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(regs, syscall);

	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}
| gpl-2.0 |
p500-ics-cm9/LGE_3.0_KERNEL | drivers/mmc/host/sdhci-pci.c | 1142 | 28287 | /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
*
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* Thanks to the following companies for their support:
*
* - JMicron (hardware and technical support)
*/
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include "sdhci.h"
/*
* PCI registers
*/
/* PCI programming-interface codes for SDHCI functions. */
#define PCI_SDHCI_IFPIO 0x00
#define PCI_SDHCI_IFDMA 0x01
#define PCI_SDHCI_IFVENDOR 0x02
/* Slot-information register in PCI config space. */
#define PCI_SLOT_INFO 0x40 /* 8 bits */
#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
/* Upper bound on slots per controller supported by this driver. */
#define MAX_SLOTS 8
struct sdhci_pci_chip;
struct sdhci_pci_slot;

/* Per-device quirks and optional lifecycle hooks, keyed by PCI ID. */
struct sdhci_pci_fixes {
	unsigned int quirks;			/* SDHCI_QUIRK_* flags to apply */
	int (*probe) (struct sdhci_pci_chip *);	/* chip-level probe hook */

	int (*probe_slot) (struct sdhci_pci_slot *);
	void (*remove_slot) (struct sdhci_pci_slot *, int);

	int (*suspend) (struct sdhci_pci_chip *,
			pm_message_t);
	int (*resume) (struct sdhci_pci_chip *);
};

/* One SDHCI slot (host) of a multi-slot PCI controller. */
struct sdhci_pci_slot {
	struct sdhci_pci_chip *chip;	/* owning controller */
	struct sdhci_host *host;	/* generic SDHCI host instance */

	int pci_bar;			/* BAR holding this slot's registers */
};

/* Per-PCI-device controller state. */
struct sdhci_pci_chip {
	struct pci_dev *pdev;

	unsigned int quirks;
	const struct sdhci_pci_fixes *fixes;

	int num_slots; /* Slots on controller */
	struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
};
/*****************************************************************************\
* *
* Hardware specific quirk handling *
* *
\*****************************************************************************/
/*
 * Ricoh: on Samsung and Sony subsystems the controller needs a reset
 * even when no card is present.
 */
static int ricoh_probe(struct sdhci_pci_chip *chip)
{
	unsigned short subvendor = chip->pdev->subsystem_vendor;

	if (subvendor == PCI_VENDOR_ID_SAMSUNG ||
	    subvendor == PCI_VENDOR_ID_SONY)
		chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;

	return 0;
}
/*
 * The Ricoh MMC variant has SDHCI_QUIRK_MISSING_CAPS set (see
 * sdhci_ricoh_mmc below), so supply a hand-built capabilities word:
 * timeout/base clock fields of 0x21, timeout-clock-in-MHz, 3.3V supply
 * and SDMA support.
 */
static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
	slot->host->caps =
		((0x21 << SDHCI_TIMEOUT_CLK_SHIFT)
			& SDHCI_TIMEOUT_CLK_MASK) |
		((0x21 << SDHCI_CLOCK_BASE_SHIFT)
			& SDHCI_CLOCK_BASE_MASK) |
		SDHCI_TIMEOUT_CLK_UNIT |
		SDHCI_CAN_VDD_330 |
		SDHCI_CAN_DO_SDMA;
	return 0;
}
/*
 * Resume hook: the Ricoh controller needs time to settle after resume,
 * otherwise it becomes confused if the card state changed during
 * suspend.
 */
static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
{
	/* Apply a delay to allow controller to settle */
	msleep(500);
	return 0;
}
/* Ricoh SD controllers: broken high-bit DMA addressing, DMA forced on. */
static const struct sdhci_pci_fixes sdhci_ricoh = {
	.probe		= ricoh_probe,
	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_FORCE_DMA |
			  SDHCI_QUIRK_CLOCK_BEFORE_RESET,
};

/* Ricoh MMC-only function: capabilities register missing entirely. */
static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
	.probe_slot	= ricoh_mmc_probe_slot,
	.resume		= ricoh_mmc_resume,
	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_CLOCK_BEFORE_RESET |
			  SDHCI_QUIRK_NO_CARD_NO_RESET |
			  SDHCI_QUIRK_MISSING_CAPS
};

/* ENE CB-712: DMA engine unusable. */
static const struct sdhci_pci_fixes sdhci_ene_712 = {
	.quirks		= SDHCI_QUIRK_SINGLE_POWER_WRITE |
			  SDHCI_QUIRK_BROKEN_DMA,
};

/* ENE CB-714: as CB-712, plus a reset needed on ios changes. */
static const struct sdhci_pci_fixes sdhci_ene_714 = {
	.quirks		= SDHCI_QUIRK_SINGLE_POWER_WRITE |
			  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
			  SDHCI_QUIRK_BROKEN_DMA,
};

/* Marvell CaFe chip. */
static const struct sdhci_pci_fixes sdhci_cafe = {
	.quirks		= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
			  SDHCI_QUIRK_NO_BUSY_IRQ |
			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
/*
* ADMA operation is disabled for Moorestown platform due to
* hardware bugs.
*/
static int mrst_hc_probe(struct sdhci_pci_chip *chip)
{
	/*
	 * slots number is fixed here for MRST as SDIO3/5 are never used and
	 * have hardware bugs.
	 */
	chip->num_slots = 1;
	return 0;
}
/* Intel Moorestown host controller 0: ADMA disabled (hardware bugs). */
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
};

/* Moorestown HC1/HC2: as HC0, and limited to a single usable slot. */
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
	.probe		= mrst_hc_probe,
};

/* Intel Medfield SD slot. */
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
};

/* Intel Medfield eMMC/SDIO slots. */
static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
};
/* O2Micro extra registers */
#define O2_SD_LOCK_WP 0xD3
#define O2_SD_MULTI_VCC3V 0xEE
#define O2_SD_CLKREQ 0xEC
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
#define O2_SD_INF_MOD 0xF1
/*
 * O2Micro chip setup: works around broken ADMA on the 8220/8221 and
 * 8320/8321 parts by reprogramming vendor config-space registers
 * (unlock write protect, force SDMA-only caps, disable ADMA and
 * infinite-transfer mode, then re-lock). Returns 0 or the error from a
 * failed config-space read.
 */
static int o2_probe(struct sdhci_pci_chip *chip)
{
	int ret;
	u8 scratch;

	switch (chip->pdev->device) {
	case PCI_DEVICE_ID_O2_8220:
	case PCI_DEVICE_ID_O2_8221:
	case PCI_DEVICE_ID_O2_8320:
	case PCI_DEVICE_ID_O2_8321:

		/* This extra setup is required due to broken ADMA. */
		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
		if (ret)
			return ret;
		scratch &= 0x7f;	/* clear the write-protect lock bit */
		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);

		/* Set Multi 3 to VCC3V# */
		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);

		/* Disable CLK_REQ# support after media DET */
		ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
		if (ret)
			return ret;
		scratch |= 0x20;
		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);

		/* Choose capabilities, enable SDMA. We have to write 0x01
		 * to the capabilities register first to unlock it.
		 */
		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
		if (ret)
			return ret;
		scratch |= 0x01;
		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);

		/* Disable ADMA1/2 */
		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);

		/* Disable the infinite transfer mode */
		ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
		if (ret)
			return ret;
		scratch |= 0x08;
		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);

		/* Lock WP */
		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
		if (ret)
			return ret;
		scratch |= 0x80;
		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
	}

	return 0;
}
/*
 * Switch the JMicron card power output on or off via vendor config-space
 * register 0xAE. Returns 0 on success or a PCI config access error.
 */
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
	u8 scratch;
	int ret;

	ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
	if (ret)
		return ret;

	/*
	 * Turn PMOS on [bit 0], set over current detection to 2.4 V
	 * [bit 1:2] and enable over current debouncing [bit 6].
	 */
	if (on)
		scratch |= 0x47;
	else
		scratch &= ~0x47;

	return pci_write_config_byte(chip->pdev, 0xAE, scratch);
}
/*
 * JMicron chip-level probe: apply revision-0 quirks, refuse to bind to
 * the secondary (SD) interface when the MMC interface exists on the same
 * slot, enable the card power output, and flag unstable RO detection on
 * JM388 parts. Returns 0, -ENODEV for the secondary interface, or the
 * power-enable error.
 */
static int jmicron_probe(struct sdhci_pci_chip *chip)
{
	int ret;
	u16 mmcdev = 0;

	if (chip->pdev->revision == 0) {
		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_32BIT_DMA_SIZE |
			  SDHCI_QUIRK_32BIT_ADMA_SIZE |
			  SDHCI_QUIRK_RESET_AFTER_REQUEST |
			  SDHCI_QUIRK_BROKEN_SMALL_PIO;
	}

	/*
	 * JMicron chips can have two interfaces to the same hardware
	 * in order to work around limitations in Microsoft's driver.
	 * We need to make sure we only bind to one of them.
	 *
	 * This code assumes two things:
	 *
	 * 1. The PCI code adds subfunctions in order.
	 *
	 * 2. The MMC interface has a lower subfunction number
	 *    than the SD interface.
	 */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;

	if (mmcdev) {
		struct pci_dev *sd_dev;

		/* Look for the companion MMC function on the same slot/bus. */
		sd_dev = NULL;
		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
						mmcdev, sd_dev)) != NULL) {
			if ((PCI_SLOT(chip->pdev->devfn) ==
				PCI_SLOT(sd_dev->devfn)) &&
				(chip->pdev->bus == sd_dev->bus))
				break;
		}

		if (sd_dev) {
			pci_dev_put(sd_dev);
			dev_info(&chip->pdev->dev, "Refusing to bind to "
				"secondary interface.\n");
			return -ENODEV;
		}
	}

	/*
	 * JMicron chips need a bit of a nudge to enable the power
	 * output pins.
	 */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	/* quirk for unstable RO-detection on JM388 chips */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;

	return 0;
}
/* Set or clear the MMC-enable bit (bit 0) in vendor register 0xC0. */
static void jmicron_enable_mmc(struct sdhci_host *host, int on)
{
	u8 val = readb(host->ioaddr + 0xC0);

	if (on)
		val |= 0x01;
	else
		val &= ~0x01;

	writeb(val, host->ioaddr + 0xC0);
}
/*
 * Per-slot fixup for JMicron parts: avoid the ADMA engine on early
 * revision-0 silicon, restrict MMC voltages on JM388, enable the
 * interrupt path of the secondary (MMC) interface and advertise bus
 * width testing.  Always returns 0.
 */
static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
	if (slot->chip->pdev->revision == 0) {
		u16 version;

		/* Vendor version field of the host controller. */
		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
		version = (version & SDHCI_VENDOR_VER_MASK) >>
			SDHCI_VENDOR_VER_SHIFT;

		/*
		 * Older versions of the chip have lots of nasty glitches
		 * in the ADMA engine. It's best just to avoid it
		 * completely.
		 */
		if (version < 0xAC)
			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	}

	/* JM388 MMC doesn't support 1.8V while SD supports it */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31 |
			MMC_VDD_165_195; /* allow 1.8V */
		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
	}

	/*
	 * The secondary interface requires a bit set to get the
	 * interrupts.
	 */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		jmicron_enable_mmc(slot->host, 1);

	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;

	return 0;
}
static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
{
if (dead)
return;
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 0);
}
/* Chip suspend hook: turn off the MMC function on every slot. */
static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
{
	u16 dev = chip->pdev->device;
	int i;

	if (dev == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
	    dev == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		for (i = 0; i < chip->num_slots; i++)
			jmicron_enable_mmc(chip->slots[i]->host, 0);

	return 0;
}
static int jmicron_resume(struct sdhci_pci_chip *chip)
{
int ret, i;
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0; i < chip->num_slots; i++)
jmicron_enable_mmc(chip->slots[i]->host, 1);
}
ret = jmicron_pmos(chip, 1);
if (ret) {
dev_err(&chip->pdev->dev, "Failure enabling card power\n");
return ret;
}
return 0;
}
/* Per-vendor fixup tables, referenced from pci_ids[].driver_data. */
static const struct sdhci_pci_fixes sdhci_o2 = {
	.probe = o2_probe,
};

static const struct sdhci_pci_fixes sdhci_jmicron = {
	.probe = jmicron_probe,
	.probe_slot = jmicron_probe_slot,
	.remove_slot = jmicron_remove_slot,
	.suspend = jmicron_suspend,
	.resume = jmicron_resume,
};
/* SysKonnect CardBus2SDIO extra registers (offsets into the BAR). */
#define SYSKT_CTRL 0x200		/* bridge control */
#define SYSKT_RDFIFO_STAT 0x204		/* read FIFO status */
#define SYSKT_WRFIFO_STAT 0x208		/* write FIFO status */
#define SYSKT_POWER_DATA 0x20c		/* regulator voltage select */
#define SYSKT_POWER_330 0xef		/* 3.3 V setting */
#define SYSKT_POWER_300 0xf8		/* 3.0 V setting */
#define SYSKT_POWER_184 0xcc		/* 1.84 V setting */
#define SYSKT_POWER_CMD 0x20d		/* regulator command */
#define SYSKT_POWER_START (1 << 7)	/* kick the regulator */
#define SYSKT_POWER_STATUS 0x20e	/* regulator status */
#define SYSKT_POWER_STATUS_OK (1 << 0)	/* output stable */
#define SYSKT_BOARD_REV 0x210		/* board revision, BCD */
#define SYSKT_CHIP_REV 0x211		/* chip revision, BCD */
#define SYSKT_CONF_DATA 0x212		/* supported voltage rails */
#define SYSKT_CONF_DATA_1V8 (1 << 2)
#define SYSKT_CONF_DATA_2V5 (1 << 1)
#define SYSKT_CONF_DATA_3V3 (1 << 0)
static int syskt_probe(struct sdhci_pci_chip *chip)
{
if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
chip->pdev->class &= ~0x0000FF;
chip->pdev->class |= PCI_SDHCI_IFDMA;
}
return 0;
}
/*
 * Per-slot setup for the SysKonnect CardBus2SDIO bridge: report board
 * and chip revisions, force DMA on revisions >= 0x20, then select the
 * 3.3 V regulator output and poll (max ~1 ms) for it to stabilize.
 * Returns 0 on success or -ENODEV if power never came up.
 *
 * Fix: the "never stabilized" dev_err() lacked a trailing newline, so
 * the following printk would be appended to the same log line.
 */
static int syskt_probe_slot(struct sdhci_pci_slot *slot)
{
	int tm, ps;

	u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
	u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
	dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
		"board rev %d.%d, chip rev %d.%d\n",
		board_rev >> 4, board_rev & 0xf,
		chip_rev >> 4, chip_rev & 0xf);
	if (chip_rev >= 0x20)
		slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;

	/* Select 3.3 V and start the regulator. */
	writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
	writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
	udelay(50);
	tm = 10;  /* Wait max 1 ms */
	do {
		ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
		if (ps & SYSKT_POWER_STATUS_OK)
			break;
		udelay(100);
	} while (--tm);
	if (!tm) {
		dev_err(&slot->chip->pdev->dev,
			"power regulator never stabilized\n");
		/* Shut the regulator back down before bailing out. */
		writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
		return -ENODEV;
	}

	return 0;
}
/* SysKonnect fixups; the bridge cannot drive VDD and bus power at once. */
static const struct sdhci_pci_fixes sdhci_syskt = {
	.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
	.probe = syskt_probe,
	.probe_slot = syskt_probe_slot,
};

/* VIA: revision 0x10 silicon needs a delay after switching card power. */
static int via_probe(struct sdhci_pci_chip *chip)
{
	if (chip->pdev->revision == 0x10)
		chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;

	return 0;
}

static const struct sdhci_pci_fixes sdhci_via = {
	.probe = via_probe,
};
/*
 * PCI match table.  driver_data carries the sdhci_pci_fixes block (if
 * any) that sdhci_pci_probe() applies for the device.  The final
 * class-based entry binds any standards-compliant SD host controller
 * with no fixups.
 */
static const struct pci_device_id pci_ids[] __devinitdata = {
	/* Ricoh */
	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ricoh,
	},

	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= 0x843,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ricoh_mmc,
	},

	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= 0xe822,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ricoh_mmc,
	},

	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= 0xe823,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ricoh_mmc,
	},

	/* ENE */
	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ene_712,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ene_712,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ene_714,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_ene_714,
	},

	/* Marvell CaFe */
	{
		.vendor		= PCI_VENDOR_ID_MARVELL,
		.device		= PCI_DEVICE_ID_MARVELL_88ALP01_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_cafe,
	},

	/* JMicron JMB38x / JMB388 */
	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
	},

	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_MMC,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
	},

	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
	},

	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
	},

	/* SysKonnect CardBus2SDIO */
	{
		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
		.device		= 0x8000,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_syskt,
	},

	/* VIA */
	{
		.vendor		= PCI_VENDOR_ID_VIA,
		.device		= 0x95d0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_via,
	},

	/* Intel Moorestown / Medfield */
	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MRST_SD0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc0,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MRST_SD1,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MRST_SD2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MFD_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sd,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MFD_SDIO1,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MFD_SDIO2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MFD_EMMC0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
	},

	{
		.vendor		= PCI_VENDOR_ID_INTEL,
		.device		= PCI_DEVICE_ID_INTEL_MFD_EMMC1,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
	},

	/* O2 Micro */
	{
		.vendor		= PCI_VENDOR_ID_O2,
		.device		= PCI_DEVICE_ID_O2_8120,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_o2,
	},

	{
		.vendor		= PCI_VENDOR_ID_O2,
		.device		= PCI_DEVICE_ID_O2_8220,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_o2,
	},

	{
		.vendor		= PCI_VENDOR_ID_O2,
		.device		= PCI_DEVICE_ID_O2_8221,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_o2,
	},

	{
		.vendor		= PCI_VENDOR_ID_O2,
		.device		= PCI_DEVICE_ID_O2_8320,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_o2,
	},

	{
		.vendor		= PCI_VENDOR_ID_O2,
		.device		= PCI_DEVICE_ID_O2_8321,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_o2,
	},

	{	/* Generic SD host controller */
		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
	},

	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);
/*****************************************************************************\
* *
* SDHCI core callbacks *
* *
\*****************************************************************************/
/*
 * Core callback: set a 32-bit DMA mask and enable bus mastering for
 * the host's PCI device.  Warn when SDMA will be used even though the
 * class code's programming interface doesn't claim DMA support.
 */
static int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot;
	struct pci_dev *pdev;
	int ret;

	slot = sdhci_priv(host);
	pdev = slot->chip->pdev;

	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
		((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		dev_warn(&pdev->dev, "Will use DMA mode even though HW "
			"doesn't fully claim to support it.\n");
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pci_set_master(pdev);

	return 0;
}

/* Host operations shared by every slot this driver creates. */
static struct sdhci_ops sdhci_pci_ops = {
	.enable_dma = sdhci_pci_enable_dma,
};
/*****************************************************************************\
* *
* Suspend/resume *
* *
\*****************************************************************************/
#ifdef CONFIG_PM
/*
 * PM suspend: suspend every slot's host, run the chip-level suspend
 * fixup, then drop the PCI device into a low-power state.  Any
 * failure resumes the slots already suspended and aborts.  Hosts
 * asking for MMC_PM_KEEP_POWER stay in D3hot (with PME armed when
 * MMC_PM_WAKE_SDIO_IRQ is also set); otherwise the device is disabled
 * and moved to the state chosen by pci_choose_state().
 */
static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;
	mmc_pm_flag_t slot_pm_flags;
	mmc_pm_flag_t pm_flags = 0;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	for (i = 0; i < chip->num_slots; i++) {
		slot = chip->slots[i];
		if (!slot)
			continue;

		ret = sdhci_suspend_host(slot->host, state);

		if (ret) {
			/* Undo: resume the slots suspended so far. */
			for (i--; i >= 0; i--)
				sdhci_resume_host(chip->slots[i]->host);
			return ret;
		}

		slot_pm_flags = slot->host->mmc->pm_flags;
		if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
			sdhci_enable_irq_wakeups(slot->host);

		/* Union of every slot's PM requirements decides below. */
		pm_flags |= slot_pm_flags;
	}

	if (chip->fixes && chip->fixes->suspend) {
		ret = chip->fixes->suspend(chip, state);
		if (ret) {
			/* Chip fixup failed: resume everything. */
			for (i = chip->num_slots - 1; i >= 0; i--)
				sdhci_resume_host(chip->slots[i]->host);
			return ret;
		}
	}

	pci_save_state(pdev);
	if (pm_flags & MMC_PM_KEEP_POWER) {
		if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
			pci_pme_active(pdev, true);
			pci_enable_wake(pdev, PCI_D3hot, 1);
		}
		pci_set_power_state(pdev, PCI_D3hot);
	} else {
		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	return 0;
}
/*
 * PM resume: restore PCI state and power, run the chip-level resume
 * fixup, then resume every slot's host.  The first error aborts the
 * sequence and is returned.
 */
static int sdhci_pci_resume(struct pci_dev *pdev)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Chip fixup runs before the hosts so power/quirks are in place. */
	if (chip->fixes && chip->fixes->resume) {
		ret = chip->fixes->resume(chip);
		if (ret)
			return ret;
	}

	for (i = 0; i < chip->num_slots; i++) {
		slot = chip->slots[i];
		if (!slot)
			continue;

		ret = sdhci_resume_host(slot->host);
		if (ret)
			return ret;
	}

	return 0;
}
#else /* CONFIG_PM */
#define sdhci_pci_suspend NULL
#define sdhci_pci_resume NULL
#endif /* CONFIG_PM */
/*****************************************************************************\
* *
* Device probing/removal *
* *
\*****************************************************************************/
/*
 * Probe a single slot behind BAR @bar: validate the BAR, allocate an
 * sdhci_host (the slot lives in its private area), map the register
 * window, run the chip's per-slot fixup and register the host.  On
 * failure the goto chain unwinds in strict reverse order of
 * acquisition.  Returns the new slot or an ERR_PTR().
 */
static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int ret;

	/* The standard host registers must live in a memory BAR. */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
		return ERR_PTR(-ENODEV);
	}

	/* The spec mandates a 256 byte register window; warn but carry on. */
	if (pci_resource_len(pdev, bar) != 0x100) {
		dev_err(&pdev->dev, "Invalid iomem size. You may "
			"experience problems.\n");
	}

	/* Vendor-specific or unknown programming interfaces are unusable
	 * unless a chip fixup (e.g. syskt_probe) rewrote the class code. */
	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
	if (IS_ERR(host)) {
		dev_err(&pdev->dev, "cannot allocate host\n");
		return ERR_CAST(host);
	}

	slot = sdhci_priv(host);

	slot->chip = chip;
	slot->host = host;
	slot->pci_bar = bar;

	host->hw_name = "PCI";
	host->ops = &sdhci_pci_ops;
	host->quirks = chip->quirks;

	host->irq = pdev->irq;

	ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
	if (ret) {
		dev_err(&pdev->dev, "cannot request region\n");
		goto free;
	}

	host->ioaddr = pci_ioremap_bar(pdev, bar);
	if (!host->ioaddr) {
		dev_err(&pdev->dev, "failed to remap registers\n");
		ret = -ENOMEM;
		goto release;
	}

	if (chip->fixes && chip->fixes->probe_slot) {
		ret = chip->fixes->probe_slot(slot);
		if (ret)
			goto unmap;
	}

	host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;

	ret = sdhci_add_host(host);
	if (ret)
		goto remove;

	return slot;

	/* Error unwind: reverse order of the steps above. */
remove:
	if (chip->fixes && chip->fixes->remove_slot)
		chip->fixes->remove_slot(slot, 0);

unmap:
	iounmap(host->ioaddr);

release:
	pci_release_region(pdev, bar);

free:
	sdhci_free_host(host);

	return ERR_PTR(ret);
}
/*
 * Tear down one slot.  Reading all-ones from the interrupt status
 * register means the device has dropped off the bus, so the host is
 * removed in "dead" mode and the fixup is told as much.
 */
static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	u32 status;
	int dead = 0;

	status = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (status == (u32)-1)
		dead = 1;

	sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	pci_release_region(slot->chip->pdev, slot->pci_bar);
	sdhci_free_host(slot->host);
}
/*
 * PCI probe entry point: read the slot count and first BAR from the
 * PCI_SLOT_INFO config register, enable the device, allocate the chip
 * bookkeeping structure, run the chip-level fixup and then probe each
 * slot in turn.  A failing slot tears down the ones created before it.
 */
static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	/* The register encodes (slots - 1). */
	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
	if (slots == 0)
		return -ENODEV;

	BUG_ON(slots > MAX_SLOTS);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	/* Only BARs 0-5 exist on a type-0 PCI header. */
	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto err;
	}

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
	if (chip->fixes)
		chip->quirks = chip->fixes->quirks;
	chip->num_slots = slots;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			goto free;
	}

	slots = chip->num_slots;	/* Quirk may have changed this */

	for (i = 0; i < slots; i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
		if (IS_ERR(slot)) {
			/* Unwind the slots probed so far. */
			for (i--; i >= 0; i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			ret = PTR_ERR(slot);
			goto free;
		}

		chip->slots[i] = slot;
	}

	return 0;

free:
	pci_set_drvdata(pdev, NULL);
	kfree(chip);

err:
	pci_disable_device(pdev);
	return ret;
}
/* PCI remove: tear down every slot, free the chip, disable the device. */
static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
{
	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);

	if (chip) {
		int i;

		for (i = 0; i < chip->num_slots; i++)
			sdhci_pci_remove_slot(chip->slots[i]);

		pci_set_drvdata(pdev, NULL);
		kfree(chip);
	}

	pci_disable_device(pdev);
}
/* PCI driver glue; suspend/resume are NULL when !CONFIG_PM. */
static struct pci_driver sdhci_driver = {
	.name = "sdhci-pci",
	.id_table = pci_ids,
	.probe = sdhci_pci_probe,
	.remove = __devexit_p(sdhci_pci_remove),
	.suspend = sdhci_pci_suspend,
	.resume = sdhci_pci_resume,
};
/*****************************************************************************\
* *
* Driver init/exit *
* *
\*****************************************************************************/
/* Register the PCI driver at module load time. */
static int __init sdhci_drv_init(void)
{
	return pci_register_driver(&sdhci_driver);
}

/* Unregister the PCI driver on module unload. */
static void __exit sdhci_drv_exit(void)
{
	pci_unregister_driver(&sdhci_driver);
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");
/* ==== begin fs/proc/task_mmu.c (GPL-2.0) ==== */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
/*
 * Emit the VmPeak/VmSize/.../VmSwap block of /proc/<pid>/status for
 * @mm into seq_file @m.  All figures are reported in kB.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
/* Total address-space size of @mm in bytes. */
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

/*
 * Fill in the /proc/<pid>/statm numbers (all in pages): *shared is
 * file-backed rss, *resident is total rss.  Returns the total mapped
 * size.
 */
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
/* Pad the maps line out to a fixed column before the path/name field. */
static void pad_len_spaces(struct seq_file *m, int len)
{
	int pad = 25 + sizeof(void *) * 6 - len;

	if (pad < 1)
		pad = 1;
	seq_printf(m, "%*c", pad, ' ');
}
/*
 * Drop the mmap_sem and mm reference taken in m_start(), unless @vma
 * is the sentinel tail vma, which carries no such references.
 */
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	struct mm_struct *mm;

	if (!vma || vma == priv->tail_vma)
		return;

	mm = vma->vm_mm;
	up_read(&mm->mmap_sem);
	mmput(mm);
}
/*
 * seq_file start callback: take a reference on the task and its mm,
 * acquire mmap_sem for reading, and return the vma to continue from.
 * The gate vma acts as a sentinel "tail" entry emitted after the vma
 * list; the mm references/lock are released in vma_stop()/m_stop().
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
/* seq_file next: advance to the next vma, then to the gate vma, then stop. */
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *cur = v;
	struct vm_area_struct *tail = priv->tail_vma;

	(*pos)++;

	/* Ordinary vma with a successor: just step forward. */
	if (cur && cur != tail && cur->vm_next)
		return cur->vm_next;

	/* Otherwise release mm refs and emit the tail vma exactly once. */
	vma_stop(priv, cur);
	return (cur == tail) ? NULL : tail;
}
/*
 * seq_file stop callback: release the mm lock/reference (if still
 * held) and the task reference taken in m_start().
 */
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	/* v may be an ERR_PTR from m_start(); only real vmas need cleanup. */
	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
/*
 * Shared open routine for the per-task maps/smaps files: allocate the
 * private cursor state and attach it to a freshly opened seq_file.
 */
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pid = proc_pid(inode);
	ret = seq_open(file, ops);
	if (ret) {
		kfree(priv);
		return ret;
	}

	((struct seq_file *)file->private_data)->private = priv;
	return 0;
}
/*
 * Print one /proc/<pid>/maps line for @vma: address range, rwxp
 * permissions, file offset, device, inode, and the backing file path
 * or a [heap]/[stack]/[vdso] marker.
 */
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	/* %n records the column so the name field can be padded below. */
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->brk &&
						vma->vm_end >= mm->start_brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				/* Anonymous mapping with no mm: the vdso. */
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}
/*
 * seq_file show callback for /proc/<pid>/maps.  m->version caches the
 * vma start address so the next read can resume via find_vma() in
 * m_start() instead of rescanning the list from the beginning.
 */
static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

/* file_operations backing /proc/<pid>/maps. */
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
* Proportional Set Size(PSS): my share of RSS.
*
* PSS of a process is the count of pages it has in memory, where each
* page is divided by the number of processes sharing it. So if a
* process has 1000 pages all to itself, and 1000 shared with one other
* process, its PSS will be 1500.
*
* To keep (accumulated) division errors low, we adopt a 64bit
* fixed-point pss counter to minimize division errors. So (pss >>
* PSS_SHIFT) would be the real byte count.
*
* A shift of 12 before division means (assuming 4K page size):
* - 1M 3-user-pages add up to 8KB errors;
* - supports mapcount up to 2^24, or 16M;
* - supports PSS up to 2^52 bytes, or 4PB.
*/
#define PSS_SHIFT 12
#ifdef CONFIG_PROC_PAGE_MONITOR
/* Per-vma accumulator for show_smap(); sizes are in bytes. */
struct mem_size_stats {
	struct vm_area_struct *vma;	/* vma currently being walked */
	unsigned long resident;
	unsigned long shared_clean;	/* mapcount >= 2 */
	unsigned long shared_dirty;
	unsigned long private_clean;	/* mapcount == 1 */
	unsigned long private_dirty;
	unsigned long referenced;	/* young pte or PageReferenced */
	unsigned long anonymous;
	unsigned long anonymous_thp;	/* transparent huge pages */
	unsigned long swap;
	u64 pss;			/* fixed point, see PSS_SHIFT */
};
/*
 * Accumulate smaps statistics for a single pte (or a huge pmd treated
 * as one pte) covering @ptent_size bytes.  PSS is accumulated in
 * 64-bit fixed point (see PSS_SHIFT) and divided by the mapcount.
 */
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	/* Swapped-out entries only contribute to the swap total. */
	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;
		return;
	}

	if (!pte_present(ptent))
		return;

	/* Special mappings (e.g. pfn maps) have no struct page to count. */
	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}
/*
 * page-walk pmd callback for smaps.  A stable transparent huge pmd is
 * accounted as one giant pte under page_table_lock; a splitting one
 * is waited out; otherwise the individual ptes are walked.
 */
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			spin_unlock(&walk->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			/* Account the huge pmd as a single large entry. */
			smaps_pte_entry(*(pte_t *)pmd, addr,
					HPAGE_PMD_SIZE, walk);
			spin_unlock(&walk->mm->page_table_lock);
			mss->anonymous_thp += HPAGE_PMD_SIZE;
			return 0;
		}
	} else {
		spin_unlock(&walk->mm->page_table_lock);
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
/*
 * seq_file show callback for /proc/<pid>/smaps: walk the vma's page
 * tables to gather mem_size_stats, then print the maps header line
 * followed by the per-vma statistics (all in kB).
 */
static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

/* file_operations backing /proc/<pid>/smaps. */
const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * page-walk pmd callback for /proc/<pid>/clear_refs: clear the
 * accessed/referenced state of every present, non-reserved page in
 * the range, splitting any transparent huge pmd first.
 */
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (PageReserved(page))
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
/* Values accepted by writes to /proc/<pid>/clear_refs. */
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

/*
 * Write handler for /proc/<pid>/clear_refs: parse the mode (1-3) and
 * clear the referenced bits of the selected vmas' pages so that a
 * later smaps read shows fresh "Referenced" figures.
 */
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
/* Scratch state for a pagemap walk: entries collected for one read() call. */
struct pagemapread {
	int pos, len;	/* next free slot and capacity, in u64 entries */
	u64 *buffer;	/* kernel-side staging buffer, copied to userspace */
};

/*
 * Bit layout of one 64-bit pagemap entry (see the big comment above
 * pagemap_read() further down): a 3-bit status field in the top bits,
 * a 6-bit page-shift field below it, and the PFN / swap encoding in
 * the remaining low bits.
 */
#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)

/* sentinel returned by add_to_pagemap() when the staging buffer is full */
#define PM_END_OF_BUFFER    1
/*
 * Append one pagemap entry to the staging buffer.
 * Returns PM_END_OF_BUFFER once the buffer is full, 0 otherwise.
 */
static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos] = pfn;
	pm->pos++;
	return (pm->pos < pm->len) ? 0 : PM_END_OF_BUFFER;
}
/*
 * Hole callback for the pagemap walk: every page in an unmapped range is
 * reported as PM_NOT_PRESENT.
 */
static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long vaddr = start;
	int ret = 0;

	while (vaddr < end) {
		ret = add_to_pagemap(vaddr, PM_NOT_PRESENT, pm);
		if (ret)
			break;
		vaddr += PAGE_SIZE;
	}
	return ret;
}
/*
 * Encode a swap pte for the pagemap PFN field: the low bits carry the
 * swap type, the bits above MAX_SWAPFILES_SHIFT carry the swap offset.
 */
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
/*
 * Translate one pte into its 64-bit pagemap representation: swap entries
 * carry PM_SWAP plus the encoded swap location, present pages carry
 * PM_PRESENT plus the PFN, everything else reads as zero.
 */
static u64 pte_to_pagemap_entry(pte_t pte)
{
	if (is_swap_pte(pte))
		return PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	if (pte_present(pte))
		return PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return 0;
}
/*
 * pmd-level callback for the pagemap walk: emits one entry per page in
 * [addr, end).  Addresses outside any VMA, or inside hugetlb VMAs, are
 * reported as PM_NOT_PRESENT.
 */
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* force any transparent huge page apart so we can read ptes */
	split_huge_page_pmd(walk->mm, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Build the pagemap entry for one page inside a huge page: 'offset' is the
 * small-page index within the huge page, added to its base PFN.
 */
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	if (!pte_present(pte))
		return 0;
	return PM_PFRAME(pte_pfn(pte) + offset)
		| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
}
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	/* emit one entry per PAGE_SIZE page covered by the huge pte */
	for (; addr != end; addr += PAGE_SIZE) {
		/* index of this small page within the huge page */
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
* For each page in the address space, this file contains one 64-bit entry
* consisting of the following:
*
* Bits 0-55 page frame number (PFN) if present
* Bits 0-4 swap type if swapped
* Bits 5-55 swap offset if swapped
* Bits 55-60 page shift (page size = 1<<page shift)
* Bit 61 reserved for future use
* Bit 62 page swapped
* Bit 63 page present
*
* If the page is not present but in swap, then the PFN contains an
* encoding of the swap file number and the page's offset into the
* swap. Unmapped pages return a null PFN. This allows determining
* precisely which pages are mapped (or in swap) and comparing mapped
* pages between processes.
*
* Efficient users of this interface will use /proc/pid/maps to
* determine which areas of memory are actually mapped and llseek to
* skip over unmapped regions.
*/
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)
/*
 * read() handler for /proc/<pid>/pagemap.
 *
 * The file offset selects the starting virtual page (one PM_ENTRY_BYTES
 * entry per page).  The walk proceeds in PAGEMAP_WALK_SIZE slices; each
 * slice is staged in pm.buffer and copied to userspace before the next
 * one starts, so mmap_sem is never held across copy_to_user().
 * Returns the number of bytes copied, or a negative errno.
 */
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	/* staging buffer holds exactly one walk slice worth of entries */
	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	/* map the file position to a starting virtual address */
	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	/* a full staging buffer is not an error, just a short walk */
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}
/* /proc/<pid>/pagemap: seekable, read-only array of per-page entries. */
const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA
/* Per-VMA page statistics accumulated while producing /proc/<pid>/numa_maps. */
struct numa_maps {
	struct vm_area_struct *vma;	/* VMA currently being accounted */
	unsigned long pages;		/* total pages counted */
	unsigned long anon;		/* anonymous pages */
	unsigned long active;		/* active or unevictable pages */
	unsigned long writeback;	/* pages under writeback */
	unsigned long mapcount_max;	/* highest mapcount seen */
	unsigned long dirty;		/* dirty (pte or page flag) pages */
	unsigned long swapcache;	/* pages in swap cache */
	unsigned long node[MAX_NUMNODES]; /* per-node page counts */
};

/* seq_file private data: the shared maps state plus the stats scratchpad. */
struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
/*
 * Account one page (or nr_pages worth of a huge page) into the numa_maps
 * statistics: totals, per-flag counters, max mapcount and per-node count.
 */
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			 unsigned long nr_pages)
{
	int mapcount = page_mapcount(page);

	md->pages += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;
	if (PageSwapCache(page))
		md->swapcache += nr_pages;
	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;
	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (mapcount > md->mapcount_max)
		md->mapcount_max = mapcount;

	md->node[page_to_nid(page)] += nr_pages;
}
/*
 * Decide whether a pte points at a page worth accounting: it must be
 * present, a normal (non-special, non-reserved) page, and live on a node
 * with memory.  Returns the page, or NULL to skip it.
 */
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct page *page;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page || PageReserved(page))
		return NULL;

	if (!node_isset(page_to_nid(page), node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}
/*
 * pmd-level callback for the numa_maps walk.  A stable transparent huge
 * page is accounted as one HPAGE_PMD_SIZE/PAGE_SIZE-page unit under the
 * page table lock; otherwise each pte in the range is accounted
 * individually.
 */
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;
	/* page_table_lock serializes us against THP split/collapse */
	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			/* mid-split: wait, then fall through to the pte loop */
			spin_unlock(&walk->mm->page_table_lock);
			wait_split_huge_page(md->vma->anon_vma, pmd);
		} else {
			pte_t huge_pte = *(pte_t *)pmd;
			struct page *page;

			page = can_gather_numa_stats(huge_pte, md->vma, addr);
			if (page)
				gather_stats(page, md, pte_dirty(huge_pte),
						HPAGE_PMD_SIZE/PAGE_SIZE);
			spin_unlock(&walk->mm->page_table_lock);
			return 0;
		}
	} else {
		spin_unlock(&walk->mm->page_table_lock);
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
/* hugetlb callback for the numa_maps walk: account one huge-page entry. */
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	/* counted as a single page here, unlike the THP path above */
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
/* stub when hugetlb support is compiled out */
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
/*
* Display pages allocated per node and memory policy via /proc.
*/
/*
 * seq_file show callback for /proc/<pid>/numa_maps: print one line per VMA
 * with its mempolicy, identity (file/heap/stack/huge) and the statistics
 * collected by walking its page tables.
 */
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	/* fill 'md' by walking this VMA's page tables */
	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	/* remember where to resume if the seq buffer overflowed */
	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
/* seq_file iterator for /proc/<pid>/numa_maps (shares m_start/m_next/m_stop). */
static const struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map,
};
/*
 * open() handler for /proc/<pid>/numa_maps: allocate the private state
 * and attach it to a new seq_file.  Returns 0 or a negative errno.
 */
static int numa_maps_open(struct inode *inode, struct file *file)
{
	struct numa_maps_private *priv;
	struct seq_file *seq;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->proc_maps.pid = proc_pid(inode);
	ret = seq_open(file, &proc_pid_numa_maps_op);
	if (ret) {
		kfree(priv);
		return ret;
	}

	seq = file->private_data;
	seq->private = priv;
	return 0;
}
/* /proc/<pid>/numa_maps: standard seq_file read path. */
const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */
| gpl-2.0 |
kannu1994/maguro_kernel | sound/pci/lx6464es/lx6464es.c | 2678 | 28203 | /* -*- linux-c -*- *
*
* ALSA driver for the digigram lx6464es interface
*
* Copyright (c) 2008, 2009 Tim Blechmann <tim@klingt.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/info.h>
#include "lx6464es.h"
MODULE_AUTHOR("Tim Blechmann");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("digigram lx6464es");
MODULE_SUPPORTED_DEVICE("{digigram lx6464es{}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Digigram LX6464ES interface.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Digigram LX6464ES interface.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable/disable specific Digigram LX6464ES soundcards.");
static const char card_name[] = "LX6464ES";

/* The board sits behind a PLX 9056 bridge; match on Digigram subsystem IDs. */
#define PCI_DEVICE_ID_PLX_LX6464ES		PCI_DEVICE_ID_PLX_9056

static DEFINE_PCI_DEVICE_TABLE(snd_lx6464es_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES),
	  .subvendor = PCI_VENDOR_ID_DIGIGRAM,
	  .subdevice = PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM
	},			/* LX6464ES */
	{ PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES),
	  .subvendor = PCI_VENDOR_ID_DIGIGRAM,
	  .subdevice = PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM
	},			/* LX6464ES-CAE */
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, snd_lx6464es_ids);
/* PGO for USERo in register pci_0x06/loc_0xEC */
#define CHIPSC_RESET_XILINX (1L<<16)
/* alsa callbacks */
/* Hardware capabilities advertised to ALSA for both playback and capture. */
static struct snd_pcm_hardware lx_caps = {
	.info             = (SNDRV_PCM_INFO_MMAP |
			     SNDRV_PCM_INFO_INTERLEAVED |
			     SNDRV_PCM_INFO_MMAP_VALID |
			     SNDRV_PCM_INFO_SYNC_START),
	.formats	  = (SNDRV_PCM_FMTBIT_S16_LE |
			     SNDRV_PCM_FMTBIT_S16_BE |
			     SNDRV_PCM_FMTBIT_S24_3LE |
			     SNDRV_PCM_FMTBIT_S24_3BE),
	.rates            = (SNDRV_PCM_RATE_CONTINUOUS |
			     SNDRV_PCM_RATE_8000_192000),
	.rate_min         = 8000,
	.rate_max         = 192000,
	.channels_min     = 2,
	.channels_max     = 64,
	/* 64 channels * 3 bytes/sample * max block * max periods */
	.buffer_bytes_max = 64*2*3*MICROBLAZE_IBL_MAX*MAX_STREAM_BUFFER,
	.period_bytes_min = (2*2*MICROBLAZE_IBL_MIN*2),
	.period_bytes_max = (4*64*MICROBLAZE_IBL_MAX*MAX_STREAM_BUFFER),
	.periods_min      = 2,
	.periods_max      = MAX_STREAM_BUFFER,
};
static int lx_set_granularity(struct lx6464es *chip, u32 gran);
/*
 * Allocate the board-side pipe for a substream and program the DSP
 * granularity to the period size.  Returns 0 or a negative errno.
 */
static int lx_hardware_open(struct lx6464es *chip,
			    struct snd_pcm_substream *substream)
{
	int err = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;

	int channels = runtime->channels;
	int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
	snd_pcm_uframes_t period_size = runtime->period_size;

	snd_printd(LXP "allocating pipe for %d channels\n", channels);
	err = lx_pipe_allocate(chip, 0, is_capture, channels);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "allocating pipe failed\n");
		return err;
	}

	/* board block size follows the ALSA period size */
	err = lx_set_granularity(chip, period_size);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "setting granularity to %ld failed\n",
			   period_size);
		return err;
	}

	return 0;
}
/*
 * Program the stream format, start the pipe and wait for the board to
 * confirm the start.  Returns 0 or a negative errno.
 */
static int lx_hardware_start(struct lx6464es *chip,
			     struct snd_pcm_substream *substream)
{
	int err = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	snd_printd(LXP "setting stream format\n");
	err = lx_stream_set_format(chip, runtime, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "setting stream format failed\n");
		return err;
	}

	snd_printd(LXP "starting pipe\n");
	err = lx_pipe_start(chip, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "starting pipe failed\n");
		return err;
	}

	/* the start is asynchronous on the board side */
	snd_printd(LXP "waiting for pipe to start\n");
	err = lx_pipe_wait_for_start(chip, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "waiting for pipe failed\n");
		return err;
	}

	return err;
}
/*
 * Pause the pipe, wait for the board to go idle, then stop the pipe.
 * Returns 0 or a negative errno.
 */
static int lx_hardware_stop(struct lx6464es *chip,
			    struct snd_pcm_substream *substream)
{
	int err = 0;
	int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	snd_printd(LXP "pausing pipe\n");
	err = lx_pipe_pause(chip, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "pausing pipe failed\n");
		return err;
	}

	snd_printd(LXP "waiting for pipe to become idle\n");
	err = lx_pipe_wait_for_idle(chip, 0, is_capture);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "waiting for pipe failed\n");
		return err;
	}

	snd_printd(LXP "stopping pipe\n");
	err = lx_pipe_stop(chip, 0, is_capture);
	if (err < 0) {
		/* KERN_ERR added for consistency with the other failure logs */
		snd_printk(KERN_ERR LXP "stopping pipe failed\n");
		return err;
	}

	return err;
}
static int lx_hardware_close(struct lx6464es *chip,
struct snd_pcm_substream *substream)
{
int err = 0;
int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
snd_printd(LXP "releasing pipe\n");
err = lx_pipe_release(chip, 0, is_capture);
if (err < 0) {
snd_printk(LXP "releasing pipe failed\n");
return err;
}
return err;
}
/*
 * ALSA open callback: publish the hardware caps and pin down the
 * constraints the board cannot negotiate (fixed clock rate, bounded
 * period size, 32-frame buffer step).
 */
static int lx_pcm_open(struct snd_pcm_substream *substream)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = 0;
	int board_rate;

	snd_printdd("->lx_pcm_open\n");
	mutex_lock(&chip->setup_mutex);

	/* copy the struct snd_pcm_hardware struct */
	runtime->hw = lx_caps;

#if 0
	/* buffer-size should better be multiple of period-size */
	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0) {
		snd_printk(KERN_WARNING LXP "could not constrain periods\n");
		goto exit;
	}
#endif

	/* the clock rate cannot be changed */
	board_rate = chip->board_sample_rate;
	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
					   board_rate, board_rate);

	if (err < 0) {
		snd_printk(KERN_WARNING LXP "could not constrain periods\n");
		goto exit;
	}

	/* constrain period size */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					   MICROBLAZE_IBL_MIN,
					   MICROBLAZE_IBL_MAX);
	if (err < 0) {
		snd_printk(KERN_WARNING LXP
			   "could not constrain period size\n");
		goto exit;
	}

	snd_pcm_hw_constraint_step(runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);

	snd_pcm_set_sync(substream);
	err = 0;

exit:
	/* private_data is set even on the error paths */
	runtime->private_data = chip;

	mutex_unlock(&chip->setup_mutex);
	snd_printdd("<-lx_pcm_open, %d\n", err);
	return err;
}
/* ALSA close callback: nothing to tear down here (done in hw_free). */
static int lx_pcm_close(struct snd_pcm_substream *substream)
{
	snd_printdd("->lx_pcm_close\n");
	return 0;
}
/*
 * ALSA pointer callback: the DMA position is tracked in whole periods
 * (frame_pos, updated from interrupt context), hence the spinlock.
 */
static snd_pcm_uframes_t lx_pcm_stream_pointer(struct snd_pcm_substream
					       *substream)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	snd_pcm_uframes_t pos;
	unsigned long flags;
	int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	struct lx_stream *lx_stream = is_capture ? &chip->capture_stream :
		&chip->playback_stream;

	snd_printdd("->lx_pcm_stream_pointer\n");

	spin_lock_irqsave(&chip->lock, flags);
	pos = lx_stream->frame_pos * substream->runtime->period_size;
	spin_unlock_irqrestore(&chip->lock, flags);

	snd_printdd(LXP "stream_pointer at %ld\n", pos);
	return pos;
}
/*
 * ALSA prepare callback: if the direction was already running, stop and
 * close the board pipe first, then (re)open and start it with the current
 * runtime parameters.  Also latches the current rate as the board rate.
 */
static int lx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	int err = 0;
	const int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

	snd_printdd("->lx_pcm_prepare\n");

	mutex_lock(&chip->setup_mutex);

	/* prepare may be called several times; reset the pipe if needed */
	if (chip->hardware_running[is_capture]) {
		err = lx_hardware_stop(chip, substream);
		if (err < 0) {
			snd_printk(KERN_ERR LXP "failed to stop hardware. "
				   "Error code %d\n", err);
			goto exit;
		}

		err = lx_hardware_close(chip, substream);
		if (err < 0) {
			snd_printk(KERN_ERR LXP "failed to close hardware. "
				   "Error code %d\n", err);
			goto exit;
		}
	}

	snd_printd(LXP "opening hardware\n");
	err = lx_hardware_open(chip, substream);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "failed to open hardware. "
			   "Error code %d\n", err);
		goto exit;
	}

	err = lx_hardware_start(chip, substream);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "failed to start hardware. "
			   "Error code %d\n", err);
		goto exit;
	}

	chip->hardware_running[is_capture] = 1;

	if (chip->board_sample_rate != substream->runtime->rate) {
		if (!err)
			chip->board_sample_rate = substream->runtime->rate;
	}

exit:
	mutex_unlock(&chip->setup_mutex);
	return err;
}
/*
 * Common hw_params work for both directions: allocate the DMA buffer and
 * remember which substream drives this direction.  Returns 0 or a
 * negative errno.
 */
static int lx_pcm_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *hw_params, int is_capture)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	int err = 0;

	snd_printdd("->lx_pcm_hw_params\n");

	mutex_lock(&chip->setup_mutex);

	/* set dma buffer */
	err = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	/* don't record the substream if the buffer allocation failed */
	if (err < 0)
		goto exit;

	if (is_capture)
		chip->capture_stream.stream = substream;
	else
		chip->playback_stream.stream = substream;

exit:
	mutex_unlock(&chip->setup_mutex);
	return err;
}
/* hw_params callback for the playback direction. */
static int lx_pcm_hw_params_playback(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return lx_pcm_hw_params(substream, hw_params, 0);
}
/* hw_params callback for the capture direction. */
static int lx_pcm_hw_params_capture(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return lx_pcm_hw_params(substream, hw_params, 1);
}
static int lx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct lx6464es *chip = snd_pcm_substream_chip(substream);
int err = 0;
int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
snd_printdd("->lx_pcm_hw_free\n");
mutex_lock(&chip->setup_mutex);
if (chip->hardware_running[is_capture]) {
err = lx_hardware_stop(chip, substream);
if (err < 0) {
snd_printk(KERN_ERR LXP "failed to stop hardware. "
"Error code %d\n", err);
goto exit;
}
err = lx_hardware_close(chip, substream);
if (err < 0) {
snd_printk(KERN_ERR LXP "failed to close hardware. "
"Error code %d\n", err);
goto exit;
}
chip->hardware_running[is_capture] = 0;
}
err = snd_pcm_lib_free_pages(substream);
if (is_capture)
chip->capture_stream.stream = 0;
else
chip->playback_stream.stream = 0;
exit:
mutex_unlock(&chip->setup_mutex);
return err;
}
/*
 * Deferred start (runs from the trigger tasklet): queue one board buffer
 * per ALSA period, then start the stream.  On success the stream status
 * becomes RUNNING and the period counter is reset.
 */
static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
{
	struct snd_pcm_substream *substream = lx_stream->stream;
	const unsigned int is_capture = lx_stream->is_capture;

	int err;

	const u32 channels = substream->runtime->channels;
	const u32 bytes_per_frame = channels * 3;	/* 24-bit samples */
	const u32 period_size = substream->runtime->period_size;
	const u32 periods = substream->runtime->periods;
	const u32 period_bytes = period_size * bytes_per_frame;

	dma_addr_t buf = substream->dma_buffer.addr;
	int i;

	u32 needed, freed;
	u32 size_array[5];

	/* hand each period's slice of the DMA buffer to the board */
	for (i = 0; i != periods; ++i) {
		u32 buffer_index = 0;

		/* NOTE(review): err from lx_buffer_ask/lx_buffer_give is
		 * only logged implicitly, never acted upon */
		err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed,
				    size_array);
		snd_printdd(LXP "starting: needed %d, freed %d\n",
			    needed, freed);

		err = lx_buffer_give(chip, 0, is_capture, period_bytes,
				     lower_32_bits(buf), upper_32_bits(buf),
				     &buffer_index);

		snd_printdd(LXP "starting: buffer index %x on %p (%d bytes)\n",
			    buffer_index, (void *)buf, period_bytes);
		buf += period_bytes;
	}

	err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
	snd_printdd(LXP "starting: needed %d, freed %d\n", needed, freed);

	snd_printd(LXP "starting: starting stream\n");
	err = lx_stream_start(chip, 0, is_capture);
	if (err < 0)
		snd_printk(KERN_ERR LXP "couldn't start stream\n");
	else
		lx_stream->status = LX_STREAM_STATUS_RUNNING;

	lx_stream->frame_pos = 0;
}
/*
 * Deferred stop (runs from the trigger tasklet): stop the board stream
 * and mark the lx_stream FREE on success.
 */
static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
{
	const unsigned int is_capture = lx_stream->is_capture;
	int err;

	snd_printd(LXP "stopping: stopping stream\n");
	err = lx_stream_stop(chip, 0, is_capture);
	if (err < 0)
		snd_printk(KERN_ERR LXP "couldn't stop stream\n");
	else
		lx_stream->status = LX_STREAM_STATUS_FREE;
}
/*
 * Execute a previously scheduled start/stop request for one stream;
 * streams in any other state are left alone.
 */
static void lx_trigger_tasklet_dispatch_stream(struct lx6464es *chip,
					       struct lx_stream *lx_stream)
{
	if (lx_stream->status == LX_STREAM_STATUS_SCHEDULE_RUN)
		lx_trigger_start(chip, lx_stream);
	else if (lx_stream->status == LX_STREAM_STATUS_SCHEDULE_STOP)
		lx_trigger_stop(chip, lx_stream);
}
/*
 * Tasklet body: carry out pending trigger requests for both directions
 * under the chip spinlock (trigger callbacks only schedule; the actual
 * command traffic happens here).
 */
static void lx_trigger_tasklet(unsigned long data)
{
	struct lx6464es *chip = (struct lx6464es *)data;
	unsigned long flags;

	snd_printdd("->lx_trigger_tasklet\n");

	spin_lock_irqsave(&chip->lock, flags);
	lx_trigger_tasklet_dispatch_stream(chip, &chip->capture_stream);
	lx_trigger_tasklet_dispatch_stream(chip, &chip->playback_stream);
	spin_unlock_irqrestore(&chip->lock, flags);
}
/*
 * Record the requested trigger action on the stream and kick the tasklet
 * that performs it.  Only START and STOP are supported; anything else
 * yields -EINVAL without scheduling.
 */
static int lx_pcm_trigger_dispatch(struct lx6464es *chip,
				   struct lx_stream *lx_stream, int cmd)
{
	if (cmd == SNDRV_PCM_TRIGGER_START)
		lx_stream->status = LX_STREAM_STATUS_SCHEDULE_RUN;
	else if (cmd == SNDRV_PCM_TRIGGER_STOP)
		lx_stream->status = LX_STREAM_STATUS_SCHEDULE_STOP;
	else
		return -EINVAL;

	tasklet_schedule(&chip->trigger_tasklet);
	return 0;
}
/* ALSA trigger callback: route the command to the right lx_stream. */
static int lx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct lx6464es *chip = snd_pcm_substream_chip(substream);
	struct lx_stream *stream;

	snd_printdd("->lx_pcm_trigger\n");

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		stream = &chip->capture_stream;
	else
		stream = &chip->playback_stream;

	return lx_pcm_trigger_dispatch(chip, stream, cmd);
}
/*
 * Tear the device down: disable interrupts first, then release the IRQ,
 * unmap both BARs, release the PCI regions and free the chip structure.
 */
static int snd_lx6464es_free(struct lx6464es *chip)
{
	snd_printdd("->snd_lx6464es_free\n");

	lx_irq_disable(chip);

	if (chip->irq >= 0)
		free_irq(chip->irq, chip);

	iounmap(chip->port_dsp_bar);
	ioport_unmap(chip->port_plx_remapped);

	pci_release_regions(chip->pci);
	pci_disable_device(chip->pci);

	kfree(chip);

	return 0;
}
/* snd_device dtor hook: delegates to snd_lx6464es_free(). */
static int snd_lx6464es_dev_free(struct snd_device *device)
{
	struct lx6464es *chip = device->device_data;

	return snd_lx6464es_free(chip);
}
/* reset the dsp during initialization */
/* reset the dsp during initialization */
static int __devinit lx_init_xilinx_reset(struct lx6464es *chip)
{
	int i;
	u32 plx_reg = lx_plx_reg_read(chip, ePLX_CHIPSC);

	snd_printdd("->lx_init_xilinx_reset\n");

	/* activate reset of xilinx */
	plx_reg &= ~CHIPSC_RESET_XILINX;
	lx_plx_reg_write(chip, ePLX_CHIPSC, plx_reg);
	msleep(1);

	lx_plx_reg_write(chip, ePLX_MBOX3, 0);
	msleep(1);

	plx_reg |= CHIPSC_RESET_XILINX;
	lx_plx_reg_write(chip, ePLX_CHIPSC, plx_reg);

	/* deactivate reset of xilinx */
	/* MBOX3 becomes non-zero once the xilinx has come back up */
	for (i = 0; i != 100; ++i) {
		u32 reg_mbox3;
		msleep(10);
		reg_mbox3 = lx_plx_reg_read(chip, ePLX_MBOX3);
		if (reg_mbox3) {
			snd_printd(LXP "xilinx reset done\n");
			snd_printdd(LXP "xilinx took %d loops\n", i);
			break;
		}
	}

	/* todo: add some error handling? */

	/* clear mr */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	/* the ES xilinx may not be ready yet; give it time to settle */
	msleep(600);

	return 0;
}
/*
 * Verify we can talk to the Xilinx/MicroBlaze: a freshly cleared eReg_CSM
 * must read back as zero; one retry is attempted after poking ePLX_PCICR.
 * Returns 0 on success, -EAGAIN on failure.
 */
static int __devinit lx_init_xilinx_test(struct lx6464es *chip)
{
	u32 reg;

	snd_printdd("->lx_init_xilinx_test\n");

	/* TEST if we have access to Xilinx/MicroBlaze */
	lx_dsp_reg_write(chip, eReg_CSM, 0);

	reg = lx_dsp_reg_read(chip, eReg_CSM);

	if (reg) {
		snd_printk(KERN_ERR LXP "Problem: Reg_CSM %x.\n", reg);

		/* PCI9056_SPACE0_REMAP */
		lx_plx_reg_write(chip, ePLX_PCICR, 1);

		reg = lx_dsp_reg_read(chip, eReg_CSM);
		if (reg) {
			snd_printk(KERN_ERR LXP "Error: Reg_CSM %x.\n", reg);
			return -EAGAIN; /* seems to be appropriate */
		}
	}

	snd_printd(LXP "Xilinx/MicroBlaze access test successful\n");

	return 0;
}
/* initialize ethersound */
static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
{
int i;
u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);
/* configure 64 io channels */
u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) |
(64 << IOCR_INPUTS_OFFSET) |
(64 << IOCR_OUTPUTS_OFFSET) |
(FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET);
snd_printdd("->lx_init_ethersound\n");
chip->freq_ratio = FREQ_RATIO_SINGLE_MODE;
/*
* write it to the card !
* this actually kicks the ES xilinx, the first time since poweron.
* the MAC address in the Reg_ADMACESMSB Reg_ADMACESLSB registers
* is not ready before this is done, and the bit 2 in Reg_CSES is set.
* */
lx_dsp_reg_write(chip, eReg_CONFES, conf_es);
for (i = 0; i != 1000; ++i) {
if (lx_dsp_reg_read(chip, eReg_CSES) & 4) {
snd_printd(LXP "ethersound initialized after %dms\n",
i);
goto ethersound_initialized;
}
msleep(1);
}
snd_printk(KERN_WARNING LXP
"ethersound could not be initialized after %dms\n", i);
return -ETIMEDOUT;
ethersound_initialized:
snd_printd(LXP "ethersound initialized\n");
return 0;
}
/*
 * Read the DSP firmware version and the board clock frequency; the
 * frequency (when readable) becomes the fixed board sample rate.
 * Returns 0 on success, -EAGAIN if the DSP version can't be read.
 */
static int __devinit lx_init_get_version_features(struct lx6464es *chip)
{
	u32 dsp_version;

	int err;

	snd_printdd("->lx_init_get_version_features\n");

	err = lx_dsp_get_version(chip, &dsp_version);
	if (err == 0) {
		u32 freq;

		snd_printk(LXP "DSP version: V%02d.%02d #%d\n",
			   (dsp_version>>16) & 0xff, (dsp_version>>8) & 0xff,
			   dsp_version & 0xff);

		/* later: what firmware version do we expect? */

		/* retrieve Play/Rec features */
		/* done here because we may have to handle alternate
		 * DSP files. */
		/* later */

		/* init the EtherSound sample rate */
		err = lx_dsp_get_clock_frequency(chip, &freq);
		if (err == 0) {
			chip->board_sample_rate = freq;
			/* only print freq when it was actually read:
			 * previously it could be logged uninitialized */
			snd_printd(LXP "actual clock frequency %d\n", freq);
		}
	} else {
		snd_printk(KERN_ERR LXP "DSP corrupted \n");
		err = -EAGAIN;
	}

	return err;
}
/*
 * Program the board block size ("granularity").  The requested value is
 * rounded up to the next power of two within [MICROBLAZE_IBL_MIN,
 * MICROBLAZE_IBL_MAX]; a no-op if the board is already at that value.
 */
static int lx_set_granularity(struct lx6464es *chip, u32 gran)
{
	int err = 0;
	u32 snapped_gran = MICROBLAZE_IBL_MIN;

	snd_printdd("->lx_set_granularity\n");

	/* blocksize is a power of 2 */
	while ((snapped_gran < gran) &&
	       (snapped_gran < MICROBLAZE_IBL_MAX)) {
		snapped_gran *= 2;
	}

	if (snapped_gran == chip->pcm_granularity)
		return 0;

	err = lx_dsp_set_granularity(chip, snapped_gran);
	if (err < 0) {
		snd_printk(KERN_WARNING LXP "could not set granularity\n");
		err = -EAGAIN;
	}

	if (snapped_gran != gran)
		snd_printk(LXP "snapped blocksize to %d\n", snapped_gran);

	snd_printd(LXP "set blocksize on board %d\n", snapped_gran);
	/* cached even if the board write failed — TODO confirm intent */
	chip->pcm_granularity = snapped_gran;

	return err;
}
/* initialize and test the xilinx dsp chip */
static int __devinit lx_init_dsp(struct lx6464es *chip)
{
int err;
u8 mac_address[6];
int i;
snd_printdd("->lx_init_dsp\n");
snd_printd(LXP "initialize board\n");
err = lx_init_xilinx_reset(chip);
if (err)
return err;
snd_printd(LXP "testing board\n");
err = lx_init_xilinx_test(chip);
if (err)
return err;
snd_printd(LXP "initialize ethersound configuration\n");
err = lx_init_ethersound_config(chip);
if (err)
return err;
lx_irq_enable(chip);
/** \todo the mac address should be ready by not, but it isn't,
* so we wait for it */
for (i = 0; i != 1000; ++i) {
err = lx_dsp_get_mac(chip, mac_address);
if (err)
return err;
if (mac_address[0] || mac_address[1] || mac_address[2] ||
mac_address[3] || mac_address[4] || mac_address[5])
goto mac_ready;
msleep(1);
}
return -ETIMEDOUT;
mac_ready:
snd_printd(LXP "mac address ready read after: %dms\n", i);
snd_printk(LXP "mac address: %02X.%02X.%02X.%02X.%02X.%02X\n",
mac_address[0], mac_address[1], mac_address[2],
mac_address[3], mac_address[4], mac_address[5]);
err = lx_init_get_version_features(chip);
if (err)
return err;
lx_set_granularity(chip, MICROBLAZE_IBL_DEFAULT);
chip->playback_mute = 0;
return err;
}
/* PCM callbacks, playback direction. */
static struct snd_pcm_ops lx_ops_playback = {
	.open      = lx_pcm_open,
	.close     = lx_pcm_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.prepare   = lx_pcm_prepare,
	.hw_params = lx_pcm_hw_params_playback,
	.hw_free   = lx_pcm_hw_free,
	.trigger   = lx_pcm_trigger,
	.pointer   = lx_pcm_stream_pointer,
};
/* PCM callbacks, capture direction. */
static struct snd_pcm_ops lx_ops_capture = {
	.open      = lx_pcm_open,
	.close     = lx_pcm_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.prepare   = lx_pcm_prepare,
	.hw_params = lx_pcm_hw_params_capture,
	.hw_free   = lx_pcm_hw_free,
	.trigger   = lx_pcm_trigger,
	.pointer   = lx_pcm_stream_pointer,
};
/*
 * Create the PCM device (one playback and one capture substream) and
 * preallocate the worst-case DMA buffer.  Returns 0 or a negative errno.
 */
static int __devinit lx_pcm_create(struct lx6464es *chip)
{
	int err;
	struct snd_pcm *pcm;

	u32 size = 64 *		     /* channels */
		3 *		     /* 24 bit samples */
		MAX_STREAM_BUFFER *  /* periods */
		MICROBLAZE_IBL_MAX * /* frames per period */
		2;		     /* duplex */

	size = PAGE_ALIGN(size);

	/* hardcoded device name & channel count */
	err = snd_pcm_new(chip->card, (char *)card_name, 0,
			  1, 1, &pcm);
	/* check the result before dereferencing pcm */
	if (err < 0)
		return err;

	pcm->private_data = chip;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &lx_ops_playback);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &lx_ops_capture);

	pcm->info_flags = 0;
	strcpy(pcm->name, card_name);

	err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
						    snd_dma_pci_data(chip->pci),
						    size, size);
	if (err < 0)
		return err;

	chip->pcm = pcm;
	chip->capture_stream.is_capture = 1;

	return 0;
}
/* .info callback for the "PCM Playback Switch" control: a single
 * boolean element (0/1). */
static int lx_control_playback_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
/* .get callback: report the cached playback mute/switch state kept in
 * chip->playback_mute (no hardware access). */
static int lx_control_playback_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct lx6464es *chip = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = chip->playback_mute;
return 0;
}
/* .put callback: apply a new playback switch state.
 * Returns 1 if the value changed, 0 otherwise (ALSA convention). */
static int lx_control_playback_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct lx6464es *chip = snd_kcontrol_chip(kcontrol);
int changed = 0;
int current_value = chip->playback_mute;
/* The control is boolean, so "differs from current" implies the new
 * value equals !current_value; toggling is equivalent to assigning. */
if (current_value != ucontrol->value.integer.value[0]) {
/* NOTE(review): presumably arg1 is the capture flag (0 = playback)
 * and arg2 the unmute state -- confirm against lx_level_unmute(). */
lx_level_unmute(chip, 0, !current_value);
chip->playback_mute = !current_value;
changed = 1;
}
return changed;
}
/* Mixer control definition for the master PCM playback switch;
 * registered once per card via snd_ctl_add() in snd_lx6464es_create(). */
static struct snd_kcontrol_new lx_control_playback_switch __devinitdata = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.index = 0,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.private_value = 0,
.info = lx_control_playback_info,
.get = lx_control_playback_get,
.put = lx_control_playback_put
};
/*
 * /proc "levels" read handler: dump the 64 capture and 64 playback peak
 * meters as two 8x8 grids of 32-bit hex values. If fetching either set
 * of peaks fails, output stops at that point (same as before).
 */
static void lx_proc_levels_read(struct snd_info_entry *entry,
				struct snd_info_buffer *buffer)
{
	struct lx6464es *chip = entry->private_data;
	u32 levels[64];
	int idx;

	snd_iprintf(buffer, "capture levels:\n");
	if (lx_level_peaks(chip, 1, 64, levels) < 0)
		return;
	for (idx = 0; idx < 64; ++idx) {
		snd_iprintf(buffer, "%08x ", levels[idx]);
		if ((idx & 7) == 7)	/* end of an 8-entry row */
			snd_iprintf(buffer, "\n");
	}

	snd_iprintf(buffer, "\nplayback levels:\n");
	if (lx_level_peaks(chip, 0, 64, levels) < 0)
		return;
	for (idx = 0; idx < 64; ++idx) {
		snd_iprintf(buffer, "%08x ", levels[idx]);
		if ((idx & 7) == 7)
			snd_iprintf(buffer, "\n");
	}

	snd_iprintf(buffer, "\n");
}
/* Register the read-only /proc "levels" entry on the card; reads are
 * served by lx_proc_levels_read(). Returns 0 or a negative errno. */
static int __devinit lx_proc_create(struct snd_card *card, struct lx6464es *chip)
{
struct snd_info_entry *entry;
int err = snd_card_proc_new(card, "levels", &entry);
if (err < 0)
return err;
snd_info_set_text_ops(entry, chip, lx_proc_levels_read);
return 0;
}
/*
 * Allocate and initialise the chip instance: PCI setup, locks/tasklets,
 * I/O regions, IRQ, then DSP boot and ALSA device creation. On success
 * *rchip points to the new chip; on failure resources are unwound in
 * reverse acquisition order via the labels at the bottom.
 */
static int __devinit snd_lx6464es_create(struct snd_card *card,
struct pci_dev *pci,
struct lx6464es **rchip)
{
struct lx6464es *chip;
int err;
/* Once snd_device_new() has succeeded, snd_card_free() in the caller
 * invokes this dev_free hook, so later error paths may simply return. */
static struct snd_device_ops ops = {
.dev_free = snd_lx6464es_dev_free,
};
snd_printdd("->snd_lx6464es_create\n");
*rchip = NULL;
/* enable PCI device */
err = pci_enable_device(pci);
if (err < 0)
return err;
pci_set_master(pci);
/* check if we can restrict PCI DMA transfers to 32 bits */
err = pci_set_dma_mask(pci, DMA_BIT_MASK(32));
if (err < 0) {
snd_printk(KERN_ERR "architecture does not support "
"32bit PCI busmaster DMA\n");
pci_disable_device(pci);
return -ENXIO;
}
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL) {
err = -ENOMEM;
goto alloc_failed;
}
chip->card = card;
chip->pci = pci;
chip->irq = -1;
/* initialize synchronization structs */
spin_lock_init(&chip->lock);
spin_lock_init(&chip->msg_lock);
mutex_init(&chip->setup_mutex);
tasklet_init(&chip->trigger_tasklet, lx_trigger_tasklet,
(unsigned long)chip);
tasklet_init(&chip->tasklet_capture, lx_tasklet_capture,
(unsigned long)chip);
tasklet_init(&chip->tasklet_playback, lx_tasklet_playback,
(unsigned long)chip);
/* request resources */
err = pci_request_regions(pci, card_name);
if (err < 0)
goto request_regions_failed;
/* plx port */
chip->port_plx = pci_resource_start(pci, 1);
chip->port_plx_remapped = ioport_map(chip->port_plx,
pci_resource_len(pci, 1));
/* dsp port */
/* NOTE(review): pci_ioremap_bar() can return NULL and the result is
 * not checked before later use -- worth confirming/fixing. */
chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
err = request_irq(pci->irq, lx_interrupt, IRQF_SHARED,
card_name, chip);
if (err) {
snd_printk(KERN_ERR LXP "unable to grab IRQ %d\n", pci->irq);
goto request_irq_failed;
}
chip->irq = pci->irq;
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err < 0)
goto device_new_failed;
/* From here on, cleanup is delegated to snd_card_free() in the caller
 * (see out_free in snd_lx6464es_probe()); plain returns are safe. */
err = lx_init_dsp(chip);
if (err < 0) {
snd_printk(KERN_ERR LXP "error during DSP initialization\n");
return err;
}
err = lx_pcm_create(chip);
if (err < 0)
return err;
err = lx_proc_create(card, chip);
if (err < 0)
return err;
err = snd_ctl_add(card, snd_ctl_new1(&lx_control_playback_switch,
chip));
if (err < 0)
return err;
snd_card_set_dev(card, &pci->dev);
*rchip = chip;
return 0;
/* unwind in reverse acquisition order */
device_new_failed:
free_irq(pci->irq, chip);
request_irq_failed:
pci_release_regions(pci);
request_regions_failed:
kfree(chip);
alloc_failed:
pci_disable_device(pci);
return err;
}
/*
 * PCI probe: create the ALSA card shell, build the chip instance, fill
 * in the card identification strings and register the card. The static
 * 'dev' counter indexes the module's index[]/id[]/enable[] parameter
 * arrays across multiple probed devices.
 */
static int __devinit snd_lx6464es_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct lx6464es *chip;
int err;
snd_printdd("->snd_lx6464es_probe\n");
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
/* card slot disabled by module parameter; skip but consume it */
dev++;
return -ENOENT;
}
err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
if (err < 0)
return err;
err = snd_lx6464es_create(card, pci, &chip);
if (err < 0) {
snd_printk(KERN_ERR LXP "error during snd_lx6464es_create\n");
goto out_free;
}
strcpy(card->driver, "lx6464es");
strcpy(card->shortname, "Digigram LX6464ES");
sprintf(card->longname, "%s at 0x%lx, 0x%p, irq %i",
card->shortname, chip->port_plx,
chip->port_dsp_bar, chip->irq);
err = snd_card_register(card);
if (err < 0)
goto out_free;
snd_printdd(LXP "initialization successful\n");
pci_set_drvdata(pci, card);
dev++;
return 0;
out_free:
/* snd_card_free() also runs the chip's dev_free hook if registered */
snd_card_free(card);
return err;
}
/* PCI remove: free the ALSA card (which tears down the chip via its
 * dev_free hook) and clear the stale driver-data pointer. */
static void __devexit snd_lx6464es_remove(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);

	snd_card_free(card);
	pci_set_drvdata(pci, NULL);
}
/* PCI driver glue; the device ID table (snd_lx6464es_ids) is defined
 * elsewhere in this file. */
static struct pci_driver driver = {
.name = "Digigram LX6464ES",
.id_table = snd_lx6464es_ids,
.probe = snd_lx6464es_probe,
.remove = __devexit_p(snd_lx6464es_remove),
};
/* module initialization */
/* Standard PCI-driver module entry/exit: register on load, unregister
 * on unload; all real work happens in probe/remove. */
static int __init mod_init(void)
{
return pci_register_driver(&driver);
}
static void __exit mod_exit(void)
{
pci_unregister_driver(&driver);
}
module_init(mod_init);
module_exit(mod_exit);
| gpl-2.0 |
thypon/bowser-kernel | arch/m68k/kernel/setup_no.c | 4470 | 9142 | /*
* linux/arch/m68knommu/kernel/setup.c
*
* Copyright (C) 1999-2007 Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998,1999 D. Jeff Dionne <jeff@uClinux.org>
* Copyleft ()) 2000 James D. Schettine {james@telos-systems.com}
* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
* Copyright (C) 1995 Hamish Macdonald
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 2001 Lineo, Inc. <www.lineo.com>
*
* 68VZ328 Fixes/support Evan Stawnyczy <e@lineo.ca>
*/
/*
* This file handles the architecture-dependent parts of system setup
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/rtc.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
/* Usable RAM bounds discovered at boot (set in setup_arch()); exported
 * for use by drivers and the memory allocators. */
unsigned long memory_start;
unsigned long memory_end;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
/* Kernel command line assembled from board config / bootloader. */
char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
int (*mach_set_clock_mmss)(unsigned long);
int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
void (*mach_reset)(void);
void (*mach_halt)(void);
void (*mach_power_off)(void);
#ifdef CONFIG_M68328
#define CPU_NAME "MC68328"
#endif
#ifdef CONFIG_M68EZ328
#define CPU_NAME "MC68EZ328"
#endif
#ifdef CONFIG_M68VZ328
#define CPU_NAME "MC68VZ328"
#endif
#ifdef CONFIG_M68360
#define CPU_NAME "MC68360"
#endif
#ifndef CPU_NAME
#define CPU_NAME "UNKNOWN"
#endif
/*
* Different cores have different instruction execution timings.
* The old/traditional 68000 cores are basically all the same, at 16.
* The ColdFire cores vary a little, their values are defined in their
* headers. We default to the standard 68000 value here.
*/
#ifndef CPU_INSTR_PER_JIFFY
#define CPU_INSTR_PER_JIFFY 16
#endif
#if defined(CONFIG_UBOOT)
/*
* parse_uboot_commandline
*
* Copies u-boot commandline arguments and store them in the proper linux
* variables.
*
* Assumes:
* _init_sp global contains the address in the stack pointer when the
* kernel starts (see head.S::_start)
*
* U-Boot calling convention:
* (*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end);
*
* _init_sp can be parsed as such
*
* _init_sp+00 = u-boot cmd after jsr into kernel (skip)
* _init_sp+04 = &kernel board_info (residual data)
* _init_sp+08 = &initrd_start
* _init_sp+12 = &initrd_end
* _init_sp+16 = &cmd_start
* _init_sp+20 = &cmd_end
*
* This also assumes that the memory locations pointed to are still
* unmodified. U-boot places them near the end of external SDRAM.
*
* Argument(s):
* commandp = the linux commandline arg container to fill.
* size = the sizeof commandp.
*
* Returns:
*/
/* See the stack-layout description in the comment block above: u-boot's
 * call arguments are recovered from the saved initial stack pointer. */
void parse_uboot_commandline(char *commandp, int size)
{
extern unsigned long _init_sp;
unsigned long *sp;
unsigned long uboot_kbd;
unsigned long uboot_initrd_start, uboot_initrd_end;
unsigned long uboot_cmd_start, uboot_cmd_end;
/* sp[0] is the return address into u-boot and is skipped */
sp = (unsigned long *)_init_sp;
uboot_kbd = sp[1];
uboot_initrd_start = sp[2];
uboot_initrd_end = sp[3];
uboot_cmd_start = sp[4];
uboot_cmd_end = sp[5];
/* NOTE(review): strncpy() may leave commandp unterminated when the
 * u-boot command line fills the buffer; callers NUL-terminate the
 * global command_line afterwards (see setup_arch()). */
if (uboot_cmd_start && uboot_cmd_end)
strncpy(commandp, (const char *)uboot_cmd_start, size);
#if defined(CONFIG_BLK_DEV_INITRD)
if (uboot_initrd_start && uboot_initrd_end &&
(uboot_initrd_end > uboot_initrd_start)) {
initrd_start = uboot_initrd_start;
initrd_end = uboot_initrd_end;
ROOT_DEV = Root_RAM0;
printk(KERN_INFO "initrd at 0x%lx:0x%lx\n",
initrd_start, initrd_end);
}
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
}
#endif /* #if defined(CONFIG_UBOOT) */
/*
 * Architecture-specific boot setup for m68knommu: establish memory
 * bounds, assemble the kernel command line (board config, optional
 * CONFIG_BOOTPARAM string, optional u-boot arguments), print banner
 * lines, and hand all memory to the bootmem allocator.
 */
void __init setup_arch(char **cmdline_p)
{
int bootmap_size;
memory_start = PAGE_ALIGN(_ramstart);
memory_end = _ramend;
init_mm.start_code = (unsigned long) &_stext;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) 0;
/* board-specific hook may pre-fill the command line */
config_BSP(&command_line[0], sizeof(command_line));
#if defined(CONFIG_BOOTPARAM)
/* compiled-in command line overrides what config_BSP() provided */
strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line));
command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_BOOTPARAM */
#if defined(CONFIG_UBOOT)
/* CONFIG_UBOOT and CONFIG_BOOTPARAM defined, concatenate cmdline */
#if defined(CONFIG_BOOTPARAM)
/* Add the whitespace separator */
command_line[strlen(CONFIG_BOOTPARAM_STRING)] = ' ';
/* Parse uboot command line into the rest of the buffer */
parse_uboot_commandline(
&command_line[(strlen(CONFIG_BOOTPARAM_STRING)+1)],
(sizeof(command_line) -
(strlen(CONFIG_BOOTPARAM_STRING)+1)));
/* Only CONFIG_UBOOT defined, create cmdline */
#else
parse_uboot_commandline(&command_line[0], sizeof(command_line));
#endif /* CONFIG_BOOTPARAM */
command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_UBOOT */
printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU_NAME "\n");
#ifdef CONFIG_UCDIMM
printk(KERN_INFO "uCdimm by Lineo, Inc. <www.lineo.com>\n");
#endif
#ifdef CONFIG_M68VZ328
printk(KERN_INFO "M68VZ328 support by Evan Stawnyczy <e@lineo.ca>\n");
#endif
#ifdef CONFIG_COLDFIRE
printk(KERN_INFO "COLDFIRE port done by Greg Ungerer, gerg@snapgear.com\n");
#ifdef CONFIG_M5307
printk(KERN_INFO "Modified for M5307 by Dave Miller, dmiller@intellistor.com\n");
#endif
#ifdef CONFIG_ELITE
printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n");
#endif
#endif
printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
printk(KERN_INFO "TRG SuperPilot FLASH card support <info@trgnet.com>\n");
#endif
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
printk(KERN_INFO "PalmV support by Lineo Inc. <jeff@uclinux.com>\n");
#endif
#if defined (CONFIG_M68360)
printk(KERN_INFO "QUICC port done by SED Systems <hamilton@sedsystems.ca>,\n");
printk(KERN_INFO "based on 2.0.38 port by Lineo Inc. <mleslie@lineo.com>.\n");
#endif
#ifdef CONFIG_DRAGEN2
printk(KERN_INFO "DragonEngine II board support by Georges Menie\n");
#endif
#ifdef CONFIG_M5235EVB
printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
"BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
(int) &_sdata, (int) &_edata,
(int) &_sbss, (int) &_ebss);
pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
(int) &_ebss, (int) memory_start,
(int) memory_start, (int) memory_end);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE-1] = 0;
#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
/*
 * Give all the memory to the bootmap allocator, tell it to put the
 * boot mem_map at the start of memory.
 */
bootmap_size = init_bootmem_node(
NODE_DATA(0),
memory_start >> PAGE_SHIFT, /* map goes here */
PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */
memory_end >> PAGE_SHIFT);
/*
 * Free the usable memory, we have to make sure we do not free
 * the bootmem bitmap so we then reserve it after freeing it :-)
 */
free_bootmem(memory_start, memory_end - memory_start);
reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
/* keep a u-boot-provided initrd out of the allocator's hands */
if ((initrd_start > 0) && (initrd_start < initrd_end) &&
(initrd_end < memory_end))
reserve_bootmem(initrd_start, initrd_end - initrd_start,
BOOTMEM_DEFAULT);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
/*
 * Get kmalloc into gear.
 */
paging_init();
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
char *cpu, *mmu, *fpu;
u_long clockfreq;
cpu = CPU_NAME;
mmu = "none";
fpu = "none";
/* Estimated clock: derived from the bogomips calibration loop count
 * and the per-core instructions-per-jiffy constant -- an estimate,
 * not a measured frequency. */
clockfreq = (loops_per_jiffy * HZ) * CPU_INSTR_PER_JIFFY;
seq_printf(m, "CPU:\t\t%s\n"
"MMU:\t\t%s\n"
"FPU:\t\t%s\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpu, mmu, fpu,
clockfreq / 1000000,
(clockfreq / 100000) % 10,
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100,
(loops_per_jiffy * HZ));
return 0;
}
/* seq_file iterator start: one dummy element per possible CPU.
 * Any non-NULL token works here; show_cpuinfo() ignores it. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= NR_CPUS)
		return NULL;
	return (void *) 0x12345678;
}
/* Advance the /proc/cpuinfo iterator; bound check is delegated to
 * c_start(). */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
/* Iterator teardown: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo on m68knommu. */
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
| gpl-2.0 |
CalmYak/N1-Kernel-Source-4.2 | kernel/wait.c | 4726 | 8441 | /*
* Generic waiting primitives.
*
* (C) 2004 William Irwin, Oracle
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
/* Initialise a waitqueue head: its spinlock (with a lockdep class/name
 * for lock-debugging), and an empty waiter list. */
void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
spin_lock_init(&q->lock);
lockdep_set_class_and_name(&q->lock, key, name);
INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_waitqueue_head);
/* Queue a waiter with WQ_FLAG_EXCLUSIVE cleared, i.e. a non-exclusive
 * waiter (by kernel convention, wakeups do not stop at it). */
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
/* Queue an exclusive waiter at the tail, so non-exclusive waiters (at
 * the front) are woken first and a wakeup can stop after one exclusive
 * waiter. */
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
wait->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue_tail(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
/* Unlink a waiter from the queue under the queue lock. */
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
__remove_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
/* First half of the wait pattern: enqueue (non-exclusive) if not already
 * queued, then set the task state. Idempotent, so it is safe inside the
 * usual prepare/check/schedule loop; pair with finish_wait(). */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
/* ordering: state change after queueing -- see comment above */
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
/* Like prepare_to_wait() but queues the waiter exclusively (tail add,
 * WQ_FLAG_EXCLUSIVE set); same barrier ordering requirements apply. */
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
wait->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue_tail(q, wait);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
__set_current_state(TASK_RUNNING);
/*
 * We can check for list emptiness outside the lock
 * IFF:
 * - we use the "careful" check that verifies both
 * the next and prev pointers, so that there cannot
 * be any half-pending updates in progress on other
 * CPU's that we haven't seen yet (and that might
 * still change the stack area.
 * and
 * - all other users take the lock (ie we can only
 * have _one_ other CPU that looks at or modifies
 * the list).
 */
if (!list_empty_careful(&wait->task_list)) {
spin_lock_irqsave(&q->lock, flags);
/* list_del_init (not list_del): the descriptor may be reused */
list_del_init(&wait->task_list);
spin_unlock_irqrestore(&q->lock, flags);
}
}
EXPORT_SYMBOL(finish_wait);
/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
unsigned int mode, void *key)
{
unsigned long flags;
__set_current_state(TASK_RUNNING);
spin_lock_irqsave(&q->lock, flags);
if (!list_empty(&wait->task_list))
list_del_init(&wait->task_list);
else if (waitqueue_active(q))
/* already dequeued by a wakeup: pass the wakeup on so it
 * isn't lost (the whole point of this helper) */
__wake_up_locked_key(q, mode, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);
/* Wake function that, on a successful wakeup, also dequeues the waiter
 * so a subsequent finish_wait() finds the list entry already empty. */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
int ret = default_wake_function(wait, mode, sync, key);
if (ret)
list_del_init(&wait->task_list);
return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
/*
 * Wake function for bit-waitqueues (which are hashed and shared): only
 * wake a waiter whose key matches this exact (word, bit) pair, and only
 * once the bit has actually been cleared.
 */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *k = arg;
	struct wait_bit_queue *wbq =
		container_of(wait, struct wait_bit_queue, wait);

	/* a different (word, bit) sharing the hashed queue: skip */
	if (wbq->key.flags != k->flags || wbq->key.bit_nr != k->bit_nr)
		return 0;
	/* bit still set: the awaited event has not happened yet */
	if (test_bit(k->bit_nr, k->flags))
		return 0;
	return autoremove_wake_function(wait, mode, sync, k);
}
EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
 * permitted return codes. Nonzero return codes halt waiting and return.
 */
/* Wait (via @action, typically a schedule-like callback) until the bit
 * described by @q is clear, or @action reports a nonzero code (e.g. a
 * signal). Returns 0 when the bit cleared, else @action's code. */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
int (*action)(void *), unsigned mode)
{
int ret = 0;
do {
prepare_to_wait(wq, &q->wait, mode);
/* only sleep while the bit is still observed set */
if (test_bit(q->key.bit_nr, q->key.flags))
ret = (*action)(q->key.flags);
} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
finish_wait(wq, &q->wait);
return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
/* Convenience wrapper: build the hashed waitqueue and wait descriptor
 * for (word, bit) and delegate to __wait_on_bit(). */
int __sched out_of_line_wait_on_bit(void *word, int bit,
int (*action)(void *), unsigned mode)
{
wait_queue_head_t *wq = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wait, word, bit);
return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
/* Like __wait_on_bit() but atomically acquires the bit as a lock:
 * loops until test_and_set_bit() observes it clear and sets it.
 * Waiters queue exclusively; on an error from @action the exclusive
 * wait is aborted via abort_exclusive_wait() so a concurrent wakeup is
 * passed on to the next waiter rather than lost. Returns 0 once the
 * bit is owned, else @action's nonzero code. */
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
int (*action)(void *), unsigned mode)
{
do {
int ret;
prepare_to_wait_exclusive(wq, &q->wait, mode);
/* bit already clear: retry the test_and_set immediately */
if (!test_bit(q->key.bit_nr, q->key.flags))
continue;
ret = action(q->key.flags);
if (!ret)
continue;
abort_exclusive_wait(wq, &q->wait, mode, &q->key);
return ret;
} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
finish_wait(wq, &q->wait);
return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
/* Convenience wrapper for __wait_on_bit_lock() on a (word, bit) pair. */
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
int (*action)(void *), unsigned mode)
{
wait_queue_head_t *wq = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wait, word, bit);
return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
/* Wake one waiter on @wq whose key matches (word, bit); cheap no-op if
 * the queue has no waiters (caller must provide the memory barrier --
 * see wake_up_bit() below). */
void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (!waitqueue_active(wq))
		return;
	__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
/**
* wake_up_bit - wake up a waiter on a bit
* @word: the word being waited on, a kernel virtual address
* @bit: the bit of the word being waited on
*
* There is a standard hashed waitqueue table for generic use. This
* is the part of the hashtable's accessor API that wakes up waiters
* on a bit. For instance, if one were to have waiters on a bitflag,
* one would call wake_up_bit() after clearing the bit.
*
* In order for this to function properly, as it uses waitqueue_active()
* internally, some kind of memory barrier must be done prior to calling
* this. Typically, this will be smp_mb__after_clear_bit(), but in some
* cases where bitflags are manipulated non-atomically under a lock, one
* may need to use a less regular barrier, such fs/inode.c's smp_mb(),
* because spin_unlock() does not guarantee a memory barrier.
*/
void wake_up_bit(void *word, int bit)
{
__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
/* Map a (word, bit) pair to one of the shared per-zone hashed
 * waitqueues. The word's address is left-shifted by log2(word size in
 * bits) before mixing in the bit number (presumably so that distinct
 * bits of nearby words spread across buckets -- the exact rationale is
 * not visible here). */
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
const int shift = BITS_PER_LONG == 32 ? 5 : 6;
const struct zone *zone = page_zone(virt_to_page(word));
unsigned long val = (unsigned long)word << shift | bit;
return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);
| gpl-2.0 |
Where-No-Man-Has-Gone-Before/kernel_htc_chacha-CHS | crypto/serpent.c | 4982 | 20267 | /*
* Cryptographic API.
*
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
* 2003 Herbert Valerio Riedel <hvr@gnu.org>
*
* Added tnepres support: Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
* Based on code by hvr
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
/* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
*/
#define SERPENT_MIN_KEY_SIZE 0
#define SERPENT_MAX_KEY_SIZE 32
#define SERPENT_EXPKEY_WORDS 132
#define SERPENT_BLOCK_SIZE 16
#define PHI 0x9e3779b9UL
#define keyiter(a,b,c,d,i,j) \
b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b,11); k[j] = b;
#define loadkeys(x0,x1,x2,x3,i) \
x0=k[i]; x1=k[i+1]; x2=k[i+2]; x3=k[i+3];
#define storekeys(x0,x1,x2,x3,i) \
k[i]=x0; k[i+1]=x1; k[i+2]=x2; k[i+3]=x3;
#define K(x0,x1,x2,x3,i) \
x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0];
#define LK(x0,x1,x2,x3,x4,i) \
x0=rol32(x0,13);\
x2=rol32(x2,3); x1 ^= x0; x4 = x0 << 3; \
x3 ^= x2; x1 ^= x2; \
x1=rol32(x1,1); x3 ^= x4; \
x3=rol32(x3,7); x4 = x1; \
x0 ^= x1; x4 <<= 7; x2 ^= x3; \
x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
x1 ^= k[4*i+1]; x0=rol32(x0,5); x2=rol32(x2,22);\
x0 ^= k[4*i+0]; x2 ^= k[4*i+2];
#define KL(x0,x1,x2,x3,x4,i) \
x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
x3 ^= k[4*i+3]; x0=ror32(x0,5); x2=ror32(x2,22);\
x4 = x1; x2 ^= x3; x0 ^= x3; \
x4 <<= 7; x0 ^= x1; x1=ror32(x1,1); \
x2 ^= x4; x3=ror32(x3,7); x4 = x0 << 3; \
x1 ^= x0; x3 ^= x4; x0=ror32(x0,13);\
x1 ^= x2; x3 ^= x2; x2=ror32(x2,3);
#define S0(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 |= x0; x0 ^= x4; x4 ^= x2; \
x4 =~ x4; x3 ^= x1; x1 &= x0; \
x1 ^= x4; x2 ^= x0; x0 ^= x3; \
x4 |= x0; x0 ^= x2; x2 &= x1; \
x3 ^= x2; x1 =~ x1; x2 ^= x4; \
x1 ^= x2;
#define S1(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x0; x0 ^= x3; x3 =~ x3; \
x4 &= x1; x0 |= x1; x3 ^= x2; \
x0 ^= x3; x1 ^= x3; x3 ^= x4; \
x1 |= x4; x4 ^= x2; x2 &= x0; \
x2 ^= x1; x1 |= x0; x0 =~ x0; \
x0 ^= x2; x4 ^= x1;
#define S2(x0,x1,x2,x3,x4) \
x3 =~ x3; \
x1 ^= x0; x4 = x0; x0 &= x2; \
x0 ^= x3; x3 |= x4; x2 ^= x1; \
x3 ^= x1; x1 &= x0; x0 ^= x2; \
x2 &= x3; x3 |= x1; x0 =~ x0; \
x3 ^= x0; x4 ^= x0; x0 ^= x2; \
x1 |= x2;
#define S3(x0,x1,x2,x3,x4) \
x4 = x1; \
x1 ^= x3; x3 |= x0; x4 &= x0; \
x0 ^= x2; x2 ^= x1; x1 &= x3; \
x2 ^= x3; x0 |= x4; x4 ^= x3; \
x1 ^= x0; x0 &= x3; x3 &= x4; \
x3 ^= x2; x4 |= x1; x2 &= x1; \
x4 ^= x3; x0 ^= x3; x3 ^= x2;
#define S4(x0,x1,x2,x3,x4) \
x4 = x3; \
x3 &= x0; x0 ^= x4; \
x3 ^= x2; x2 |= x4; x0 ^= x1; \
x4 ^= x3; x2 |= x0; \
x2 ^= x1; x1 &= x0; \
x1 ^= x4; x4 &= x2; x2 ^= x3; \
x4 ^= x0; x3 |= x1; x1 =~ x1; \
x3 ^= x0;
#define S5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x0; \
x2 ^= x1; x3 =~ x3; x4 ^= x0; \
x0 ^= x2; x1 &= x4; x4 |= x3; \
x4 ^= x0; x0 &= x3; x1 ^= x3; \
x3 ^= x2; x0 ^= x1; x2 &= x4; \
x1 ^= x2; x2 &= x0; \
x3 ^= x2;
#define S6(x0,x1,x2,x3,x4) \
x4 = x1; \
x3 ^= x0; x1 ^= x2; x2 ^= x0; \
x0 &= x3; x1 |= x3; x4 =~ x4; \
x0 ^= x1; x1 ^= x2; \
x3 ^= x4; x4 ^= x0; x2 &= x0; \
x4 ^= x1; x2 ^= x3; x3 &= x1; \
x3 ^= x0; x1 ^= x2;
#define S7(x0,x1,x2,x3,x4) \
x1 =~ x1; \
x4 = x1; x0 =~ x0; x1 &= x2; \
x1 ^= x3; x3 |= x4; x4 ^= x2; \
x2 ^= x3; x3 ^= x0; x0 |= x1; \
x2 &= x0; x0 ^= x4; x4 ^= x3; \
x3 &= x0; x4 ^= x1; \
x2 ^= x4; x3 ^= x1; x4 |= x0; \
x4 ^= x1;
#define SI0(x0,x1,x2,x3,x4) \
x4 = x3; x1 ^= x0; \
x3 |= x1; x4 ^= x1; x0 =~ x0; \
x2 ^= x3; x3 ^= x0; x0 &= x1; \
x0 ^= x2; x2 &= x3; x3 ^= x4; \
x2 ^= x3; x1 ^= x3; x3 &= x0; \
x1 ^= x0; x0 ^= x2; x4 ^= x3;
#define SI1(x0,x1,x2,x3,x4) \
x1 ^= x3; x4 = x0; \
x0 ^= x2; x2 =~ x2; x4 |= x1; \
x4 ^= x3; x3 &= x1; x1 ^= x2; \
x2 &= x4; x4 ^= x1; x1 |= x3; \
x3 ^= x0; x2 ^= x0; x0 |= x4; \
x2 ^= x4; x1 ^= x0; \
x4 ^= x1;
#define SI2(x0,x1,x2,x3,x4) \
x2 ^= x1; x4 = x3; x3 =~ x3; \
x3 |= x2; x2 ^= x4; x4 ^= x0; \
x3 ^= x1; x1 |= x2; x2 ^= x0; \
x1 ^= x4; x4 |= x3; x2 ^= x3; \
x4 ^= x2; x2 &= x1; \
x2 ^= x3; x3 ^= x4; x4 ^= x0;
#define SI3(x0,x1,x2,x3,x4) \
x2 ^= x1; \
x4 = x1; x1 &= x2; \
x1 ^= x0; x0 |= x4; x4 ^= x3; \
x0 ^= x3; x3 |= x1; x1 ^= x2; \
x1 ^= x3; x0 ^= x2; x2 ^= x3; \
x3 &= x1; x1 ^= x0; x0 &= x2; \
x4 ^= x3; x3 ^= x0; x0 ^= x1;
#define SI4(x0,x1,x2,x3,x4) \
x2 ^= x3; x4 = x0; x0 &= x1; \
x0 ^= x2; x2 |= x3; x4 =~ x4; \
x1 ^= x0; x0 ^= x2; x2 &= x4; \
x2 ^= x0; x0 |= x4; \
x0 ^= x3; x3 &= x2; \
x4 ^= x3; x3 ^= x1; x1 &= x0; \
x4 ^= x1; x0 ^= x3;
#define SI5(x0,x1,x2,x3,x4) \
x4 = x1; x1 |= x2; \
x2 ^= x4; x1 ^= x3; x3 &= x4; \
x2 ^= x3; x3 |= x0; x0 =~ x0; \
x3 ^= x2; x2 |= x0; x4 ^= x1; \
x2 ^= x4; x4 &= x0; x0 ^= x1; \
x1 ^= x3; x0 &= x2; x2 ^= x3; \
x0 ^= x2; x2 ^= x4; x4 ^= x3;
#define SI6(x0,x1,x2,x3,x4) \
x0 ^= x2; \
x4 = x0; x0 &= x3; x2 ^= x3; \
x0 ^= x2; x3 ^= x1; x2 |= x4; \
x2 ^= x3; x3 &= x0; x0 =~ x0; \
x3 ^= x1; x1 &= x2; x4 ^= x0; \
x3 ^= x4; x4 ^= x2; x0 ^= x1; \
x2 ^= x0;
#define SI7(x0,x1,x2,x3,x4) \
x4 = x3; x3 &= x0; x0 ^= x2; \
x2 |= x4; x4 ^= x1; x0 =~ x0; \
x1 |= x3; x4 ^= x0; x0 &= x2; \
x0 ^= x1; x1 &= x2; x3 ^= x2; \
x4 ^= x3; x2 &= x3; x3 |= x0; \
x1 ^= x4; x3 ^= x4; x4 &= x0; \
x4 ^= x2;
/* Per-tfm Serpent context: the expanded round-key schedule
 * (SERPENT_EXPKEY_WORDS = 132 = 33 round keys x 4 words). */
struct serpent_ctx {
u32 expkey[SERPENT_EXPKEY_WORDS];
};
static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0,r1,r2,r3,r4;
int i;
/* Copy key, add padding */
for (i = 0; i < keylen; ++i)
k8[i] = key[i];
if (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 1;
while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0;
/* Expand key using polynomial */
r0 = le32_to_cpu(k[3]);
r1 = le32_to_cpu(k[4]);
r2 = le32_to_cpu(k[5]);
r3 = le32_to_cpu(k[6]);
r4 = le32_to_cpu(k[7]);
keyiter(le32_to_cpu(k[0]),r0,r4,r2,0,0);
keyiter(le32_to_cpu(k[1]),r1,r0,r3,1,1);
keyiter(le32_to_cpu(k[2]),r2,r1,r4,2,2);
keyiter(le32_to_cpu(k[3]),r3,r2,r0,3,3);
keyiter(le32_to_cpu(k[4]),r4,r3,r1,4,4);
keyiter(le32_to_cpu(k[5]),r0,r4,r2,5,5);
keyiter(le32_to_cpu(k[6]),r1,r0,r3,6,6);
keyiter(le32_to_cpu(k[7]),r2,r1,r4,7,7);
keyiter(k[ 0],r3,r2,r0, 8, 8); keyiter(k[ 1],r4,r3,r1, 9, 9);
keyiter(k[ 2],r0,r4,r2, 10, 10); keyiter(k[ 3],r1,r0,r3, 11, 11);
keyiter(k[ 4],r2,r1,r4, 12, 12); keyiter(k[ 5],r3,r2,r0, 13, 13);
keyiter(k[ 6],r4,r3,r1, 14, 14); keyiter(k[ 7],r0,r4,r2, 15, 15);
keyiter(k[ 8],r1,r0,r3, 16, 16); keyiter(k[ 9],r2,r1,r4, 17, 17);
keyiter(k[ 10],r3,r2,r0, 18, 18); keyiter(k[ 11],r4,r3,r1, 19, 19);
keyiter(k[ 12],r0,r4,r2, 20, 20); keyiter(k[ 13],r1,r0,r3, 21, 21);
keyiter(k[ 14],r2,r1,r4, 22, 22); keyiter(k[ 15],r3,r2,r0, 23, 23);
keyiter(k[ 16],r4,r3,r1, 24, 24); keyiter(k[ 17],r0,r4,r2, 25, 25);
keyiter(k[ 18],r1,r0,r3, 26, 26); keyiter(k[ 19],r2,r1,r4, 27, 27);
keyiter(k[ 20],r3,r2,r0, 28, 28); keyiter(k[ 21],r4,r3,r1, 29, 29);
keyiter(k[ 22],r0,r4,r2, 30, 30); keyiter(k[ 23],r1,r0,r3, 31, 31);
k += 50;
keyiter(k[-26],r2,r1,r4, 32,-18); keyiter(k[-25],r3,r2,r0, 33,-17);
keyiter(k[-24],r4,r3,r1, 34,-16); keyiter(k[-23],r0,r4,r2, 35,-15);
keyiter(k[-22],r1,r0,r3, 36,-14); keyiter(k[-21],r2,r1,r4, 37,-13);
keyiter(k[-20],r3,r2,r0, 38,-12); keyiter(k[-19],r4,r3,r1, 39,-11);
keyiter(k[-18],r0,r4,r2, 40,-10); keyiter(k[-17],r1,r0,r3, 41, -9);
keyiter(k[-16],r2,r1,r4, 42, -8); keyiter(k[-15],r3,r2,r0, 43, -7);
keyiter(k[-14],r4,r3,r1, 44, -6); keyiter(k[-13],r0,r4,r2, 45, -5);
keyiter(k[-12],r1,r0,r3, 46, -4); keyiter(k[-11],r2,r1,r4, 47, -3);
keyiter(k[-10],r3,r2,r0, 48, -2); keyiter(k[ -9],r4,r3,r1, 49, -1);
keyiter(k[ -8],r0,r4,r2, 50, 0); keyiter(k[ -7],r1,r0,r3, 51, 1);
keyiter(k[ -6],r2,r1,r4, 52, 2); keyiter(k[ -5],r3,r2,r0, 53, 3);
keyiter(k[ -4],r4,r3,r1, 54, 4); keyiter(k[ -3],r0,r4,r2, 55, 5);
keyiter(k[ -2],r1,r0,r3, 56, 6); keyiter(k[ -1],r2,r1,r4, 57, 7);
keyiter(k[ 0],r3,r2,r0, 58, 8); keyiter(k[ 1],r4,r3,r1, 59, 9);
keyiter(k[ 2],r0,r4,r2, 60, 10); keyiter(k[ 3],r1,r0,r3, 61, 11);
keyiter(k[ 4],r2,r1,r4, 62, 12); keyiter(k[ 5],r3,r2,r0, 63, 13);
keyiter(k[ 6],r4,r3,r1, 64, 14); keyiter(k[ 7],r0,r4,r2, 65, 15);
keyiter(k[ 8],r1,r0,r3, 66, 16); keyiter(k[ 9],r2,r1,r4, 67, 17);
keyiter(k[ 10],r3,r2,r0, 68, 18); keyiter(k[ 11],r4,r3,r1, 69, 19);
keyiter(k[ 12],r0,r4,r2, 70, 20); keyiter(k[ 13],r1,r0,r3, 71, 21);
keyiter(k[ 14],r2,r1,r4, 72, 22); keyiter(k[ 15],r3,r2,r0, 73, 23);
keyiter(k[ 16],r4,r3,r1, 74, 24); keyiter(k[ 17],r0,r4,r2, 75, 25);
keyiter(k[ 18],r1,r0,r3, 76, 26); keyiter(k[ 19],r2,r1,r4, 77, 27);
keyiter(k[ 20],r3,r2,r0, 78, 28); keyiter(k[ 21],r4,r3,r1, 79, 29);
keyiter(k[ 22],r0,r4,r2, 80, 30); keyiter(k[ 23],r1,r0,r3, 81, 31);
k += 50;
keyiter(k[-26],r2,r1,r4, 82,-18); keyiter(k[-25],r3,r2,r0, 83,-17);
keyiter(k[-24],r4,r3,r1, 84,-16); keyiter(k[-23],r0,r4,r2, 85,-15);
keyiter(k[-22],r1,r0,r3, 86,-14); keyiter(k[-21],r2,r1,r4, 87,-13);
keyiter(k[-20],r3,r2,r0, 88,-12); keyiter(k[-19],r4,r3,r1, 89,-11);
keyiter(k[-18],r0,r4,r2, 90,-10); keyiter(k[-17],r1,r0,r3, 91, -9);
keyiter(k[-16],r2,r1,r4, 92, -8); keyiter(k[-15],r3,r2,r0, 93, -7);
keyiter(k[-14],r4,r3,r1, 94, -6); keyiter(k[-13],r0,r4,r2, 95, -5);
keyiter(k[-12],r1,r0,r3, 96, -4); keyiter(k[-11],r2,r1,r4, 97, -3);
keyiter(k[-10],r3,r2,r0, 98, -2); keyiter(k[ -9],r4,r3,r1, 99, -1);
keyiter(k[ -8],r0,r4,r2,100, 0); keyiter(k[ -7],r1,r0,r3,101, 1);
keyiter(k[ -6],r2,r1,r4,102, 2); keyiter(k[ -5],r3,r2,r0,103, 3);
keyiter(k[ -4],r4,r3,r1,104, 4); keyiter(k[ -3],r0,r4,r2,105, 5);
keyiter(k[ -2],r1,r0,r3,106, 6); keyiter(k[ -1],r2,r1,r4,107, 7);
keyiter(k[ 0],r3,r2,r0,108, 8); keyiter(k[ 1],r4,r3,r1,109, 9);
keyiter(k[ 2],r0,r4,r2,110, 10); keyiter(k[ 3],r1,r0,r3,111, 11);
keyiter(k[ 4],r2,r1,r4,112, 12); keyiter(k[ 5],r3,r2,r0,113, 13);
keyiter(k[ 6],r4,r3,r1,114, 14); keyiter(k[ 7],r0,r4,r2,115, 15);
keyiter(k[ 8],r1,r0,r3,116, 16); keyiter(k[ 9],r2,r1,r4,117, 17);
keyiter(k[ 10],r3,r2,r0,118, 18); keyiter(k[ 11],r4,r3,r1,119, 19);
keyiter(k[ 12],r0,r4,r2,120, 20); keyiter(k[ 13],r1,r0,r3,121, 21);
keyiter(k[ 14],r2,r1,r4,122, 22); keyiter(k[ 15],r3,r2,r0,123, 23);
keyiter(k[ 16],r4,r3,r1,124, 24); keyiter(k[ 17],r0,r4,r2,125, 25);
keyiter(k[ 18],r1,r0,r3,126, 26); keyiter(k[ 19],r2,r1,r4,127, 27);
keyiter(k[ 20],r3,r2,r0,128, 28); keyiter(k[ 21],r4,r3,r1,129, 29);
keyiter(k[ 22],r0,r4,r2,130, 30); keyiter(k[ 23],r1,r0,r3,131, 31);
/* Apply S-boxes */
S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 28); loadkeys(r1,r2,r4,r3, 24);
S4(r1,r2,r4,r3,r0); storekeys(r2,r4,r3,r0, 24); loadkeys(r2,r4,r3,r0, 20);
S5(r2,r4,r3,r0,r1); storekeys(r1,r2,r4,r0, 20); loadkeys(r1,r2,r4,r0, 16);
S6(r1,r2,r4,r0,r3); storekeys(r4,r3,r2,r0, 16); loadkeys(r4,r3,r2,r0, 12);
S7(r4,r3,r2,r0,r1); storekeys(r1,r2,r0,r4, 12); loadkeys(r1,r2,r0,r4, 8);
S0(r1,r2,r0,r4,r3); storekeys(r0,r2,r4,r1, 8); loadkeys(r0,r2,r4,r1, 4);
S1(r0,r2,r4,r1,r3); storekeys(r3,r4,r1,r0, 4); loadkeys(r3,r4,r1,r0, 0);
S2(r3,r4,r1,r0,r2); storekeys(r2,r4,r3,r0, 0); loadkeys(r2,r4,r3,r0, -4);
S3(r2,r4,r3,r0,r1); storekeys(r0,r1,r4,r2, -4); loadkeys(r0,r1,r4,r2, -8);
S4(r0,r1,r4,r2,r3); storekeys(r1,r4,r2,r3, -8); loadkeys(r1,r4,r2,r3,-12);
S5(r1,r4,r2,r3,r0); storekeys(r0,r1,r4,r3,-12); loadkeys(r0,r1,r4,r3,-16);
S6(r0,r1,r4,r3,r2); storekeys(r4,r2,r1,r3,-16); loadkeys(r4,r2,r1,r3,-20);
S7(r4,r2,r1,r3,r0); storekeys(r0,r1,r3,r4,-20); loadkeys(r0,r1,r3,r4,-24);
S0(r0,r1,r3,r4,r2); storekeys(r3,r1,r4,r0,-24); loadkeys(r3,r1,r4,r0,-28);
k -= 50;
S1(r3,r1,r4,r0,r2); storekeys(r2,r4,r0,r3, 22); loadkeys(r2,r4,r0,r3, 18);
S2(r2,r4,r0,r3,r1); storekeys(r1,r4,r2,r3, 18); loadkeys(r1,r4,r2,r3, 14);
S3(r1,r4,r2,r3,r0); storekeys(r3,r0,r4,r1, 14); loadkeys(r3,r0,r4,r1, 10);
S4(r3,r0,r4,r1,r2); storekeys(r0,r4,r1,r2, 10); loadkeys(r0,r4,r1,r2, 6);
S5(r0,r4,r1,r2,r3); storekeys(r3,r0,r4,r2, 6); loadkeys(r3,r0,r4,r2, 2);
S6(r3,r0,r4,r2,r1); storekeys(r4,r1,r0,r2, 2); loadkeys(r4,r1,r0,r2, -2);
S7(r4,r1,r0,r2,r3); storekeys(r3,r0,r2,r4, -2); loadkeys(r3,r0,r2,r4, -6);
S0(r3,r0,r2,r4,r1); storekeys(r2,r0,r4,r3, -6); loadkeys(r2,r0,r4,r3,-10);
S1(r2,r0,r4,r3,r1); storekeys(r1,r4,r3,r2,-10); loadkeys(r1,r4,r3,r2,-14);
S2(r1,r4,r3,r2,r0); storekeys(r0,r4,r1,r2,-14); loadkeys(r0,r4,r1,r2,-18);
S3(r0,r4,r1,r2,r3); storekeys(r2,r3,r4,r0,-18); loadkeys(r2,r3,r4,r0,-22);
k -= 50;
S4(r2,r3,r4,r0,r1); storekeys(r3,r4,r0,r1, 28); loadkeys(r3,r4,r0,r1, 24);
S5(r3,r4,r0,r1,r2); storekeys(r2,r3,r4,r1, 24); loadkeys(r2,r3,r4,r1, 20);
S6(r2,r3,r4,r1,r0); storekeys(r4,r0,r3,r1, 20); loadkeys(r4,r0,r3,r1, 16);
S7(r4,r0,r3,r1,r2); storekeys(r2,r3,r1,r4, 16); loadkeys(r2,r3,r1,r4, 12);
S0(r2,r3,r1,r4,r0); storekeys(r1,r3,r4,r2, 12); loadkeys(r1,r3,r4,r2, 8);
S1(r1,r3,r4,r2,r0); storekeys(r0,r4,r2,r1, 8); loadkeys(r0,r4,r2,r1, 4);
S2(r0,r4,r2,r1,r3); storekeys(r3,r4,r0,r1, 4); loadkeys(r3,r4,r0,r1, 0);
S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 0);
return 0;
}
/*
 * serpent_encrypt - encrypt one 16-byte block in place-independent buffers.
 * @tfm: crypto transform whose context holds the expanded key (ctx->expkey)
 * @dst: destination buffer, 16 bytes
 * @src: source buffer, 16 bytes
 *
 * Runs the 32 Serpent rounds: each line applies a key mix (K/LK) and one of
 * the eight S-boxes S0..S7.  The register arguments encode the data-path
 * permutation between rounds; the exact order of these lines is significant
 * and must not be changed.
 */
static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	const u32
		*k = ctx->expkey;
	const __le32 *s = (const __le32 *)src;
	__le32	*d = (__le32 *)dst;
	u32	r0, r1, r2, r3, r4;

/*
 * Note: The conversions between u8* and u32* might cause trouble
 * on architectures with stricter alignment rules than x86
 */
	/* Load the input block as four little-endian 32-bit words. */
	r0 = le32_to_cpu(s[0]);
	r1 = le32_to_cpu(s[1]);
	r2 = le32_to_cpu(s[2]);
	r3 = le32_to_cpu(s[3]);

	/* 32 rounds: key-mix, S-box, linear transform (last round: key-mix only). */
	K(r0,r1,r2,r3,0);
	S0(r0,r1,r2,r3,r4);		LK(r2,r1,r3,r0,r4,1);
	S1(r2,r1,r3,r0,r4);		LK(r4,r3,r0,r2,r1,2);
	S2(r4,r3,r0,r2,r1);		LK(r1,r3,r4,r2,r0,3);
	S3(r1,r3,r4,r2,r0);		LK(r2,r0,r3,r1,r4,4);
	S4(r2,r0,r3,r1,r4);		LK(r0,r3,r1,r4,r2,5);
	S5(r0,r3,r1,r4,r2);		LK(r2,r0,r3,r4,r1,6);
	S6(r2,r0,r3,r4,r1);		LK(r3,r1,r0,r4,r2,7);
	S7(r3,r1,r0,r4,r2);		LK(r2,r0,r4,r3,r1,8);
	S0(r2,r0,r4,r3,r1);		LK(r4,r0,r3,r2,r1,9);
	S1(r4,r0,r3,r2,r1);		LK(r1,r3,r2,r4,r0,10);
	S2(r1,r3,r2,r4,r0);		LK(r0,r3,r1,r4,r2,11);
	S3(r0,r3,r1,r4,r2);		LK(r4,r2,r3,r0,r1,12);
	S4(r4,r2,r3,r0,r1);		LK(r2,r3,r0,r1,r4,13);
	S5(r2,r3,r0,r1,r4);		LK(r4,r2,r3,r1,r0,14);
	S6(r4,r2,r3,r1,r0);		LK(r3,r0,r2,r1,r4,15);
	S7(r3,r0,r2,r1,r4);		LK(r4,r2,r1,r3,r0,16);
	S0(r4,r2,r1,r3,r0);		LK(r1,r2,r3,r4,r0,17);
	S1(r1,r2,r3,r4,r0);		LK(r0,r3,r4,r1,r2,18);
	S2(r0,r3,r4,r1,r2);		LK(r2,r3,r0,r1,r4,19);
	S3(r2,r3,r0,r1,r4);		LK(r1,r4,r3,r2,r0,20);
	S4(r1,r4,r3,r2,r0);		LK(r4,r3,r2,r0,r1,21);
	S5(r4,r3,r2,r0,r1);		LK(r1,r4,r3,r0,r2,22);
	S6(r1,r4,r3,r0,r2);		LK(r3,r2,r4,r0,r1,23);
	S7(r3,r2,r4,r0,r1);		LK(r1,r4,r0,r3,r2,24);
	S0(r1,r4,r0,r3,r2);		LK(r0,r4,r3,r1,r2,25);
	S1(r0,r4,r3,r1,r2);		LK(r2,r3,r1,r0,r4,26);
	S2(r2,r3,r1,r0,r4);		LK(r4,r3,r2,r0,r1,27);
	S3(r4,r3,r2,r0,r1);		LK(r0,r1,r3,r4,r2,28);
	S4(r0,r1,r3,r4,r2);		LK(r1,r3,r4,r2,r0,29);
	S5(r1,r3,r4,r2,r0);		LK(r0,r1,r3,r2,r4,30);
	S6(r0,r1,r3,r2,r4);		LK(r3,r4,r1,r2,r0,31);
	S7(r3,r4,r1,r2,r0);		K(r0,r1,r2,r3,32);

	/* Store the result back as little-endian words. */
	d[0] = cpu_to_le32(r0);
	d[1] = cpu_to_le32(r1);
	d[2] = cpu_to_le32(r2);
	d[3] = cpu_to_le32(r3);
}
/*
 * serpent_decrypt - decrypt one 16-byte block.
 * @tfm: crypto transform whose context holds the expanded key (ctx->expkey)
 * @dst: destination buffer, 16 bytes
 * @src: source buffer, 16 bytes
 *
 * Inverse of serpent_encrypt: the 32 rounds are undone with the inverse
 * S-boxes SI0..SI7 and inverse key mixing (KL/K), in reverse order.  The
 * register-permutation arguments are significant; do not reorder the lines.
 */
static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
	/* ctx already has type struct serpent_ctx *; the old cast was redundant. */
	const u32
		*k = ctx->expkey;
	const __le32 *s = (const __le32 *)src;
	__le32	*d = (__le32 *)dst;
	u32	r0, r1, r2, r3, r4;

	/* Load the ciphertext block as four little-endian 32-bit words. */
	r0 = le32_to_cpu(s[0]);
	r1 = le32_to_cpu(s[1]);
	r2 = le32_to_cpu(s[2]);
	r3 = le32_to_cpu(s[3]);

	K(r0,r1,r2,r3,32);
	SI7(r0,r1,r2,r3,r4);	KL(r1,r3,r0,r4,r2,31);
	SI6(r1,r3,r0,r4,r2);	KL(r0,r2,r4,r1,r3,30);
	SI5(r0,r2,r4,r1,r3);	KL(r2,r3,r0,r4,r1,29);
	SI4(r2,r3,r0,r4,r1);	KL(r2,r0,r1,r4,r3,28);
	SI3(r2,r0,r1,r4,r3);	KL(r1,r2,r3,r4,r0,27);
	SI2(r1,r2,r3,r4,r0);	KL(r2,r0,r4,r3,r1,26);
	SI1(r2,r0,r4,r3,r1);	KL(r1,r0,r4,r3,r2,25);
	SI0(r1,r0,r4,r3,r2);	KL(r4,r2,r0,r1,r3,24);
	SI7(r4,r2,r0,r1,r3);	KL(r2,r1,r4,r3,r0,23);
	SI6(r2,r1,r4,r3,r0);	KL(r4,r0,r3,r2,r1,22);
	SI5(r4,r0,r3,r2,r1);	KL(r0,r1,r4,r3,r2,21);
	SI4(r0,r1,r4,r3,r2);	KL(r0,r4,r2,r3,r1,20);
	SI3(r0,r4,r2,r3,r1);	KL(r2,r0,r1,r3,r4,19);
	SI2(r2,r0,r1,r3,r4);	KL(r0,r4,r3,r1,r2,18);
	SI1(r0,r4,r3,r1,r2);	KL(r2,r4,r3,r1,r0,17);
	SI0(r2,r4,r3,r1,r0);	KL(r3,r0,r4,r2,r1,16);
	SI7(r3,r0,r4,r2,r1);	KL(r0,r2,r3,r1,r4,15);
	SI6(r0,r2,r3,r1,r4);	KL(r3,r4,r1,r0,r2,14);
	SI5(r3,r4,r1,r0,r2);	KL(r4,r2,r3,r1,r0,13);
	SI4(r4,r2,r3,r1,r0);	KL(r4,r3,r0,r1,r2,12);
	SI3(r4,r3,r0,r1,r2);	KL(r0,r4,r2,r1,r3,11);
	SI2(r0,r4,r2,r1,r3);	KL(r4,r3,r1,r2,r0,10);
	SI1(r4,r3,r1,r2,r0);	KL(r0,r3,r1,r2,r4,9);
	SI0(r0,r3,r1,r2,r4);	KL(r1,r4,r3,r0,r2,8);
	SI7(r1,r4,r3,r0,r2);	KL(r4,r0,r1,r2,r3,7);
	SI6(r4,r0,r1,r2,r3);	KL(r1,r3,r2,r4,r0,6);
	SI5(r1,r3,r2,r4,r0);	KL(r3,r0,r1,r2,r4,5);
	SI4(r3,r0,r1,r2,r4);	KL(r3,r1,r4,r2,r0,4);
	SI3(r3,r1,r4,r2,r0);	KL(r4,r3,r0,r2,r1,3);
	SI2(r4,r3,r0,r2,r1);	KL(r3,r1,r2,r0,r4,2);
	SI1(r3,r1,r2,r0,r4);	KL(r4,r1,r2,r0,r3,1);
	SI0(r4,r1,r2,r0,r3);	K(r2,r3,r1,r4,0);

	/* Store the plaintext back as little-endian words. */
	d[0] = cpu_to_le32(r2);
	d[1] = cpu_to_le32(r3);
	d[2] = cpu_to_le32(r1);
	d[3] = cpu_to_le32(r4);
}
/* crypto_alg descriptor for standard (specification byte order) Serpent. */
static struct crypto_alg serpent_alg = {
	.cra_name		=	"serpent",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	SERPENT_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct serpent_ctx),
	/* 4-byte alignment: the encrypt/decrypt paths access buffers as u32. */
	.cra_alignmask		=	3,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(serpent_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	SERPENT_MIN_KEY_SIZE,
	.cia_max_keysize	=	SERPENT_MAX_KEY_SIZE,
	.cia_setkey		=	serpent_setkey,
	.cia_encrypt		=	serpent_encrypt,
	.cia_decrypt		=	serpent_decrypt } }
};
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
u8 rev_key[SERPENT_MAX_KEY_SIZE];
int i;
for (i = 0; i < keylen; ++i)
rev_key[keylen - i - 1] = key[i];
return serpent_setkey(tfm, rev_key, keylen);
}
/*
 * tnepres_encrypt - encrypt one block in kerneli byte order.
 *
 * Word- and byte-reverse the input, run the standard Serpent encrypt,
 * then reverse the output the same way.
 */
static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const u32 * const in = (const u32 * const)src;
	u32 * const out = (u32 * const)dst;
	u32 tmp_in[4], tmp_out[4];
	int i;

	for (i = 0; i < 4; i++)
		tmp_in[i] = swab32(in[3 - i]);

	serpent_encrypt(tfm, (u8 *)tmp_out, (u8 *)tmp_in);

	for (i = 0; i < 4; i++)
		out[i] = swab32(tmp_out[3 - i]);
}
/*
 * tnepres_decrypt - decrypt one block in kerneli byte order.
 *
 * Mirror of tnepres_encrypt(): reverse, run standard Serpent decrypt,
 * reverse back.
 */
static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	const u32 * const in = (const u32 * const)src;
	u32 * const out = (u32 * const)dst;
	u32 tmp_in[4], tmp_out[4];
	int i;

	for (i = 0; i < 4; i++)
		tmp_in[i] = swab32(in[3 - i]);

	serpent_decrypt(tfm, (u8 *)tmp_out, (u8 *)tmp_in);

	for (i = 0; i < 4; i++)
		out[i] = swab32(tmp_out[3 - i]);
}
static struct crypto_alg tnepres_alg = {
.cra_name = "tnepres",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = tnepres_setkey,
.cia_encrypt = tnepres_encrypt,
.cia_decrypt = tnepres_decrypt } }
};
/* Module init: register both cipher variants; roll back on partial failure. */
static int __init serpent_mod_init(void)
{
	int err;

	err = crypto_register_alg(&serpent_alg);
	if (err)
		return err;

	err = crypto_register_alg(&tnepres_alg);
	if (err)
		crypto_unregister_alg(&serpent_alg);	/* undo first registration */

	return err;
}
/* Module exit: unregister both ciphers in reverse registration order. */
static void __exit serpent_mod_fini(void)
{
	crypto_unregister_alg(&tnepres_alg);
	crypto_unregister_alg(&serpent_alg);
}
module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS("tnepres");
| gpl-2.0 |
sktjdgns1189/android_kernel_pantech_ef56s | drivers/media/video/timblogiw.c | 4982 | 21326 | /*
* timblogiw.c timberdale FPGA LogiWin Video In driver
* Copyright (c) 2009-2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA LogiWin Video In
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/videobuf-dma-contig.h>
#include <media/timb_video.h>
#define DRIVER_NAME "timb-video"
#define TIMBLOGIWIN_NAME "Timberdale Video-In"
#define TIMBLOGIW_VERSION_CODE 0x04
#define TIMBLOGIW_LINES_PER_DESC 44
#define TIMBLOGIW_MAX_VIDEO_MEM 16
#define TIMBLOGIW_HAS_DECODER(lw) (lw->pdata.encoder.module_name)
/* Per-device state for the Timberdale LogiWin video-in driver. */
struct timblogiw {
	struct video_device		video_dev;	/* registered V4L2 device node */
	struct v4l2_device		v4l2_dev;	/* V4L2 core device */
	struct mutex			lock;		/* mutual exclusion for open/std/fmt */
	struct device			*dev;		/* DMA-capable parent (or own) device */
	struct timb_video_platform_data	pdata;		/* copy of the platform data */
	struct v4l2_subdev		*sd_enc;	/* encoder subdevice, NULL until open */
	bool				opened;		/* single-open enforcement flag */
};
/* One supported TV norm and its frame geometry/rate. */
struct timblogiw_tvnorm {
	v4l2_std_id std;	/* V4L2 standard bitmask this entry matches */
	u16	width;		/* active pixels per line */
	u16	height;		/* active lines per frame */
	u8	fps;		/* nominal frames per second */
};
/* Per-open-file capture state. */
struct timblogiw_fh {
	struct videobuf_queue		vb_vidq;	/* videobuf DMA-contig queue */
	struct timblogiw_tvnorm const	*cur_norm;	/* currently selected norm */
	struct list_head		capture;	/* buffers queued for capture */
	struct dma_chan			*chan;		/* DMA channel feeding the queue */
	spinlock_t			queue_lock;	/* protects capture list/queue state */
	unsigned int			frame_count;	/* frames completed since streamon */
};
/* Driver-private capture buffer wrapping a videobuf buffer. */
struct timblogiw_buffer {
	/* common v4l buffer stuff -- must be first */
	struct videobuf_buffer	vb;
	struct scatterlist	sg[16];		/* SG list split per descriptor chunk */
	dma_cookie_t		cookie;		/* in-flight DMA cookie, -1 when idle */
	struct timblogiw_fh	*fh;		/* back pointer to the owning handle */
};
/* Supported norms; the first entry (PAL) is the fallback default. */
const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
	{
		.std			= V4L2_STD_PAL,
		.width			= 720,
		.height			= 576,
		.fps			= 25
	},
	{
		.std			= V4L2_STD_NTSC,
		.width			= 720,
		.height			= 480,
		.fps			= 30
	}
};
/* Bytes per video line: two bytes per pixel (packed 4:2:2 UYVY). */
static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
{
	return 2 * norm->width;
}
/* Total size in bytes of one frame for the given norm. */
static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
{
	return timblogiw_bytes_per_line(norm) * norm->height;
}
static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
{
int i;
for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
if (timblogiw_tvnorms[i].std & std)
return timblogiw_tvnorms + i;
/* default to first element */
return timblogiw_tvnorms;
}
/*
 * DMA completion callback: runs when one frame transfer finishes.
 * Marks the buffer done, wakes its waiter and promotes the next queued
 * buffer to ACTIVE.  All list/state manipulation is under queue_lock.
 */
static void timblogiw_dma_cb(void *data)
{
	struct timblogiw_buffer *buf = data;
	struct timblogiw_fh *fh = buf->fh;
	struct videobuf_buffer *vb = &buf->vb;

	spin_lock(&fh->queue_lock);

	/* mark the transfer done */
	buf->cookie = -1;

	fh->frame_count++;

	if (vb->state != VIDEOBUF_ERROR) {
		list_del(&vb->queue);
		do_gettimeofday(&vb->ts);
		/* two fields per completed frame */
		vb->field_count = fh->frame_count * 2;
		vb->state = VIDEOBUF_DONE;

		wake_up(&vb->done);
	}

	if (!list_empty(&fh->capture)) {
		vb = list_entry(fh->capture.next, struct videobuf_buffer,
			queue);
		vb->state = VIDEOBUF_ACTIVE;
	}

	spin_unlock(&fh->queue_lock);
}
static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
return chan->chan_id == (uintptr_t)filter_param;
}
/* IOCTL functions */
/*
 * VIDIOC_G_FMT: report the current capture format, derived from the
 * handle's selected norm.  The device only produces packed UYVY.
 */
static int timblogiw_g_fmt(struct file *file, void  *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;

	dev_dbg(&vdev->dev, "%s entry\n",  __func__);

	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* lock so cur_norm can't change while we read its geometry */
	mutex_lock(&lw->lock);

	format->fmt.pix.width = fh->cur_norm->width;
	format->fmt.pix.height = fh->cur_norm->height;
	format->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	format->fmt.pix.bytesperline = timblogiw_bytes_per_line(fh->cur_norm);
	format->fmt.pix.sizeimage = timblogiw_frame_size(fh->cur_norm);
	format->fmt.pix.field = V4L2_FIELD_NONE;

	mutex_unlock(&lw->lock);

	return 0;
}
/*
 * VIDIOC_TRY_FMT: validate a requested format.  Only progressive,
 * packed-UYVY video capture is supported; anything else is -EINVAL.
 */
static int timblogiw_try_fmt(struct file *file, void  *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_pix_format *pix = &format->fmt.pix;

	dev_dbg(&vdev->dev,
		"%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
		"bytes per line %d, size image: %d, colorspace: %d\n",
		__func__,
		pix->width, pix->height, pix->pixelformat, pix->field,
		pix->bytesperline, pix->sizeimage, pix->colorspace);

	if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    pix->field != V4L2_FIELD_NONE ||
	    pix->pixelformat != V4L2_PIX_FMT_UYVY)
		return -EINVAL;

	return 0;
}
/*
 * VIDIOC_S_FMT: "set" the capture format.  The hardware geometry is
 * fixed by the current norm, so after validation the requested width
 * and height are simply overwritten with the norm's values.  Fails
 * with -EBUSY while buffers are queued.
 */
static int timblogiw_s_fmt(struct file *file, void  *priv,
	struct v4l2_format *format)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;
	struct v4l2_pix_format *pix = &format->fmt.pix;
	int err;

	mutex_lock(&lw->lock);

	err = timblogiw_try_fmt(file, priv, format);
	if (err)
		goto out;

	if (videobuf_queue_is_busy(&fh->vb_vidq)) {
		dev_err(&vdev->dev, "%s queue busy\n", __func__);
		err = -EBUSY;
		goto out;
	}

	/* geometry is dictated by the selected norm, not the caller */
	pix->width = fh->cur_norm->width;
	pix->height = fh->cur_norm->height;

out:
	mutex_unlock(&lw->lock);
	return err;
}
/* VIDIOC_QUERYCAP: identify the driver and advertise its capabilities. */
static int timblogiw_querycap(struct file *file, void  *priv,
	struct v4l2_capability *cap)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	/* zero first so every string field is guaranteed NUL-terminated */
	memset(cap, 0, sizeof(*cap));
	strncpy(cap->card, TIMBLOGIWIN_NAME, sizeof(cap->card) - 1);
	strncpy(cap->driver, DRIVER_NAME, sizeof(cap->driver) - 1);
	strlcpy(cap->bus_info, vdev->name, sizeof(cap->bus_info));
	cap->version = TIMBLOGIW_VERSION_CODE;
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
		V4L2_CAP_STREAMING;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT: the hardware produces exactly one format, packed
 * 4:2:2 UYVY, so only index 0 is valid.
 */
static int timblogiw_enum_fmt(struct file *file, void  *priv,
	struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);

	dev_dbg(&vdev->dev, "%s, index: %d\n",  __func__, fmt->index);

	if (fmt->index != 0)
		return -EINVAL;
	memset(fmt, 0, sizeof(*fmt));
	fmt->index = 0;
	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* Bug fix: the advertised pixelformat is UYVY, not YUYV. */
	strncpy(fmt->description, "4:2:2, packed, UYVY",
		sizeof(fmt->description) - 1);
	fmt->pixelformat = V4L2_PIX_FMT_UYVY;

	return 0;
}
/*
 * VIDIOC_G_PARM: report the frame interval as 1/fps for the current
 * norm.  The rate is fixed by the norm and cannot be changed.
 */
static int timblogiw_g_parm(struct file *file, void *priv,
	struct v4l2_streamparm *sp)
{
	struct timblogiw_fh *fh = priv;
	struct v4l2_captureparm *cp = &sp->parm.capture;

	cp->capability = V4L2_CAP_TIMEPERFRAME;
	cp->timeperframe.numerator = 1;
	cp->timeperframe.denominator = fh->cur_norm->fps;

	return 0;
}
static int timblogiw_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_reqbufs(&fh->vb_vidq, rb);
}
static int timblogiw_querybuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_querybuf(&fh->vb_vidq, b);
}
static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_qbuf(&fh->vb_vidq, b);
}
static int timblogiw_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
}
/* VIDIOC_G_STD: report the norm currently selected on this handle. */
static int timblogiw_g_std(struct file *file, void  *priv, v4l2_std_id *std)
{
	struct timblogiw_fh *handle = priv;

	dev_dbg(&video_devdata(file)->dev, "%s: entry\n",  __func__);

	*std = handle->cur_norm->std;

	return 0;
}
/*
 * VIDIOC_S_STD: select a TV norm.  When a decoder subdevice is present
 * it is configured first; the handle's norm is only updated on success.
 */
static int timblogiw_s_std(struct file *file, void  *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = priv;
	int err = 0;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	mutex_lock(&lw->lock);

	if (TIMBLOGIW_HAS_DECODER(lw))
		err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);

	if (!err)
		fh->cur_norm = timblogiw_get_norm(*std);

	mutex_unlock(&lw->lock);

	return err;
}
/* VIDIOC_ENUMINPUT: describe the single physical camera input. */
static int timblogiw_enuminput(struct file *file, void  *priv,
	struct v4l2_input *inp)
{
	struct video_device *vdev = video_devdata(file);
	int n;

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	/* exactly one input exists */
	if (inp->index != 0)
		return -EINVAL;

	inp->index = 0;
	strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
	inp->type = V4L2_INPUT_TYPE_CAMERA;

	/* advertise every norm present in the table */
	inp->std = 0;
	for (n = 0; n < ARRAY_SIZE(timblogiw_tvnorms); n++)
		inp->std |= timblogiw_tvnorms[n].std;

	return 0;
}
static int timblogiw_g_input(struct file *file, void *priv,
unsigned int *input)
{
struct video_device *vdev = video_devdata(file);
dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
*input = 0;
return 0;
}
static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
{
struct video_device *vdev = video_devdata(file);
dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
if (input != 0)
return -EINVAL;
return 0;
}
/* VIDIOC_STREAMON: reset the frame counter and start the videobuf queue. */
static int timblogiw_streamon(struct file *file, void  *priv, unsigned int type)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *handle = priv;

	dev_dbg(&vdev->dev, "%s: entry\n",  __func__);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		dev_dbg(&vdev->dev, "%s - No capture device\n", __func__);
		return -EINVAL;
	}

	handle->frame_count = 0;
	return videobuf_streamon(&handle->vb_vidq);
}
static int timblogiw_streamoff(struct file *file, void *priv,
unsigned int type)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = priv;
dev_dbg(&vdev->dev, "%s entry\n", __func__);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
return videobuf_streamoff(&fh->vb_vidq);
}
/*
 * VIDIOC_QUERYSTD: with a decoder present, ask it what it detects on
 * the wire; otherwise report the handle's currently selected norm.
 */
static int timblogiw_querystd(struct file *file, void  *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *handle = priv;

	dev_dbg(&vdev->dev, "%s entry\n",  __func__);

	if (TIMBLOGIW_HAS_DECODER(lw))
		return v4l2_subdev_call(lw->sd_enc, video, querystd, std);

	*std = handle->cur_norm->std;
	return 0;
}
/*
 * VIDIOC_ENUM_FRAMESIZES: a single discrete size per norm, so only
 * index 0 with the UYVY pixel format is valid.
 */
static int timblogiw_enum_framesizes(struct file *file, void  *priv,
	struct v4l2_frmsizeenum *fsize)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw_fh *handle = priv;

	dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n",  __func__,
		fsize->index, fsize->pixel_format);

	if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_UYVY)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = handle->cur_norm->width;
	fsize->discrete.height = handle->cur_norm->height;

	return 0;
}
/* Video buffer functions */
/*
 * videobuf buf_setup: size buffers for one frame each and cap the total
 * buffer memory at TIMBLOGIW_MAX_VIDEO_MEM megabytes.
 */
static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
	unsigned int *size)
{
	struct timblogiw_fh *handle = vq->priv_data;

	*size = timblogiw_frame_size(handle->cur_norm);

	/* default to 32 buffers, then shrink until the total fits the cap */
	if (*count == 0)
		*count = 32;

	while (*size * *count > TIMBLOGIW_MAX_VIDEO_MEM * 1024 * 1024)
		--*count;

	return 0;
}
/*
 * videobuf buf_prepare: validate and (on first use) map the buffer and
 * build its scatter-gather list.  The frame is split into chunks of
 * TIMBLOGIW_LINES_PER_DESC lines, one SG entry per chunk, with the last
 * entry trimmed to the exact frame size.
 */
static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
	enum v4l2_field field)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);
	unsigned int data_size = timblogiw_frame_size(fh->cur_norm);
	int err = 0;

	if (vb->baddr && vb->bsize < data_size)
		/* User provided buffer, but it is too small */
		return -ENOMEM;

	vb->size = data_size;
	vb->width = fh->cur_norm->width;
	vb->height = fh->cur_norm->height;
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		int i;
		unsigned int size;
		unsigned int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
			timblogiw_bytes_per_line(fh->cur_norm);
		dma_addr_t addr;

		sg_init_table(buf->sg, ARRAY_SIZE(buf->sg));

		err = videobuf_iolock(vq, vb, NULL);
		if (err)
			goto err;

		addr = videobuf_to_dma_contig(vb);
		/* one SG entry per descriptor-sized chunk; trim the last one */
		for (i = 0, size = 0; size < data_size; i++) {
			sg_dma_address(buf->sg + i) = addr + size;
			size += bytes_per_desc;
			sg_dma_len(buf->sg + i) = (size > data_size) ?
				(bytes_per_desc - (size - data_size)) :
				bytes_per_desc;
		}

		vb->state = VIDEOBUF_PREPARED;
		buf->cookie = -1;	/* no DMA in flight yet */
		buf->fh = fh;
	}

	return 0;

err:
	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
	return err;
}
/*
 * videobuf buf_queue: add the buffer to the capture list and submit its
 * scatter-gather DMA transfer.
 *
 * The videobuf core calls this with fh->queue_lock held (the irqlock
 * passed to videobuf_queue_dma_contig_init); the lock is dropped around
 * the DMA descriptor preparation, which may sleep/allocate, and retaken
 * before returning as the caller expects.
 */
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);
	struct dma_async_tx_descriptor *desc;
	int sg_elems;
	int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC *
		timblogiw_bytes_per_line(fh->cur_norm);

	/* number of SG entries: frame size divided by chunk size, rounded up */
	sg_elems = timblogiw_frame_size(fh->cur_norm) / bytes_per_desc;
	sg_elems +=
		(timblogiw_frame_size(fh->cur_norm) % bytes_per_desc) ? 1 : 0;

	if (list_empty(&fh->capture))
		vb->state = VIDEOBUF_ACTIVE;
	else
		vb->state = VIDEOBUF_QUEUED;

	list_add_tail(&vb->queue, &fh->capture);

	spin_unlock_irq(&fh->queue_lock);

	desc = dmaengine_prep_slave_sg(fh->chan,
		buf->sg, sg_elems, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
	if (!desc) {
		/* preparation failed: undo the enqueue */
		spin_lock_irq(&fh->queue_lock);
		list_del_init(&vb->queue);
		vb->state = VIDEOBUF_PREPARED;
		return;
	}

	desc->callback_param = buf;
	desc->callback = timblogiw_dma_cb;

	buf->cookie = desc->tx_submit(desc);

	spin_lock_irq(&fh->queue_lock);
}
/*
 * videobuf buf_release: wait out any in-flight DMA for this buffer
 * (cookie >= 0 means a transfer was submitted), then free the mapping.
 */
static void buffer_release(struct videobuf_queue *vq,
	struct videobuf_buffer *vb)
{
	struct timblogiw_fh *fh = vq->priv_data;
	struct timblogiw_buffer *buf = container_of(vb, struct timblogiw_buffer,
		vb);

	videobuf_waiton(vq, vb, 0, 0);
	if (buf->cookie >= 0)
		dma_sync_wait(fh->chan, buf->cookie);

	videobuf_dma_contig_free(vq, vb);
	vb->state = VIDEOBUF_NEEDS_INIT;
}
/* videobuf queue operations for the capture queue. */
static struct videobuf_queue_ops timblogiw_video_qops = {
	.buf_setup	= buffer_setup,
	.buf_prepare	= buffer_prepare,
	.buf_queue	= buffer_queue,
	.buf_release	= buffer_release,
};
/* Device Operations functions */
static int timblogiw_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw *lw = video_get_drvdata(vdev);
struct timblogiw_fh *fh;
v4l2_std_id std;
dma_cap_mask_t mask;
int err = 0;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
mutex_lock(&lw->lock);
if (lw->opened) {
err = -EBUSY;
goto out;
}
if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
struct i2c_adapter *adapt;
/* find the video decoder */
adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
if (!adapt) {
dev_err(&vdev->dev, "No I2C bus #%d\n",
lw->pdata.i2c_adapter);
err = -ENODEV;
goto out;
}
/* now find the encoder */
lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
lw->pdata.encoder.info, NULL);
i2c_put_adapter(adapt);
if (!lw->sd_enc) {
dev_err(&vdev->dev, "Failed to get encoder: %s\n",
lw->pdata.encoder.module_name);
err = -ENODEV;
goto out;
}
}
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (!fh) {
err = -ENOMEM;
goto out;
}
fh->cur_norm = timblogiw_tvnorms;
timblogiw_querystd(file, fh, &std);
fh->cur_norm = timblogiw_get_norm(std);
INIT_LIST_HEAD(&fh->capture);
spin_lock_init(&fh->queue_lock);
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_PRIVATE, mask);
/* find the DMA channel */
fh->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
(void *)(uintptr_t)lw->pdata.dma_channel);
if (!fh->chan) {
dev_err(&vdev->dev, "Failed to get DMA channel\n");
kfree(fh);
err = -ENODEV;
goto out;
}
file->private_data = fh;
videobuf_queue_dma_contig_init(&fh->vb_vidq,
&timblogiw_video_qops, lw->dev, &fh->queue_lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct timblogiw_buffer), fh, NULL);
lw->opened = true;
out:
mutex_unlock(&lw->lock);
return err;
}
/*
 * File release: stop streaming, free the videobuf mappings, return the
 * DMA channel and clear the single-open flag.
 */
static int timblogiw_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct timblogiw *lw = video_get_drvdata(vdev);
	struct timblogiw_fh *fh = file->private_data;

	dev_dbg(&vdev->dev, "%s: Entry\n",  __func__);

	videobuf_stop(&fh->vb_vidq);
	videobuf_mmap_free(&fh->vb_vidq);

	dma_release_channel(fh->chan);

	kfree(fh);

	mutex_lock(&lw->lock);
	lw->opened = false;
	mutex_unlock(&lw->lock);
	return 0;
}
/* read(): stream frames to user space via the videobuf read helper. */
static ssize_t timblogiw_read(struct file *file, char __user *data,
	size_t count, loff_t *ppos)
{
	struct timblogiw_fh *handle = file->private_data;

	dev_dbg(&video_devdata(file)->dev, "%s: entry\n",  __func__);

	return videobuf_read_stream(&handle->vb_vidq, data, count, ppos, 0,
		file->f_flags & O_NONBLOCK);
}
static unsigned int timblogiw_poll(struct file *file,
struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = file->private_data;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_poll_stream(file, &fh->vb_vidq, wait);
}
static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = video_devdata(file);
struct timblogiw_fh *fh = file->private_data;
dev_dbg(&vdev->dev, "%s: entry\n", __func__);
return videobuf_mmap_mapper(&fh->vb_vidq, vma);
}
/* Platform device functions */
/* V4L2 ioctl dispatch table. */
static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
	.vidioc_querycap		= timblogiw_querycap,
	.vidioc_enum_fmt_vid_cap	= timblogiw_enum_fmt,
	.vidioc_g_fmt_vid_cap		= timblogiw_g_fmt,
	.vidioc_try_fmt_vid_cap		= timblogiw_try_fmt,
	.vidioc_s_fmt_vid_cap		= timblogiw_s_fmt,
	.vidioc_g_parm			= timblogiw_g_parm,
	.vidioc_reqbufs			= timblogiw_reqbufs,
	.vidioc_querybuf		= timblogiw_querybuf,
	.vidioc_qbuf			= timblogiw_qbuf,
	.vidioc_dqbuf			= timblogiw_dqbuf,
	.vidioc_g_std			= timblogiw_g_std,
	.vidioc_s_std			= timblogiw_s_std,
	.vidioc_enum_input		= timblogiw_enuminput,
	.vidioc_g_input			= timblogiw_g_input,
	.vidioc_s_input			= timblogiw_s_input,
	.vidioc_streamon		= timblogiw_streamon,
	.vidioc_streamoff		= timblogiw_streamoff,
	.vidioc_querystd		= timblogiw_querystd,
	.vidioc_enum_framesizes		= timblogiw_enum_framesizes,
};
/* V4L2 file operations for the capture device node. */
static __devinitconst struct v4l2_file_operations timblogiw_fops = {
	.owner		= THIS_MODULE,
	.open		= timblogiw_open,
	.release	= timblogiw_close,
	.unlocked_ioctl	= video_ioctl2, /* V4L2 ioctl handler */
	.mmap		= timblogiw_mmap,
	.read		= timblogiw_read,
	.poll		= timblogiw_poll,
};
/* Template video_device, copied into each probed instance. */
static __devinitconst struct video_device timblogiw_template = {
	.name		= TIMBLOGIWIN_NAME,
	.fops		= &timblogiw_fops,
	.ioctl_ops	= &timblogiw_ioctl_ops,
	.release	= video_device_release_empty,
	.minor		= -1,
	.tvnorms	= V4L2_STD_PAL | V4L2_STD_NTSC
};
/*
 * Platform probe: validate platform data, allocate the device state,
 * register the v4l2_device and the video device node.  Error paths
 * unwind in reverse order via the goto labels.
 */
static int __devinit timblogiw_probe(struct platform_device *pdev)
{
	int err;
	struct timblogiw *lw = NULL;
	struct timb_video_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		err = -EINVAL;
		goto err;
	}

	if (!pdata->encoder.module_name)
		dev_info(&pdev->dev, "Running without decoder\n");

	lw = kzalloc(sizeof(*lw), GFP_KERNEL);
	if (!lw) {
		err = -ENOMEM;
		goto err;
	}

	/* prefer the parent (MFD) device for DMA mappings when present */
	if (pdev->dev.parent)
		lw->dev = pdev->dev.parent;
	else
		lw->dev = &pdev->dev;

	memcpy(&lw->pdata, pdata, sizeof(lw->pdata));

	mutex_init(&lw->lock);

	lw->video_dev = timblogiw_template;

	strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
	err = v4l2_device_register(NULL, &lw->v4l2_dev);
	if (err)
		goto err_register;

	lw->video_dev.v4l2_dev = &lw->v4l2_dev;

	platform_set_drvdata(pdev, lw);
	video_set_drvdata(&lw->video_dev, lw);

	err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
	if (err) {
		dev_err(&pdev->dev, "Error reg video: %d\n", err);
		goto err_request;
	}

	return 0;

err_request:
	platform_set_drvdata(pdev, NULL);
	v4l2_device_unregister(&lw->v4l2_dev);
err_register:
	kfree(lw);
err:
	dev_err(&pdev->dev, "Failed to register: %d\n", err);

	return err;
}
/* Platform remove: tear down in reverse order of probe. */
static int __devexit timblogiw_remove(struct platform_device *pdev)
{
	struct timblogiw *priv = platform_get_drvdata(pdev);

	video_unregister_device(&priv->video_dev);
	v4l2_device_unregister(&priv->v4l2_dev);
	kfree(priv);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver glue; bound by name to the timberdale MFD cell. */
static struct platform_driver timblogiw_platform_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe		= timblogiw_probe,
	.remove		= __devexit_p(timblogiw_remove),
};
module_platform_driver(timblogiw_platform_driver);
MODULE_DESCRIPTION(TIMBLOGIWIN_NAME);
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);
| gpl-2.0 |
McBane87/Sony_Tablet_Z_KK.283_Kernel | drivers/message/fusion/mptscsih.c | 5238 | 95165 | /*
* linux/drivers/message/fusion/mptscsih.c
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for mdelay */
#include <linux/interrupt.h> /* needed for in_interrupt() proto */
#include <linux/reboot.h> /* notifier code */
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
#include "lsi/mpi_log_sas.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT SCSI Host driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptscsih"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Other private/forward protos...
*/
struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static void mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq);
int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
SCSIIORequest_t *pReq, int req_idx);
static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
int lun, int ctx2abort, ulong timeout);
int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
void
mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
static int
mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
SCSITaskMgmtReply_t *pScsiTmReply);
void mptscsih_remove(struct pci_dev *);
void mptscsih_shutdown(struct pci_dev *);
#ifdef CONFIG_PM
int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
int mptscsih_resume(struct pci_dev *pdev);
#endif
#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mptscsih_getFreeChainBuffer - pop one chain buffer off the adapter's
 * FreeChainQ and report its index within ioc->ChainBuffer.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @retIndex: receives the chain index, or MPT_HOST_NO_CHAIN when the
 *            free list is empty (output)
 *
 * Returns SUCCESS when a chain buffer was obtained, FAILED otherwise.
 */
static inline int
mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
{
	MPT_FRAME_HDR *frame;
	unsigned long flags;
	int result = FAILED;
	int index = MPT_HOST_NO_CHAIN;

	dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "getFreeChainBuffer called\n",
			ioc->name));

	spin_lock_irqsave(&ioc->FreeQlock, flags);
	if (list_empty(&ioc->FreeChainQ)) {
		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n",
				ioc->name));
	} else {
		int byte_off;

		frame = list_entry(ioc->FreeChainQ.next, MPT_FRAME_HDR,
				u.frame.linkage.list);
		list_del(&frame->u.frame.linkage.list);
		/* index is the buffer's position within the ChainBuffer pool */
		byte_off = (u8 *)frame - (u8 *)ioc->ChainBuffer;
		index = byte_off / ioc->req_sz;
		result = SUCCESS;
		dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"getFreeChainBuffer chainBuf=%p ChainBuffer=%p offset=%d chain_idx=%d\n",
			ioc->name, frame, ioc->ChainBuffer, byte_off, index));
	}
	spin_unlock_irqrestore(&ioc->FreeQlock, flags);

	*retIndex = index;
	return result;
} /* mptscsih_getFreeChainBuffer() */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mptscsih_AddSGE - Add a SGE (plus chain buffers) to the
 * SCSIIORequest_t Message Frame.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @SCpnt: Pointer to scsi_cmnd structure
 * @pReq: Pointer to SCSIIORequest_t structure
 * @req_idx: Index of the SCSI IO request frame
 *
 * DMA-maps the command's scatterlist and writes simple SGEs into the
 * request frame, spilling into chain buffers (linked via chain SGEs)
 * whenever the current frame fills up.
 *
 * Returns SUCCESS, or FAILED on DMA-map error or chain-buffer exhaustion.
 */
static int
mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
		SCSIIORequest_t *pReq, int req_idx)
{
	char *psge;
	char *chainSge;
	struct scatterlist *sg;
	int frm_sz;
	int sges_left, sg_done;
	int chain_idx = MPT_HOST_NO_CHAIN;
	int sgeOffset;
	int numSgeSlots, numSgeThisFrame;
	u32 sgflags, sgdir, thisxfer = 0;
	int chain_dma_off = 0;
	int newIndex;
	int ii;
	dma_addr_t v2;
	u32 RequestNB;

	/* Derive transfer direction from the request's Control field */
	sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
	if (sgdir == MPI_SCSIIO_CONTROL_WRITE) {
		sgdir = MPT_TRANSFER_HOST_TO_IOC;
	} else {
		sgdir = MPT_TRANSFER_IOC_TO_HOST;
	}

	psge = (char *) &pReq->SGL;
	frm_sz = ioc->req_sz;

	/* Map the data portion, if any.
	 * sges_left = 0 if no data transfer.
	 */
	sges_left = scsi_dma_map(SCpnt);
	if (sges_left < 0)
		return FAILED;

	/* Handle the SG case.
	 */
	sg = scsi_sglist(SCpnt);
	sg_done = 0;
	sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
	chainSge = NULL;

	/* Prior to entering this loop - the following must be set
	 * current MF: sgeOffset (bytes)
	 * chainSge (Null if original MF is not a chain buffer)
	 * sg_done (num SGE done for this MF)
	 */
nextSGEset:
	/* How many SGE slots remain in this frame, and how many of the
	 * outstanding scatterlist entries fit in them. */
	numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
	numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;

	sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;

	/* Get first (num - 1) SG elements
	 * Skip any SG entries with a length of 0
	 * NOTE: at finish, sg and psge pointed to NEXT data/location positions
	 */
	for (ii=0; ii < (numSgeThisFrame-1); ii++) {
		thisxfer = sg_dma_len(sg);
		if (thisxfer == 0) {
			/* Get next SG element from the OS */
			sg = sg_next(sg);
			sg_done++;
			continue;
		}

		v2 = sg_dma_address(sg);
		ioc->add_sge(psge, sgflags | thisxfer, v2);

		/* Get next SG element from the OS */
		sg = sg_next(sg);
		psge += ioc->SGE_size;
		sgeOffset += ioc->SGE_size;
		sg_done++;
	}

	if (numSgeThisFrame == sges_left) {
		/* Add last element, end of buffer and end of list flags.
		 */
		sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT |
				MPT_SGE_FLAGS_END_OF_BUFFER |
				MPT_SGE_FLAGS_END_OF_LIST;

		/* Add last SGE and set termination flags.
		 * Note: Last SGE may have a length of 0 - which should be ok.
		 */
		thisxfer = sg_dma_len(sg);

		v2 = sg_dma_address(sg);
		ioc->add_sge(psge, sgflags | thisxfer, v2);
		sgeOffset += ioc->SGE_size;
		sg_done++;

		if (chainSge) {
			/* The current buffer is a chain buffer,
			 * but there is not another one.
			 * Update the chain element
			 * Offset and Length fields.
			 */
			ioc->add_chain((char *)chainSge, 0, sgeOffset,
				ioc->ChainBufferDMA + chain_dma_off);
		} else {
			/* The current buffer is the original MF
			 * and there is no Chain buffer.
			 */
			pReq->ChainOffset = 0;
			RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
			dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
			ioc->RequestNB[req_idx] = RequestNB;
		}
	} else {
		/* At least one chain buffer is needed.
		 * Complete the first MF
		 *  - last SGE element, set the LastElement bit
		 *  - set ChainOffset (words) for orig MF
		 *    (OR finish previous MF chain buffer)
		 *  - update MFStructPtr ChainIndex
		 *  - Populate chain element
		 * Also
		 * Loop until done.
		 */

		dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SG: Chain Required! sg done %d\n",
				ioc->name, sg_done));

		/* Set LAST_ELEMENT flag for last non-chain element
		 * in the buffer. Since psge points at the NEXT
		 * SGE element, go back one SGE element, update the flags
		 * and reset the pointer. (Note: sgflags & thisxfer are already
		 * set properly).
		 */
		if (sg_done) {
			u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
			sgflags = le32_to_cpu(*ptmp);
			sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
			*ptmp = cpu_to_le32(sgflags);
		}

		if (chainSge) {
			/* The current buffer is a chain buffer.
			 * chainSge points to the previous Chain Element.
			 * Update its chain element Offset and Length (must
			 * include chain element size) fields.
			 * Old chain element is now complete.
			 */
			u8 nextChain = (u8) (sgeOffset >> 2);
			sgeOffset += ioc->SGE_size;
			ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
					 ioc->ChainBufferDMA + chain_dma_off);
		} else {
			/* The original MF buffer requires a chain buffer -
			 * set the offset.
			 * Last element in this MF is a chain element.
			 */
			pReq->ChainOffset = (u8) (sgeOffset >> 2);
			RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
			dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Chain Buffer Needed, RequestNB=%x sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
			ioc->RequestNB[req_idx] = RequestNB;
		}

		sges_left -= sg_done;

		/* NOTE: psge points to the beginning of the chain element
		 * in current buffer. Get a chain buffer.
		 */
		if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) {
			dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n",
			    ioc->name, pReq->CDB[0], SCpnt));
			return FAILED;
		}

		/* Update the tracking arrays.
		 * If chainSge == NULL, update ReqToChain, else ChainToChain
		 */
		if (chainSge) {
			ioc->ChainToChain[chain_idx] = newIndex;
		} else {
			ioc->ReqToChain[req_idx] = newIndex;
		}
		chain_idx = newIndex;
		chain_dma_off = ioc->req_sz * chain_idx;

		/* Populate the chainSGE for the current buffer.
		 * - Set chain buffer pointer to psge and fill
		 *   out the Address and Flags fields.
		 */
		chainSge = (char *) psge;
		dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "  Current buff @ %p (index 0x%x)",
			ioc->name, psge, req_idx));

		/* Start the SGE for the next buffer
		 */
		psge = (char *) (ioc->ChainBuffer + chain_dma_off);
		sgeOffset = 0;
		sg_done = 0;

		dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "  Chain buff @ %p (index 0x%x)\n",
			ioc->name, psge, chain_idx));

		/* Start the SGE for the next buffer
		 */
		goto nextSGEset;
	}

	return SUCCESS;
} /* mptscsih_AddSGE() */
/*
 * mptscsih_issue_sep_command - send a SCSI Enclosure Processor
 * slot-status write for the given target (SAS adapters only).
 * @ioc: Pointer to MPT_ADAPTER structure
 * @vtarget: target whose enclosure slot status is to be updated
 * @SlotStatus: MPI_SEP_REQ_SLOTSTATUS_* value to write
 */
static void
mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
	U32 SlotStatus)
{
	MPT_FRAME_HDR *frame;
	SEPRequest_t *sep_req;

	/* SEP messages are only meaningful on SAS adapters */
	if (ioc->bus_type != SAS)
		return;

	/* Not supported for hidden raid components */
	if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
		return;

	frame = mpt_get_msg_frame(ioc->InternalCtx, ioc);
	if (frame == NULL) {
		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
		    ioc->name,__func__));
		return;
	}

	sep_req = (SEPRequest_t *)frame;
	sep_req->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	sep_req->Bus = vtarget->channel;
	sep_req->TargetID = vtarget->id;
	sep_req->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
	sep_req->SlotStatus = SlotStatus;
	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "Sending SEP cmd=%x channel=%d id=%d\n",
	    ioc->name, SlotStatus, sep_req->Bus, sep_req->TargetID));
	mpt_put_msg_frame(ioc->DoneCtx, ioc, frame);
}
#ifdef CONFIG_FUSION_LOGGING
/**
 *	mptscsih_info_scsiio - debug print info on reply frame
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@sc: original scsi cmnd pointer
 *	@pScsiReply: Pointer to MPT reply frame
 *
 *	Decodes the IOCStatus and SCSIStatus fields of a SCSI IO reply into
 *	human-readable strings and prints the command, transfer counts,
 *	sense key data and FCP response info to the kernel log.
 *
 *	MPT_DEBUG_REPLY needs to be enabled to obtain this info
 *
 *	Refer to lsi/mpi.h.
 **/
static void
mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pScsiReply)
{
	char *desc = NULL;
	char *desc1 = NULL;
	u16 ioc_status;
	u8 skey, asc, ascq;

	ioc_status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;

	/* Map the masked IOCStatus to a description string */
	switch (ioc_status) {

	case MPI_IOCSTATUS_SUCCESS:
		desc = "success";
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		desc = "invalid bus";
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		desc = "invalid target_id";
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc = "device not there";
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc = "data overrun";
		break;
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc = "data underrun";
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc = "I/O data error";
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc = "protocol error";
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc = "task terminated";
		break;
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc = "residual mismatch";
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc = "task management failed";
		break;
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc = "IOC terminated";
		break;
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc = "ext terminated";
		break;
	default:
		desc = "";
		break;
	}

	/* Map the raw SCSIStatus byte to a description string */
	switch (pScsiReply->SCSIStatus)
	{

	case MPI_SCSI_STATUS_SUCCESS:
		desc1 = "success";
		break;
	case MPI_SCSI_STATUS_CHECK_CONDITION:
		desc1 = "check condition";
		break;
	case MPI_SCSI_STATUS_CONDITION_MET:
		desc1 = "condition met";
		break;
	case MPI_SCSI_STATUS_BUSY:
		desc1 = "busy";
		break;
	case MPI_SCSI_STATUS_INTERMEDIATE:
		desc1 = "intermediate";
		break;
	case MPI_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc1 = "intermediate condmet";
		break;
	case MPI_SCSI_STATUS_RESERVATION_CONFLICT:
		desc1 = "reservation conflict";
		break;
	case MPI_SCSI_STATUS_COMMAND_TERMINATED:
		desc1 = "command terminated";
		break;
	case MPI_SCSI_STATUS_TASK_SET_FULL:
		desc1 = "task set full";
		break;
	case MPI_SCSI_STATUS_ACA_ACTIVE:
		desc1 = "aca active";
		break;
	case MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT:
		desc1 = "fcpext device logged out";
		break;
	case MPI_SCSI_STATUS_FCPEXT_NO_LINK:
		desc1 = "fcpext no link";
		break;
	case MPI_SCSI_STATUS_FCPEXT_UNASSIGNED:
		desc1 = "fcpext unassigned";
		break;
	default:
		desc1 = "";
		break;
	}

	scsi_print_command(sc);
	printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
	    ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
	printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
	    "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
	    scsi_get_resid(sc));
	printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
	    "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
	    le32_to_cpu(pScsiReply->TransferCount), sc->result);
	printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
	    "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
	    ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
	    pScsiReply->SCSIState);

	if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		/* Fixed-format sense data: key at [2], ASC/ASCQ at [12]/[13] */
		skey = sc->sense_buffer[2] & 0x0F;
		asc = sc->sense_buffer[12];
		ascq = sc->sense_buffer[13];
		printk(MYIOC_s_DEBUG_FMT "\t[sense_key,asc,ascq]: "
		    "[0x%02x,0x%02x,0x%02x]\n", ioc->name, skey, asc, ascq);
	}

	/*
	 *  Look for + dump FCP ResponseInfo[]!
	 */
	if (pScsiReply->SCSIState & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
	    pScsiReply->ResponseInfo)
		printk(MYIOC_s_DEBUG_FMT "response_info = %08xh\n",
		    ioc->name, le32_to_cpu(pScsiReply->ResponseInfo));
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_io_done - Main SCSI IO callback routine registered to
 *	Fusion MPT (base) driver
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame
 *	@mr: Pointer to MPT reply frame (NULL if TurboReply)
 *
 *	This routine is called from mpt.c::mpt_interrupt() at the completion
 *	of any SCSI IO request.
 *	This routine is registered with the Fusion MPT (base) driver at driver
 *	load/init time via the mpt_register() API call.
 *
 *	Translates the MPI IOCStatus/SCSIStatus of the reply into a Linux
 *	SCSI midlayer result, copies sense data, unmaps DMA and completes
 *	the command via sc->scsi_done().
 *
 *	Returns 1 indicating alloc'd request frame ptr should be freed.
 */
int
mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
	struct scsi_cmnd	*sc;
	MPT_SCSI_HOST	*hd;
	SCSIIORequest_t	*pScsiReq;
	SCSIIOReply_t	*pScsiReply;
	u16		 req_idx, req_idx_MR;
	VirtDevice	 *vdevice;
	VirtTarget	 *vtarget;

	hd = shost_priv(ioc->sh);
	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
	req_idx_MR = (mr != NULL) ?
	    le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;

	/* Special case, where already freed message frame is received from
	 * Firmware. It happens with Resetting IOC.
	 * Return immediately. Do not care
	 */
	if ((req_idx != req_idx_MR) ||
	    (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
		return 0;

	sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
	if (sc == NULL) {
		MPIHeader_t *hdr = (MPIHeader_t *)mf;

		/* Remark: writeSDP1 will use the ScsiDoneCtx
		 * If a SCSI I/O cmd, device disabled by OS and
		 * completion done. Cannot touch sc struct. Just free mem.
		 */
		if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST)
			printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n",
			    ioc->name);

		mptscsih_freeChainBuffers(ioc, req_idx);
		return 1;
	}

	/* Stale lookup entry: the frame no longer belongs to this command */
	if ((unsigned char *)mf != sc->host_scribble) {
		mptscsih_freeChainBuffers(ioc, req_idx);
		return 1;
	}

	if (ioc->bus_type == SAS) {
		VirtDevice *vdevice = sc->device->hostdata;

		/* Device went away while the command was in flight */
		if (!vdevice || !vdevice->vtarget ||
		    vdevice->vtarget->deleted) {
			sc->result = DID_NO_CONNECT << 16;
			goto out;
		}
	}

	sc->host_scribble = NULL;
	sc->result = DID_OK << 16;		/* Set default reply as OK */
	pScsiReq = (SCSIIORequest_t *) mf;
	pScsiReply = (SCSIIOReply_t *) mr;

	if((ioc->facts.MsgVersion >= MPI_VERSION_01_05) && pScsiReply){
		dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d,task-tag=%d)\n",
			ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag));
	}else{
		dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
			ioc->name, mf, mr, sc, req_idx));
	}

	if (pScsiReply == NULL) {
		/* special context reply handling */
		;
	} else {
		u32	 xfer_cnt;
		u16	 status;
		u8	 scsi_state, scsi_status;
		u32	 log_info;

		status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
		scsi_state = pScsiReply->SCSIState;
		scsi_status = pScsiReply->SCSIStatus;
		xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
		scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
		log_info = le32_to_cpu(pScsiReply->IOCLogInfo);

		/*
		 *  if we get a data underrun indication, yet no data was
		 *  transferred and the SCSI status indicates that the
		 *  command was never started, change the data underrun
		 *  to success
		 */
		if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
		    (scsi_status == MPI_SCSI_STATUS_BUSY ||
		     scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
		     scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
			status = MPI_IOCSTATUS_SUCCESS;
		}

		if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
			mptscsih_copy_sense_data(sc, hd, mf, pScsiReply);

		/*
		 *  Look for + dump FCP ResponseInfo[]!
		 */
		if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
		    pScsiReply->ResponseInfo) {
			printk(MYIOC_s_NOTE_FMT "[%d:%d:%d:%d] "
			"FCP_ResponseInfo=%08xh\n", ioc->name,
			sc->device->host->host_no, sc->device->channel,
			sc->device->id, sc->device->lun,
			le32_to_cpu(pScsiReply->ResponseInfo));
		}

		switch(status) {
		case MPI_IOCSTATUS_BUSY:			/* 0x0002 */
		case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:	/* 0x0006 */
			/* CHECKME!
			 * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
			 * But not: DID_BUS_BUSY lest one risk
			 * killing interrupt handler:-(
			 */
			sc->result = SAM_STAT_BUSY;
			break;

		case MPI_IOCSTATUS_SCSI_INVALID_BUS:		/* 0x0041 */
		case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:	/* 0x0042 */
			sc->result = DID_BAD_TARGET << 16;
			break;

		case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
			/* Spoof to SCSI Selection Timeout! */
			if (ioc->bus_type != FC)
				sc->result = DID_NO_CONNECT << 16;
			/* else fibre, just stall until rescan event */
			else
				sc->result = DID_REQUEUE << 16;

			if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
				hd->sel_timeout[pScsiReq->TargetID]++;

			vdevice = sc->device->hostdata;
			if (!vdevice)
				break;
			vtarget = vdevice->vtarget;
			/* Turn off the enclosure LED for the vanished device */
			if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
				mptscsih_issue_sep_command(ioc, vtarget,
				    MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
				vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
			}
			break;

		case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
			if ( ioc->bus_type == SAS ) {
				u16 ioc_status =
					le16_to_cpu(pScsiReply->IOCStatus);
				if ((ioc_status &
					MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
					&&
					((log_info & SAS_LOGINFO_MASK) ==
					SAS_LOGINFO_NEXUS_LOSS)) {
						VirtDevice *vdevice =
						sc->device->hostdata;

					    /* flag the device as being in
					     * device removal delay so we can
					     * notify the midlayer to hold off
					     * on timeout eh */
						if (vdevice && vdevice->
							vtarget &&
							vdevice->vtarget->
							raidVolume)
							printk(KERN_INFO
							"Skipping Raid Volume"
							"for inDMD\n");
						else if (vdevice &&
							vdevice->vtarget)
							vdevice->vtarget->
								inDMD = 1;

					    sc->result =
						    (DID_TRANSPORT_DISRUPTED
						    << 16);
					    break;
				}
			} else if (ioc->bus_type == FC) {
				/*
				 * The FC IOC may kill a request for variety of
				 * reasons, some of which may be recovered by a
				 * retry, some which are unlikely to be
				 * recovered. Return DID_ERROR instead of
				 * DID_RESET to permit retry of the command,
				 * just not an infinite number of them
				 */
				sc->result = DID_ERROR << 16;
				break;
			}

			/*
			 * Allow non-SAS & non-NEXUS_LOSS to drop into below code
			 */
			/* fallthrough */

		case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
			/* Linux handles an unsolicited DID_RESET better
			 * than an unsolicited DID_ABORT.
			 */
			sc->result = DID_RESET << 16;
			/* NOTE(review): no break here — execution falls into
			 * EXT_TERMINATED below, which overwrites sc->result;
			 * matches upstream behavior, confirm intentional. */

		case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
			if (ioc->bus_type == FC)
				sc->result = DID_ERROR << 16;
			else
				sc->result = DID_RESET << 16;
			break;

		case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:	/* 0x0049 */
			scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
			if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
				sc->result=DID_SOFT_ERROR << 16;
			else /* Sufficient data transfer occurred */
				sc->result = (DID_OK << 16) | scsi_status;
			dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "RESIDUAL_MISMATCH: result=%x on channel=%d id=%d\n",
			    ioc->name, sc->result, sc->device->channel, sc->device->id));
			break;

		case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
			/*
			 *  Do upfront check for valid SenseData and give it
			 *  precedence!
			 */
			sc->result = (DID_OK << 16) | scsi_status;
			if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {

				/*
				 * For an Errata on LSI53C1030
				 * When the length of request data
				 * and transfer data are different
				 * with result of command (READ or VERIFY),
				 * DID_SOFT_ERROR is set.
				 */
				if (ioc->bus_type == SPI) {
					if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
					    pScsiReq->CDB[0] == READ_10 ||
					    pScsiReq->CDB[0] == READ_12 ||
						(pScsiReq->CDB[0] == READ_16 &&
						((pScsiReq->CDB[1] & 0x02) == 0)) ||
					    pScsiReq->CDB[0] == VERIFY ||
					    pScsiReq->CDB[0] == VERIFY_16) {
						if (scsi_bufflen(sc) !=
							xfer_cnt) {
							sc->result =
							DID_SOFT_ERROR << 16;
						    printk(KERN_WARNING "Errata"
						    "on LSI53C1030 occurred."
						    "sc->req_bufflen=0x%02x,"
						    "xfer_cnt=0x%02x\n",
						    scsi_bufflen(sc),
						    xfer_cnt);
						}
					}
				}

				if (xfer_cnt < sc->underflow) {
					if (scsi_status == SAM_STAT_BUSY)
						sc->result = SAM_STAT_BUSY;
					else
						sc->result = DID_SOFT_ERROR << 16;
				}
				if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
					/* What to do?
					 */
					sc->result = DID_SOFT_ERROR << 16;
				}
				else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
					/*  Not real sure here either...  */
					sc->result = DID_RESET << 16;
				}
			}


			dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "  sc->underflow={report ERR if < %02xh bytes xfer'd}\n",
			    ioc->name, sc->underflow));
			dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "  ActBytesXferd=%02xh\n", ioc->name, xfer_cnt));

			/* Report Queue Full
			 */
			if (scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)
				mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);

			break;

		case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:		/* 0x0044 */
			scsi_set_resid(sc, 0);
			/* fallthrough */
		case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
		case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
			sc->result = (DID_OK << 16) | scsi_status;
			if (scsi_state == 0) {
				;
			} else if (scsi_state &
			    MPI_SCSI_STATE_AUTOSENSE_VALID) {

				/*
				 * For potential trouble on LSI53C1030.
				 * (date:2007.xx.)
				 * It is checked whether the length of
				 * request data is equal to
				 * the length of transfer and residual.
				 * MEDIUM_ERROR is set by incorrect data.
				 */
				if ((ioc->bus_type == SPI) &&
					(sc->sense_buffer[2] & 0x20)) {
					u32	 difftransfer;
					difftransfer =
					sc->sense_buffer[3] << 24 |
					sc->sense_buffer[4] << 16 |
					sc->sense_buffer[5] << 8 |
					sc->sense_buffer[6];
					if (((sc->sense_buffer[3] & 0x80) ==
						0x80) && (scsi_bufflen(sc)
						!= xfer_cnt)) {
						sc->sense_buffer[2] =
						    MEDIUM_ERROR;
						sc->sense_buffer[12] = 0xff;
						sc->sense_buffer[13] = 0xff;
						printk(KERN_WARNING"Errata"
						"on LSI53C1030 occurred."
						"sc->req_bufflen=0x%02x,"
						"xfer_cnt=0x%02x\n" ,
						scsi_bufflen(sc),
						xfer_cnt);
					}
					if (((sc->sense_buffer[3] & 0x80)
						!= 0x80) &&
						(scsi_bufflen(sc) !=
						xfer_cnt + difftransfer)) {
						sc->sense_buffer[2] =
							MEDIUM_ERROR;
						sc->sense_buffer[12] = 0xff;
						sc->sense_buffer[13] = 0xff;
						printk(KERN_WARNING
						"Errata on LSI53C1030 occurred"
						"sc->req_bufflen=0x%02x,"
						" xfer_cnt=0x%02x,"
						"difftransfer=0x%02x\n",
						scsi_bufflen(sc),
						xfer_cnt,
						difftransfer);
					}
				}

				/*
				 * If running against circa 200003dd 909 MPT f/w,
				 * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
				 * (QUEUE_FULL) returned from device! --> get 0x0000?128
				 * and with SenseBytes set to 0.
				 */
				if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL)
					mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);

			}
			else if (scsi_state &
			    (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)
			    ) {
				/*
				 * What to do?
				 */
				sc->result = DID_SOFT_ERROR << 16;
			}
			else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
				/*  Not real sure here either...  */
				sc->result = DID_RESET << 16;
			}
			else if (scsi_state & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) {
				/* Device Inq. data indicates that it supports
				 * QTags, but rejects QTag messages.
				 * This command completed OK.
				 *
				 * Not real sure here either so do nothing...  */
			}

			if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL)
				mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);

			/* Add handling of:
			 * Reservation Conflict, Busy,
			 * Command Terminated, CHECK
			 */
			break;

		case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:		/* 0x0047 */
			sc->result = DID_SOFT_ERROR << 16;
			break;

		case MPI_IOCSTATUS_INVALID_FUNCTION:		/* 0x0001 */
		case MPI_IOCSTATUS_INVALID_SGL:			/* 0x0003 */
		case MPI_IOCSTATUS_INTERNAL_ERROR:		/* 0x0004 */
		case MPI_IOCSTATUS_RESERVED:			/* 0x0005 */
		case MPI_IOCSTATUS_INVALID_FIELD:		/* 0x0007 */
		case MPI_IOCSTATUS_INVALID_STATE:		/* 0x0008 */
		case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
		case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:	/* 0x004A */
		default:
			/*
			 * What to do?
			 */
			sc->result = DID_SOFT_ERROR << 16;
			break;

		}	/* switch(status) */

#ifdef CONFIG_FUSION_LOGGING
		if (sc->result && (ioc->debug_level & MPT_DEBUG_REPLY))
			mptscsih_info_scsiio(ioc, sc, pScsiReply);
#endif

	} /* end of address reply case */
out:
	/* Unmap the DMA buffers, if any. */
	scsi_dma_unmap(sc);

	sc->scsi_done(sc);		/* Issue the command callback */

	/* Free Chain buffers */
	mptscsih_freeChainBuffers(ioc, req_idx);
	return 1;
}
/*
 *	mptscsih_flush_running_cmds - For each command found, search
 *		Scsi_Host instance taskQ and reply to OS.
 *		Called only if recovering from a FW reload.
 *	@hd: Pointer to a SCSI HOST structure
 *
 *	Walks the whole ScsiLookup table, and for every still-outstanding
 *	command frees its chain buffers and message frame, unmaps its DMA
 *	and completes it back to the midlayer with DID_RESET.
 *
 *	Returns: None.
 *
 *	Must be called while new I/Os are being queued.
 */
void
mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
{
	MPT_ADAPTER *ioc = hd->ioc;
	struct scsi_cmnd *sc;
	SCSIIORequest_t	*mf = NULL;
	int		 ii;
	int		 channel, id;

	for (ii= 0; ii < ioc->req_depth; ii++) {
		/* Atomically claim (and clear) the lookup slot */
		sc = mptscsih_getclear_scsi_lookup(ioc, ii);
		if (!sc)
			continue;
		mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii);
		if (!mf)
			continue;
		channel = mf->Bus;
		id = mf->TargetID;
		mptscsih_freeChainBuffers(ioc, ii);
		mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf);
		/* NOTE(review): the frame is freed before this ownership
		 * check, and scsi_dma_unmap() runs after the free — matches
		 * upstream ordering, but worth confirming. */
		if ((unsigned char *)mf != sc->host_scribble)
			continue;
		scsi_dma_unmap(sc);
		sc->result = DID_RESET << 16;
		sc->host_scribble = NULL;
		dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
		    "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
		    "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
		sc->scsi_done(sc);
	}
}
EXPORT_SYMBOL(mptscsih_flush_running_cmds);
/*
 *	mptscsih_search_running_cmds - Delete any commands associated
 *		with the specified target and lun. Function called only
 *		when a lun is disable by mid-layer.
 *		Do NOT access the referenced scsi_cmnd structure or
 *		members. Will cause either a paging or NULL ptr error.
 *		(BUT, BUT, BUT, the code does reference it! - mdr)
 *	@hd: Pointer to a SCSI HOST structure
 *	@vdevice: per device private data
 *
 *	Scans the ScsiLookup table under scsi_lookup_lock for frames whose
 *	Bus/TargetID/LUN match @vdevice, and completes each matching
 *	command with DID_NO_CONNECT. The lock is dropped around the
 *	teardown/completion of each match and re-taken afterwards.
 *
 *	Returns: None.
 *
 *	Called from slave_destroy.
 */
static void
mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
	SCSIIORequest_t	*mf = NULL;
	int		 ii;
	struct scsi_cmnd *sc;
	struct scsi_lun  lun;
	MPT_ADAPTER *ioc = hd->ioc;
	unsigned long	flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (ii = 0; ii < ioc->req_depth; ii++) {
		if ((sc = ioc->ScsiLookup[ii]) != NULL) {

			mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii);
			if (mf == NULL)
				continue;
			/* If the device is a hidden raid component, then its
			 * expected that the mf->function will be RAID_SCSI_IO
			 */
			if (vdevice->vtarget->tflags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT && mf->Function !=
			    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
				continue;

			int_to_scsilun(vdevice->lun, &lun);
			/* Match on channel, target id and the full 8-byte LUN */
			if ((mf->Bus != vdevice->vtarget->channel) ||
			    (mf->TargetID != vdevice->vtarget->id) ||
			     memcmp(lun.scsi_lun, mf->LUN, 8))
				continue;

			if ((unsigned char *)mf != sc->host_scribble)
				continue;
			/* Claim the slot, then drop the lock so the command
			 * can be torn down and completed without holding it */
			ioc->ScsiLookup[ii] = NULL;
			spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
			mptscsih_freeChainBuffers(ioc, ii);
			mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf);
			scsi_dma_unmap(sc);
			sc->host_scribble = NULL;
			sc->result = DID_NO_CONNECT << 16;
			dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
			   MYIOC_s_FMT "completing cmds: fw_channel %d, "
			   "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
			   vdevice->vtarget->channel, vdevice->vtarget->id,
			   sc, mf, ii));
			sc->scsi_done(sc);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_report_queue_full - Report QUEUE_FULL status returned
 *	from a SCSI target device.
 *	@sc: Pointer to scsi_cmnd structure
 *	@pScsiReply: Pointer to SCSIIOReply_t
 *	@pScsiReq: Pointer to original SCSI request
 *
 *	Logs a QUEUE_FULL report via dprintk(), rate-limited to at most
 *	once every 10 seconds per host.
 */
static void
mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq)
{
	MPT_SCSI_HOST *hd;
	MPT_ADAPTER *ioc;
	long now = jiffies;

	if (sc->device == NULL)
		return;
	if (sc->device->host == NULL)
		return;
	hd = shost_priv(sc->device->host);
	if (hd == NULL)
		return;
	ioc = hd->ioc;
	/* NOTE(review): plain subtraction rather than time_after(); jiffies
	 * wrap only delays one report, so this matches upstream behavior. */
	if (now - hd->last_queue_full > 10 * HZ) {
		dprintk(ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n",
		    ioc->name, 0, sc->device->id, sc->device->lun));
		hd->last_queue_full = now;
	}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_remove - Remove the SCSI host and free driver resources.
 *	@pdev: Pointer to pci_dev structure
 *
 *	Unregisters the Scsi_Host, releases the ScsiLookup table and the
 *	/proc info buffer, then detaches from the MPT base driver.
 */
void
mptscsih_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
	struct Scsi_Host *shost = ioc->sh;
	MPT_SCSI_HOST *hd;
	int lookup_sz = 0;

	scsi_remove_host(shost);

	hd = shost_priv(shost);
	if (hd == NULL)
		return;

	mptscsih_shutdown(pdev);

	if (ioc->ScsiLookup != NULL) {
		lookup_sz = ioc->req_depth * sizeof(void *);
		kfree(ioc->ScsiLookup);
		ioc->ScsiLookup = NULL;
	}

	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "Free'd ScsiLookup (%d) memory\n",
	    ioc->name, lookup_sz));

	kfree(hd->info_kbuf);

	/* NULL the Scsi_Host pointer
	 */
	ioc->sh = NULL;

	scsi_host_put(shost);
	mpt_detach(pdev);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_shutdown - reboot notifier
 *	@pdev: PCI device being shut down
 *
 *	Intentionally empty at this layer: no shutdown work is performed
 *	here (body left blank in the visible source).
 */
void
mptscsih_shutdown(struct pci_dev *pdev)
{
}
#ifdef CONFIG_PM
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_suspend - Fusion MPT scsi driver suspend routine.
 *	@pdev: PCI device being suspended
 *	@state: target power-management state
 *
 *	Blocks new SCSI requests on the host, flushes scheduled work,
 *	runs the shutdown hook, then delegates to the MPT base driver's
 *	mpt_suspend() and returns its result.
 */
int
mptscsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
	scsi_block_requests(ioc->sh);
	flush_scheduled_work();
	mptscsih_shutdown(pdev);
	return mpt_suspend(pdev,state);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptscsih_resume - Fusion MPT scsi driver resume routine.
 *	@pdev: PCI device being resumed
 *
 *	Resumes the MPT base driver first, then re-enables SCSI request
 *	queuing on the host. Returns mpt_resume()'s result.
 */
int
mptscsih_resume(struct pci_dev *pdev)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
	int rc = mpt_resume(pdev);

	scsi_unblock_requests(ioc->sh);
	return rc;
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_info - Return information about MPT adapter
* @SChost: Pointer to Scsi_Host structure
*
* (linux scsi_host_template.info routine)
*
* Returns pointer to buffer where information was written.
*/
const char *
mptscsih_info(struct Scsi_Host *SChost)
{
	MPT_SCSI_HOST *h;
	int size = 0;

	h = shost_priv(SChost);
	/* Fix: the original fell through to "return h->info_kbuf" even when
	 * h was NULL, dereferencing a NULL pointer.  Bail out early instead.
	 */
	if (h == NULL)
		return NULL;

	/* Lazily allocate the 4Kb summary buffer on first use; it is freed
	 * in mptscsih_remove().
	 */
	if (h->info_kbuf == NULL) {
		h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL);
		if (h->info_kbuf == NULL)
			return NULL;	/* same value the original returned */
	}

	h->info_kbuf[0] = '\0';
	mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
	/* NOTE(review): assumes mpt_print_ioc_summary() leaves size >= 1;
	 * size == 0 would index info_kbuf[-1] -- confirm against mptbase.c.
	 */
	h->info_kbuf[size-1] = '\0';

	return h->info_kbuf;
}
/* Bookkeeping for building /proc output into a caller-supplied window:
 * bytes before @offset are skipped, and copying stops once @length
 * output positions have been consumed.
 */
struct info_str {
char *buffer; /* destination buffer */
int length; /* size of the output window */
int offset; /* bytes to skip before copying begins */
int pos; /* current logical output position */
};
/*
 * mptscsih_copy_mem_info - append @len bytes of @data to the output
 * window described by @info, honouring the caller-requested offset.
 *
 * Bytes that logically precede @info->offset are skipped; nothing is
 * written beyond @info->length positions.
 */
static void
mptscsih_copy_mem_info(struct info_str *info, char *data, int len)
{
	int avail = info->length - info->pos;

	/* Clip the chunk to the remaining window. */
	if (len > avail)
		len = avail;

	/* Chunk lies entirely before the requested offset: just advance. */
	if (info->pos + len < info->offset) {
		info->pos += len;
		return;
	}

	/* Chunk straddles the offset: drop the leading bytes. */
	if (info->pos < info->offset) {
		int skip = info->offset - info->pos;

		data += skip;
		len -= skip;
	}

	if (len > 0) {
		memcpy(info->buffer + info->pos, data, len);
		info->pos += len;
	}
}
/*
 * mptscsih_copy_info - printf-style append into an info_str window.
 *
 * Fix: the original used vsprintf() into a fixed 81-byte stack buffer,
 * which overflows the stack if the expansion is longer.  Use vsnprintf()
 * and clamp the reported length to what actually fits.
 */
static int
mptscsih_copy_info(struct info_str *info, char *fmt, ...)
{
	va_list args;
	char buf[81];
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* vsnprintf() reports the would-be length on truncation; only the
	 * NUL-terminated portion of buf is valid to copy.
	 */
	if (len >= (int)sizeof(buf))
		len = sizeof(buf) - 1;

	mptscsih_copy_mem_info(info, buf, len);
	return len;
}
/* Build the one-line adapter summary (name, product, firmware revision,
 * port count, queue depth) into the caller's window starting at @offset.
 * Returns the number of bytes placed after the offset.
 */
static int
mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len)
{
struct info_str info;
info.buffer = pbuf;
info.length = len;
info.offset = offset;
info.pos = 0;
mptscsih_copy_info(&info, "%s: %s, ", ioc->name, ioc->prod_name);
mptscsih_copy_info(&info, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
mptscsih_copy_info(&info, "Ports=%d, ", ioc->facts.NumberOfPorts);
mptscsih_copy_info(&info, "MaxQ=%d\n", ioc->req_depth);
return ((info.pos > info.offset) ? info.pos - info.offset : 0);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_proc_info - Return information about MPT adapter
* @host: scsi host struct
* @buffer: if write, user data; if read, buffer for user
* @start: returns the buffer address
* @offset: if write, 0; if read, the current offset into the buffer from
* the previous read.
* @length: if write, return length;
* @func: write = 1; read = 0
*
* (linux scsi_host_template.proc_info routine)
*/
/*
 * mptscsih_proc_info - /proc read/write entry point for the adapter.
 *
 * Writes (@func != 0) are not supported and report zero bytes consumed;
 * reads delegate to mptscsih_host_info() for the summary text.
 */
int
mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
	int length, int func)
{
	MPT_SCSI_HOST *hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;

	/* Write requests are silently ignored (size stays 0). */
	if (func)
		return 0;

	if (start)
		*start = buffer;
	return mptscsih_host_info(ioc, buffer, offset, length);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Hook for per-request index logging; compiled out (no-op) in this build. */
#define ADD_INDEX_LOG(req_ent) do { } while(0)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
* @SCpnt: Pointer to scsi_cmnd structure
* @done: Pointer SCSI mid-layer IO completion function
*
* (linux scsi_host_template.queuecommand routine)
* This is the primary SCSI IO start routine. Create a MPI SCSIIORequest
* from a linux scsi_cmnd request and send it to the IOC.
*
* Returns 0. (rtn value discarded by linux scsi mid-layer)
*/
int
mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
VirtDevice *vdevice = SCpnt->device->hostdata;
u32 datalen;
u32 scsictl;
u32 scsidir;
u32 cmd_len;
int my_idx;
int ii;
MPT_ADAPTER *ioc;
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
SCpnt->scsi_done = done;
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
ioc->name, SCpnt, done));
/* Task management in progress: ask the mid-layer to retry later. */
if (ioc->taskmgmt_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
/*
* Put together a MPT SCSI request...
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dprintk(ioc, printk(MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n",
ioc->name));
return SCSI_MLQUEUE_HOST_BUSY;
}
pScsiReq = (SCSIIORequest_t *) mf;
/* Frame index doubles as the slot into ScsiLookup and the sense pool. */
my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
ADD_INDEX_LOG(my_idx);
/* TUR's being issued with scsictl=0x02000000 (DATA_IN)!
* Seems we may receive a buffer (datalen>0) even when there
* will be no data transfer! GRRRRR...
*/
if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
} else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
} else {
datalen = 0;
scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER;
}
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
if (vdevice
&& (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
/* ioprio class 1 or 0 (lowest 3 bits) gets head-of-queue priority. */
if (SCpnt->request && SCpnt->request->ioprio) {
if (((SCpnt->request->ioprio & 0x7) == 1) ||
!(SCpnt->request->ioprio & 0x7))
scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
}
} else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
/* Use the above information to set up the message frame
*/
pScsiReq->TargetID = (u8) vdevice->vtarget->id;
pScsiReq->Bus = vdevice->vtarget->channel;
pScsiReq->ChainOffset = 0;
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
else
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
pScsiReq->CDBLength = SCpnt->cmd_len;
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
pScsiReq->Reserved = 0;
pScsiReq->MsgFlags = mpt_msg_flags(ioc);
int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
pScsiReq->Control = cpu_to_le32(scsictl);
/*
* Write SCSI CDB into the message
*/
cmd_len = SCpnt->cmd_len;
for (ii=0; ii < cmd_len; ii++)
pScsiReq->CDB[ii] = SCpnt->cmnd[ii];
/* Zero-pad the CDB out to its full 16 bytes. */
for (ii=cmd_len; ii < 16; ii++)
pScsiReq->CDB[ii] = 0;
/* DataLength */
pScsiReq->DataLength = cpu_to_le32(datalen);
/* SenseBuffer low address: each request owns a fixed slot in the
 * shared sense pool, addressed by frame index.
 */
pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
+ (my_idx * MPT_SENSE_BUFFER_ALLOC));
/* Now add the SG list
* Always have a SGE even if null length.
*/
if (datalen == 0) {
/* Add a NULL SGE */
ioc->add_sge((char *)&pScsiReq->SGL,
MPT_SGE_FLAGS_SSIMPLE_READ | 0,
(dma_addr_t) -1);
} else {
/* Add a 32 or 64 bit SGE */
if (mptscsih_AddSGE(ioc, SCpnt, pScsiReq, my_idx) != SUCCESS)
goto fail;
}
/* Track the frame so completion and abort paths can find this command. */
SCpnt->host_scribble = (unsigned char *)mf;
mptscsih_set_scsi_lookup(ioc, my_idx, SCpnt);
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
ioc->name, SCpnt, mf, my_idx));
DBG_DUMP_REQUEST_FRAME(ioc, (u32 *)mf);
return 0;
fail:
/* Chain SGE setup failed: release resources and request a retry. */
mptscsih_freeChainBuffers(ioc, my_idx);
mpt_free_msg_frame(ioc, mf);
return SCSI_MLQUEUE_HOST_BUSY;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_freeChainBuffers - Function to free chain buffers associated
* with a SCSI IO request
* @ioc: Pointer to the MPT_ADAPTER instance
* @req_idx: Index of the SCSI IO request frame.
*
* Called if SG chain buffer allocation fails and mptscsih callbacks.
* No return.
*/
static void
mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
{
MPT_FRAME_HDR *chain;
unsigned long flags;
int chain_idx;
int next;
/* Get the first chain index and reset
* tracker state.
*/
chain_idx = ioc->ReqToChain[req_idx];
ioc->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN;
/* Walk the singly-linked chain list (ChainToChain), returning each
 * buffer to the adapter's free pool under FreeQlock.
 */
while (chain_idx != MPT_HOST_NO_CHAIN) {
/* Save the next chain buffer index */
next = ioc->ChainToChain[chain_idx];
/* Free this chain buffer and reset
* tracker
*/
ioc->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN;
chain = (MPT_FRAME_HDR *) (ioc->ChainBuffer
+ (chain_idx * ioc->req_sz));
spin_lock_irqsave(&ioc->FreeQlock, flags);
list_add_tail(&chain->u.frame.linkage.list, &ioc->FreeChainQ);
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FreeChainBuffers (index %d)\n",
ioc->name, chain_idx));
/* handle next */
chain_idx = next;
}
return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_IssueTaskMgmt - Generic send Task Management function.
* @hd: Pointer to MPT_SCSI_HOST structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
* @lun: Logical Unit for reset (if appropriate)
* @ctx2abort: Context for the task to be aborted (if appropriate)
* @timeout: timeout for task management control
*
* Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
* or a non-interrupt thread. In the former, must not call schedule().
*
* Not all fields are meaningful for all task types.
*
* Returns 0 for SUCCESS, or FAILED.
*
**/
int
mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
int ctx2abort, ulong timeout)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
int ii;
int retval;
MPT_ADAPTER *ioc = hd->ioc;
unsigned long timeleft;
u8 issue_hard_reset;
u32 ioc_raw_state;
unsigned long time_count;
issue_hard_reset = 0;
/* TM requires an operational IOC: otherwise force a hard reset and
 * return 0 (the reset itself is the recovery action here).
 */
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
"TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
ioc->name, type, ioc_raw_state);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __func__);
if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
"FAILED!!\n", ioc->name);
return 0;
}
/* DOORBELL ACTIVE check is not required if
* MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q is supported.
*/
if (!((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q)
&& (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) &&
(ioc_raw_state & MPI_DOORBELL_ACTIVE)) {
printk(MYIOC_s_WARN_FMT
"TaskMgmt type=%x: ioc_state: "
"DOORBELL_ACTIVE (0x%x)!\n",
ioc->name, type, ioc_raw_state);
return FAILED;
}
/* Serialize: only one TM request may be outstanding at a time. */
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
mf = NULL;
retval = FAILED;
goto out;
}
/* Return Fail to calling function if no message frames available.
*/
if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt no msg frames!!\n", ioc->name));
retval = FAILED;
mpt_clear_taskmgmt_in_progress_flag(ioc);
goto out;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
ioc->name, mf));
/* Format the Request
*/
pScsiTm = (SCSITaskMgmt_t *) mf;
pScsiTm->TargetID = id;
pScsiTm->Bus = channel;
pScsiTm->ChainOffset = 0;
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->Reserved = 0;
pScsiTm->TaskType = type;
pScsiTm->Reserved1 = 0;
/* Bus resets optionally carry the LIP-reset flag (FC semantics). */
pScsiTm->MsgFlags = (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)
? MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0;
int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
for (ii=0; ii < 7; ii++)
pScsiTm->Reserved2[ii] = 0;
pScsiTm->TaskMsgContext = ctx2abort;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
"task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
type, timeout));
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
time_count = jiffies;
/* Use the high-priority queue when firmware supports it; otherwise
 * fall back to the doorbell handshake protocol.
 */
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
else {
retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
if (retval) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
ioc->name, mf, retval));
mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
goto out;
}
}
timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
retval = FAILED;
dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
mpt_clear_taskmgmt_in_progress_flag(ioc);
/* An IOC reset already cleaned up; skip the extra hard reset. */
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
issue_hard_reset = 1;
goto out;
}
retval = mptscsih_taskmgmt_reply(ioc, type,
(SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt completed (%d seconds)\n",
ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
out:
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
if (issue_hard_reset) {
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!! doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
retval = (ioc->bus_type == SAS) ?
mpt_HardResetHandler(ioc, CAN_SLEEP) :
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
/* Normalize: anything non-zero becomes FAILED for the caller. */
retval = (retval == 0) ? 0 : FAILED;
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
return retval;
}
EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
/*
 * mptscsih_get_tm_timeout - per-fabric task-management timeout, in seconds.
 *
 * FC gets 40s, SAS 30s; SPI and any other bus type default to 10s.
 */
static int
mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
{
	if (ioc->bus_type == FC)
		return 40;
	if (ioc->bus_type == SAS)
		return 30;
	/* SPI and anything else. */
	return 10;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_abort - Abort linux scsi_cmnd routine, new_eh variant
* @SCpnt: Pointer to scsi_cmnd structure, IO to be aborted
*
* (linux scsi_host_template.eh_abort_handler routine)
*
* Returns SUCCESS or FAILED.
**/
int
mptscsih_abort(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
u32 ctx2abort;
int scpnt_idx;
int retval;
VirtDevice *vdevice;
MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL) {
SCpnt->result = DID_RESET << 16;
SCpnt->scsi_done(SCpnt);
printk(KERN_ERR MYNAM ": task abort: "
"can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting task abort! (sc=%p)\n",
ioc->name, SCpnt);
scsi_print_command(SCpnt);
/* Device already deleted: complete the command ourselves. */
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: device has been deleted (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
retval = SUCCESS;
goto out;
}
/* Task aborts are not supported for hidden raid components.
*/
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: hidden raid component (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_RESET << 16;
retval = FAILED;
goto out;
}
/* Task aborts are not supported for volumes.
*/
if (vdevice->vtarget->raidVolume) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: raid volume (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_RESET << 16;
retval = FAILED;
goto out;
}
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
/* Cmd not found in ScsiLookup.
* Do OS callback.
*/
SCpnt->result = DID_RESET << 16;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
"Command not in the active list! (sc=%p)\n", ioc->name,
SCpnt));
retval = SUCCESS;
goto out;
}
/* NOTE(review): 'timeouts < -1' can never be true for a counter that
 * starts >= 0 and only increments; same pattern appears in
 * mptscsih_bus_reset() -- confirm the intended condition.
 */
if (ioc->timeouts < -1)
ioc->timeouts++;
if (mpt_fwfault_debug)
mpt_halt_firmware(ioc);
/* Most important! Set TaskMsgContext to SCpnt's MsgContext!
* (the IO to be ABORT'd)
*
* NOTE: Since we do not byteswap MsgContext, we do not
* swap it here either. It is an opaque cookie to
* the controller, so it does not matter. -DaveM
*/
mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
vdevice->vtarget->channel,
vdevice->vtarget->id, vdevice->lun,
ctx2abort, mptscsih_get_tm_timeout(ioc));
/* Success is judged by whether the command actually left the active
 * list, not only by the TM return code.
 */
if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: command still in active list! (sc=%p)\n",
ioc->name, SCpnt));
retval = FAILED;
} else {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"task abort: command cleared from active list! (sc=%p)\n",
ioc->name, SCpnt));
retval = SUCCESS;
}
out:
printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p)\n",
ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
SCpnt);
return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_dev_reset - Perform a SCSI TARGET_RESET! new_eh variant
* @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
*
* (linux scsi_host_template.eh_dev_reset_handler routine)
*
* Returns SUCCESS or FAILED.
**/
int
mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
VirtDevice *vdevice;
MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL){
printk(KERN_ERR MYNAM ": target reset: "
"Can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
ioc->name, SCpnt);
scsi_print_command(SCpnt);
/* Device already gone: report success so error handling can progress. */
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
retval = 0;
goto out;
}
/* Target reset to hidden raid component is not supported
*/
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
retval = FAILED;
goto out;
}
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
vdevice->vtarget->channel,
vdevice->vtarget->id, 0, 0,
mptscsih_get_tm_timeout(ioc));
out:
/* mptscsih_IssueTaskMgmt() returns 0 on success, FAILED otherwise. */
printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
else
return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_bus_reset - Perform a SCSI BUS_RESET! new_eh variant
* @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
*
* (linux scsi_host_template.eh_bus_reset_handler routine)
*
* Returns SUCCESS or FAILED.
**/
int
mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
VirtDevice *vdevice;
MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL){
printk(KERN_ERR MYNAM ": bus reset: "
"Can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
}
ioc = hd->ioc;
printk(MYIOC_s_INFO_FMT "attempting bus reset! (sc=%p)\n",
ioc->name, SCpnt);
scsi_print_command(SCpnt);
/* NOTE(review): 'timeouts < -1' mirrors mptscsih_abort(); the
 * condition can never be true for a non-negative counter -- confirm.
 */
if (ioc->timeouts < -1)
ioc->timeouts++;
/* Device already gone: nothing on this bus entry to reset. */
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget)
return SUCCESS;
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
vdevice->vtarget->channel, 0, 0, 0,
mptscsih_get_tm_timeout(ioc));
printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
else
return FAILED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant)
* @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
*
* (linux scsi_host_template.eh_host_reset_handler routine)
*
* Returns SUCCESS or FAILED.
*/
/*
 * mptscsih_host_reset - full adapter reset for the SCSI error handler.
 *
 * Flushes every outstanding command, then asks the base driver to reset
 * the IOC.  SUCCESS is returned unless the reset handler reports a
 * negative result; on FAILED the mid-layer takes the host offline.
 */
int
mptscsih_host_reset(struct scsi_cmnd *SCpnt)
{
	MPT_SCSI_HOST *hd;
	MPT_ADAPTER *ioc;
	int retval;
	int status;

	/* Without a host structure there is nothing we can reset. */
	hd = shost_priv(SCpnt->device->host);
	if (hd == NULL) {
		printk(KERN_ERR MYNAM ": host reset: "
		    "Can't locate host! (sc=%p)\n", SCpnt);
		return FAILED;
	}

	/* make sure we have no outstanding commands at this stage */
	mptscsih_flush_running_cmds(hd);

	ioc = hd->ioc;
	printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
	    ioc->name, SCpnt);

	/* If our attempts to reset the host failed, then return a failed
	 * status.  The host will be taken off line by the SCSI mid-layer.
	 */
	retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
	status = (retval < 0) ? FAILED : SUCCESS;

	printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
	    ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
	return status;
}
/* Interpret a task-management reply frame: returns 0 on success,
 * FAILED otherwise.
 */
static int
mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
SCSITaskMgmtReply_t *pScsiTmReply)
{
u16 iocstatus;
u32 termination_count;
int retval;
/* No valid reply frame was captured: cannot judge the TM outcome. */
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
retval = FAILED;
goto out;
}
DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
"\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
"\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
termination_count));
/* Response codes are only decoded for MPI 1.5+ firmware. */
if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
pScsiTmReply->ResponseCode)
mptscsih_taskmgmt_response_code(ioc,
pScsiTmReply->ResponseCode);
if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
retval = 0;
goto out;
}
retval = FAILED;
/* A task abort counts as successful only if exactly one command was
 * terminated.
 */
if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
if (termination_count == 1)
retval = 0;
goto out;
}
/* For other TM types a terminated status still counts as success. */
if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
retval = 0;
out:
return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mptscsih_taskmgmt_response_code - log a firmware TM response code as
 * human-readable text.
 */
void
mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
{
	const char *desc;

	/* Translate the firmware response code into readable text. */
	if (response_code == MPI_SCSITASKMGMT_RSP_TM_COMPLETE)
		desc = "The task completed.";
	else if (response_code == MPI_SCSITASKMGMT_RSP_INVALID_FRAME)
		desc = "The IOC received an invalid frame status.";
	else if (response_code == MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED)
		desc = "The task type is not supported.";
	else if (response_code == MPI_SCSITASKMGMT_RSP_TM_FAILED)
		desc = "The requested task failed.";
	else if (response_code == MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED)
		desc = "The task completed successfully.";
	else if (response_code == MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN)
		desc = "The LUN request is invalid.";
	else if (response_code == MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)
		desc = "The task is in the IOC queue and has not been sent to target.";
	else
		desc = "unknown";

	printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
	    ioc->name, response_code, desc);
}
EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: Pointer to SCSI task mgmt request frame
* @mr: Pointer to SCSI task mgmt reply frame
*
* This routine is called from mptbase.c::mpt_interrupt() at the completion
* of any SCSI task management request.
* This routine is registered with the MPT (base) driver at driver
* load/init time via the mpt_register() API call.
*
* Returns 1 indicating alloc'd request frame ptr should be freed.
**/
int
mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
MPT_FRAME_HDR *mr)
{
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
/* mr may be NULL (no reply frame captured); only copy when present. */
if (!mr)
goto out;
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
/* MsgLength is in 32-bit words; never copy past our reply buffer. */
memcpy(ioc->taskmgmt_cmds.reply, mr,
min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
out:
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
mpt_clear_taskmgmt_in_progress_flag(ioc);
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
/* Wake the waiter in mptscsih_IssueTaskMgmt(). */
complete(&ioc->taskmgmt_cmds.done);
if (ioc->bus_type == SAS)
ioc->schedule_target_reset(ioc);
return 1;
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* This is anyones guess quite frankly.
*/
/*
 * mptscsih_bios_param - report a plausible BIOS disk geometry.
 *
 * Improvement: the original computed the cylinder count twice for drives
 * > 1Gb (once with 64/32 geometry, then again with 255/63).  Select the
 * geometry first and perform the single sector_div() needed; the reported
 * values are identical.
 */
int
mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	ulong dummy;

	/* Extended translation (255 heads / 63 sectors) for logical drives
	 * > 1Gb; the classic 64/32 geometry otherwise.
	 */
	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
	} else {
		heads = 64;
		sectors = 32;
	}

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	/* return result */
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
/* Search IOC page 3 to determine if this is hidden physical disk
*
*/
/* Returns 1 if (channel, id) is a hidden RAID physical disk, else 0. */
int
mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct inactive_raid_component_info *component_info;
int i, j;
RaidPhysDiskPage1_t *phys_disk;
int rc = 0;
int num_paths;
if (!ioc->raid_data.pIocPg3)
goto out;
/* Direct match against the active physical disks in IOC page 3. */
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) &&
(channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) {
rc = 1;
goto out;
}
}
if (ioc->bus_type != SAS)
goto out;
/*
* Check if dual path
*/
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
if (num_paths < 2)
continue;
/* Page 1 carries a variable-length Path[] array; size accordingly. */
phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
(num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
if (!phys_disk)
continue;
if ((mpt_raid_phys_disk_pg1(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
phys_disk))) {
kfree(phys_disk);
continue;
}
for (j = 0; j < num_paths; j++) {
/* Skip paths the firmware marks unusable. */
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_INVALID))
continue;
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_BROKEN))
continue;
if ((id == phys_disk->Path[j].PhysDiskID) &&
(channel == phys_disk->Path[j].PhysDiskBus)) {
rc = 1;
kfree(phys_disk);
goto out;
}
}
kfree(phys_disk);
}
/*
* Check inactive list for matching phys disks
*/
if (list_empty(&ioc->raid_data.inactive_list))
goto out;
mutex_lock(&ioc->raid_data.inactive_list_mutex);
/* No early break here: continuing past a match is harmless, rc stays 1. */
list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
list) {
if ((component_info->d.PhysDiskID == id) &&
(component_info->d.PhysDiskBus == channel))
rc = 1;
}
mutex_unlock(&ioc->raid_data.inactive_list_mutex);
out:
return rc;
}
EXPORT_SYMBOL(mptscsih_is_phys_disk);
/* Map (channel, id) to the firmware PhysDiskNum.  Returns the disk
 * number, or -ENXIO (truncated to u8 by the return type) when no
 * matching physical disk is found.
 */
u8
mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct inactive_raid_component_info *component_info;
int i, j;
RaidPhysDiskPage1_t *phys_disk;
int rc = -ENXIO;
int num_paths;
if (!ioc->raid_data.pIocPg3)
goto out;
/* Direct match against the active physical disks in IOC page 3. */
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) &&
(channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) {
rc = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum;
goto out;
}
}
if (ioc->bus_type != SAS)
goto out;
/*
* Check if dual path
*/
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
if (num_paths < 2)
continue;
/* Page 1 carries a variable-length Path[] array; size accordingly. */
phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
(num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
if (!phys_disk)
continue;
if ((mpt_raid_phys_disk_pg1(ioc,
ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
phys_disk))) {
kfree(phys_disk);
continue;
}
for (j = 0; j < num_paths; j++) {
/* Skip paths the firmware marks unusable. */
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_INVALID))
continue;
if ((phys_disk->Path[j].Flags &
MPI_RAID_PHYSDISK1_FLAG_BROKEN))
continue;
if ((id == phys_disk->Path[j].PhysDiskID) &&
(channel == phys_disk->Path[j].PhysDiskBus)) {
rc = phys_disk->PhysDiskNum;
kfree(phys_disk);
goto out;
}
}
kfree(phys_disk);
}
/*
* Check inactive list for matching phys disks
*/
if (list_empty(&ioc->raid_data.inactive_list))
goto out;
mutex_lock(&ioc->raid_data.inactive_list_mutex);
/* No early break here; the last matching entry wins. */
list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
list) {
if ((component_info->d.PhysDiskID == id) &&
(component_info->d.PhysDiskBus == channel))
rc = component_info->d.PhysDiskNum;
}
mutex_unlock(&ioc->raid_data.inactive_list_mutex);
out:
return rc;
}
EXPORT_SYMBOL(mptscsih_raid_id_to_num);
/*
* OS entry point to allow for host driver to free allocated memory
* Called if no device present or device being unloaded
*/
void
mptscsih_slave_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
VirtTarget *vtarget;
VirtDevice *vdevice;
struct scsi_target *starget;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
if (!vdevice)
return;
/* Terminate any IO still outstanding against this device. */
mptscsih_search_running_cmds(hd, vdevice);
/* NOTE(review): vtarget is not NULL-checked here; presumably it is
 * always set whenever vdevice exists -- confirm against slave_alloc.
 */
vtarget->num_luns--;
/* Flush the device's write cache before the LUN goes away. */
mptscsih_synchronize_cache(hd, vdevice);
kfree(vdevice);
sdev->hostdata = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_change_queue_depth - This function will set a devices queue depth
* @sdev: per scsi_device pointer
* @qdepth: requested queue depth
* @reason: calling context
*
* Adding support for new 'change_queue_depth' api.
*/
int
mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
MPT_SCSI_HOST *hd = shost_priv(sdev->host);
VirtTarget *vtarget;
struct scsi_target *starget;
int max_depth;
int tagged;
MPT_ADAPTER *ioc = hd->ioc;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
/* Only the default reason is handled; other reasons are declined. */
if (reason != SCSI_QDEPTH_DEFAULT)
return -EOPNOTSUPP;
/* SPI: cap the depth based on tagged support and negotiated speed;
 * all other bus types allow up to the host's full can_queue.
 */
if (ioc->bus_type == SPI) {
if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
max_depth = 1;
else if (sdev->type == TYPE_DISK &&
vtarget->minSyncFactor <= MPT_ULTRA160)
max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
else
max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
} else
max_depth = ioc->sh->can_queue;
if (!sdev->tagged_supported)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
/* Depth 1 implies untagged operation. */
if (qdepth == 1)
tagged = 0;
else
tagged = MSG_SIMPLE_TAG;
scsi_adjust_queue_depth(sdev, tagged, qdepth);
return sdev->queue_depth;
}
/*
* OS entry point to adjust the queue_depths on a per-device basis.
* Called once per device the bus scan. Use it to force the queue_depth
* member to 1 if a device does not support Q tags.
* Return non-zero if fails.
*/
int
mptscsih_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *sh = sdev->host;
VirtTarget *vtarget;
VirtDevice *vdevice;
struct scsi_target *starget;
MPT_SCSI_HOST *hd = shost_priv(sh);
MPT_ADAPTER *ioc = hd->ioc;
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"device @ %p, channel=%d, id=%d, lun=%d\n",
ioc->name, sdev, sdev->channel, sdev->id, sdev->lun));
if (ioc->bus_type == SPI)
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"sdtr %d wdtr %d ppr %d inq length=%d\n",
ioc->name, sdev->sdtr, sdev->wdtr,
sdev->ppr, sdev->inquiry_len));
/* NOTE(review): vdevice is assumed non-NULL here (presumably set up
 * in slave_alloc) -- confirm no path reaches this point without it.
 */
vdevice->configured_lun = 1;
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Queue depth=%d, tflags=%x\n",
ioc->name, sdev->queue_depth, vtarget->tflags));
if (ioc->bus_type == SPI)
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
ioc->name, vtarget->negoFlags, vtarget->maxOffset,
vtarget->minSyncFactor));
/* Start at the highest depth; change_queue_depth clamps per target. */
mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH,
SCSI_QDEPTH_DEFAULT);
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"tagged %d, simple %d, ordered %d\n",
ioc->name,sdev->tagged_supported, sdev->simple_tags,
sdev->ordered_tags));
/* Require 512-byte aligned data buffers -- presumably a hardware DMA
 * restriction; confirm against the adapter programming guide.
 */
blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Private routines...
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Utility function to copy sense data from the scsi_cmnd buffer
* to the FC and SCSI target structures.
*
*/
static void
mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
{
VirtDevice *vdevice;
SCSIIORequest_t *pReq;
u32 sense_count = le32_to_cpu(pScsiReply->SenseCount);
MPT_ADAPTER *ioc = hd->ioc;
/* Get target structure
*/
pReq = (SCSIIORequest_t *) mf;
vdevice = sc->device->hostdata;
if (sense_count) {
u8 *sense_data;
int req_index;
/* Copy the sense received into the scsi command block. */
req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
/* Each request owns a fixed slot in the shared sense buffer pool. */
sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
/* Log SMART data (asc = 0x5D, non-IM case only) if required.
*/
if ((ioc->events) && (ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
if ((sense_data[12] == 0x5D) && (vdevice->vtarget->raidVolume == 0)) {
int idx;
/* Record the event in the adapter's circular event log. */
idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
ioc->events[idx].eventContext = ioc->eventContext;
ioc->events[idx].data[0] = (pReq->LUN[1] << 24) |
(MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) |
(sc->device->channel << 8) | sc->device->id;
ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12];
ioc->eventContext++;
/* On IBM platforms also light the predicted-fault LED via SEP. */
if (ioc->pcidev->vendor ==
PCI_VENDOR_ID_IBM) {
mptscsih_issue_sep_command(ioc,
vdevice->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
vdevice->vtarget->tflags |=
MPT_TARGET_FLAGS_LED_ON;
}
}
}
} else {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hmmm... SenseData len=0! (?)\n",
ioc->name));
}
}
/**
* mptscsih_get_scsi_lookup - retrieves scmd entry
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
*
* Returns the scsi_cmd pointer
*/
struct scsi_cmnd *
mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
{
	unsigned long flags;
	struct scsi_cmnd *scmd;
	/* Lock guards concurrent readers/writers of the ScsiLookup table;
	 * the entry is left in place (compare getclear variant below). */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	scmd = ioc->ScsiLookup[i];
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return scmd;
}
EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
/**
* mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
*
* Returns the scsi_cmd pointer
*
**/
static struct scsi_cmnd *
mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
{
	unsigned long flags;
	struct scsi_cmnd *scmd;
	/* Read and clear atomically under the lookup lock so two completion
	 * paths cannot both claim the same command. */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	scmd = ioc->ScsiLookup[i];
	ioc->ScsiLookup[i] = NULL;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return scmd;
}
/**
* mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list
*
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
* @scmd: scsi_cmnd pointer
*
**/
static void
mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd)
{
	unsigned long flags;
	/* Publish the command at slot i under the lookup lock. */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->ScsiLookup[i] = scmd;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
/**
* SCPNT_TO_LOOKUP_IDX - searches for a given scmd in the ScsiLookup[] array list
* @ioc: Pointer to MPT_ADAPTER structure
* @sc: scsi_cmnd pointer
*/
/* Linear search of ScsiLookup[] for the slot holding @sc.
 * Returns the slot index, or -1 when the command is not tracked.
 */
static int
SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *sc)
{
	unsigned long flags;
	int index = -1;
	int slot;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (slot = 0; slot < ioc->req_depth; slot++) {
		if (ioc->ScsiLookup[slot] == sc) {
			index = slot;
			break;
		}
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return index;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* IOC reset notification callback: flush outstanding commands before the
 * reset and wake any waiter on a pending internal command afterwards.
 * Return value is currently ignored by the caller.
 */
int
mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	MPT_SCSI_HOST *hd;
	/* Nothing to do when no SCSI host has been registered yet. */
	if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
		return 0;
	hd = shost_priv(ioc->sh);
	switch (reset_phase) {
	case MPT_IOC_SETUP_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
		break;
	case MPT_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
		/* Abort everything in flight; the firmware state is gone. */
		mptscsih_flush_running_cmds(hd);
		break;
	case MPT_IOC_POST_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
		/* Unblock mptscsih_do_cmd() if it was waiting on a command
		 * that the reset has killed. */
		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
			ioc->internal_cmds.status |=
				MPT_MGMT_STATUS_DID_IOCRESET;
			complete(&ioc->internal_cmds.done);
		}
		break;
	default:
		break;
	}
	return 1;		/* currently means nothing really */
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Firmware event callback: counts bus resets on SPI adapters.
 * NOTE(review): the guard reads (ioc->soft_resets < -1); if soft_resets is
 * a non-negative counter this branch can never fire — looks like it was
 * meant to be a bound such as (soft_resets < some maximum).  TODO confirm
 * against the rest of the driver before changing.
 */
int
mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "MPT event (=%02Xh) routed to SCSI host driver!\n",
	    ioc->name, event));
	if ((event == MPI_EVENT_IOC_BUS_RESET ||
	    event == MPI_EVENT_EXT_BUS_RESET) &&
	    (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
		ioc->soft_resets++;
	return 1;		/* currently means nothing really */
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Bus Scan and Domain Validation functionality ...
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mptscsih_scandv_complete - Scan and DV callback routine registered
 * to Fusion MPT (base) driver.
*
* @ioc: Pointer to MPT_ADAPTER structure
* @mf: Pointer to original MPT request frame
* @mr: Pointer to MPT reply frame (NULL if TurboReply)
*
* This routine is called from mpt.c::mpt_interrupt() at the completion
* of any SCSI IO request.
* This routine is registered with the Fusion MPT (base) driver at driver
* load/init time via the mpt_register() API call.
*
* Returns 1 indicating alloc'd request frame ptr should be freed.
*
* Remark: Sets a completion code and (possibly) saves sense data
* in the IOC member localReply structure.
* Used ONLY for DV and other internal commands.
*/
int
mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
				MPT_FRAME_HDR *reply)
{
	SCSIIORequest_t *pReq;
	SCSIIOReply_t *pReply;
	u8 cmd;
	u16 req_idx;
	u8 *sense_data;
	int sz;
	/* Assume success; refined below when a full reply frame exists. */
	ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
	ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
	/* Turbo reply: no frame to inspect, keep the defaults. */
	if (!reply)
		goto out;
	pReply = (SCSIIOReply_t *) reply;
	pReq = (SCSIIORequest_t *) req;
	ioc->internal_cmds.completion_code =
	    mptscsih_get_completion_code(ioc, req, reply);
	ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
	/* Stash the reply frame for the waiter; MsgLength is in 32-bit
	 * words, hence the *4. */
	memcpy(ioc->internal_cmds.reply, reply,
	    min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
	cmd = reply->u.hdr.Function;
	/* Save auto-sense data, if the firmware produced any. */
	if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
	    (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
	    (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
		req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
		sense_data = ((u8 *)ioc->sense_buf_pool +
		    (req_idx * MPT_SENSE_BUFFER_ALLOC));
		sz = min_t(int, pReq->SenseBufferLength,
		    MPT_SENSE_BUFFER_ALLOC);
		memcpy(ioc->internal_cmds.sense, sense_data, sz);
	}
 out:
	/* Only wake the waiter if the command is still marked pending;
	 * a timeout/reset path may already have claimed it. */
	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
		return 0;
	ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
	complete(&ioc->internal_cmds.done);
	return 1;
}
/**
* mptscsih_get_completion_code - get completion code from MPT request
* @ioc: Pointer to MPT_ADAPTER structure
* @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
*
**/
static int
mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
				MPT_FRAME_HDR *reply)
{
	SCSIIOReply_t *pReply;
	MpiRaidActionReply_t *pr;
	u8 scsi_status;
	u16 status;
	int completion_code;
	pReply = (SCSIIOReply_t *)reply;
	status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
	scsi_status = pReply->SCSIStatus;
	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
	    "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
	    scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
	/* Map the firmware IOCStatus (and, for nominally successful I/O,
	 * the SCSI state/status) onto the driver's MPT_SCANDV_* codes. */
	switch (status) {
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
		completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
		completion_code = MPT_SCANDV_DID_RESET;
		break;
	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		completion_code = MPT_SCANDV_BUSY;
		break;
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
	case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
		if (pReply->Function == MPI_FUNCTION_CONFIG) {
			completion_code = MPT_SCANDV_GOOD;
		} else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
			/* RAID action replies carry their own status word. */
			pr = (MpiRaidActionReply_t *)reply;
			if (le16_to_cpu(pr->ActionStatus) ==
				MPI_RAID_ACTION_ASTATUS_SUCCESS)
				completion_code = MPT_SCANDV_GOOD;
			else
				completion_code = MPT_SCANDV_SOME_ERROR;
		} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
			completion_code = MPT_SCANDV_SENSE;
		else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/* Auto-sense failed: for INQUIRY, the caller should
			 * issue a REQUEST SENSE itself. */
			if (req->u.scsireq.CDB[0] == INQUIRY)
				completion_code = MPT_SCANDV_ISSUE_SENSE;
			else
				completion_code = MPT_SCANDV_DID_RESET;
		} else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
			completion_code = MPT_SCANDV_DID_RESET;
		else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
			completion_code = MPT_SCANDV_DID_RESET;
		else if (scsi_status == MPI_SCSI_STATUS_BUSY)
			completion_code = MPT_SCANDV_BUSY;
		else
			completion_code = MPT_SCANDV_GOOD;
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:		/* 0x0047 */
		if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
			completion_code = MPT_SCANDV_DID_RESET;
		else
			completion_code = MPT_SCANDV_SOME_ERROR;
		break;
	default:
		completion_code = MPT_SCANDV_SOME_ERROR;
		break;
	}	/* switch(status) */
	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "  completionCode set to %08xh\n", ioc->name, completion_code));
	return completion_code;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_do_cmd - Do internal command.
* @hd: MPT_SCSI_HOST pointer
* @io: INTERNAL_CMD pointer.
*
* Issue the specified internally generated command and do command
* specific cleanup. For bus scan / DV only.
* NOTES: If command is Inquiry and status is good,
* initialize a target structure, save the data
*
* Remark: Single threaded access only.
*
* Return:
* < 0 if an illegal command or no resources
*
* 0 if good
*
* > 0 if command complete but some type of completion error.
*/
static int
mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
{
	MPT_FRAME_HDR *mf;
	SCSIIORequest_t *pScsiReq;
	int my_idx, ii, dir;
	int timeout;		/* per-command timeout, in seconds */
	char cmdLen;
	char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	u8 cmd = io->cmd;
	MPT_ADAPTER *ioc = hd->ioc;
	int ret = 0;
	unsigned long timeleft;
	unsigned long flags;
	/* don't send internal command during diag reset */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"%s: busy with host reset\n", ioc->name, __func__));
		return MPT_SCANDV_BUSY;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
	/* Serializes all internal commands on this adapter. */
	mutex_lock(&ioc->internal_cmds.mutex);
	/* Set command specific information: CDB bytes, length, transfer
	 * direction and timeout.  (TEST_UNIT_READY's opcode is 0x00, so
	 * leaving CDB[0] at its zero initializer is intentional there.)
	 */
	switch (cmd) {
	case INQUIRY:
		cmdLen = 6;
		dir = MPI_SCSIIO_CONTROL_READ;
		CDB[0] = cmd;
		CDB[4] = io->size;
		timeout = 10;
		break;
	case TEST_UNIT_READY:
		cmdLen = 6;
		dir = MPI_SCSIIO_CONTROL_READ;
		timeout = 10;
		break;
	case START_STOP:
		cmdLen = 6;
		dir = MPI_SCSIIO_CONTROL_READ;
		CDB[0] = cmd;
		CDB[4] = 1;	/*Spin up the disk */
		timeout = 15;
		break;
	case REQUEST_SENSE:
		cmdLen = 6;
		CDB[0] = cmd;
		CDB[4] = io->size;
		dir = MPI_SCSIIO_CONTROL_READ;
		timeout = 10;
		break;
	case READ_BUFFER:
		cmdLen = 10;
		dir = MPI_SCSIIO_CONTROL_READ;
		CDB[0] = cmd;
		/* Mode field: 0x0A = echo buffer, 0x02 = data buffer;
		 * OR in 0x01 for the descriptor (capacity) variant. */
		if (io->flags & MPT_ICFLAG_ECHO) {
			CDB[1] = 0x0A;
		} else {
			CDB[1] = 0x02;
		}
		if (io->flags & MPT_ICFLAG_BUF_CAP) {
			CDB[1] |= 0x01;
		}
		CDB[6] = (io->size >> 16) & 0xFF;
		CDB[7] = (io->size >>  8) & 0xFF;
		CDB[8] = io->size & 0xFF;
		timeout = 10;
		break;
	case WRITE_BUFFER:
		cmdLen = 10;
		dir = MPI_SCSIIO_CONTROL_WRITE;
		CDB[0] = cmd;
		if (io->flags & MPT_ICFLAG_ECHO) {
			CDB[1] = 0x0A;
		} else {
			CDB[1] = 0x02;
		}
		CDB[6] = (io->size >> 16) & 0xFF;
		CDB[7] = (io->size >>  8) & 0xFF;
		CDB[8] = io->size & 0xFF;
		timeout = 10;
		break;
	case RESERVE:
		cmdLen = 6;
		dir = MPI_SCSIIO_CONTROL_READ;
		CDB[0] = cmd;
		timeout = 10;
		break;
	case RELEASE:
		cmdLen = 6;
		dir = MPI_SCSIIO_CONTROL_READ;
		CDB[0] = cmd;
		timeout = 10;
		break;
	case SYNCHRONIZE_CACHE:
		cmdLen = 10;
		dir = MPI_SCSIIO_CONTROL_READ;
		CDB[0] = cmd;
//		CDB[1] = 0x02;	/* set immediate bit */
		timeout = 10;
		break;
	default:
		/* Error Case */
		ret = -EFAULT;
		goto out;
	}
	/* Get and Populate a free Frame
	 * MsgContext set in mpt_get_msg_frame call
	 */
	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
		    ioc->name, __func__));
		ret = MPT_SCANDV_BUSY;
		goto out;
	}
	pScsiReq = (SCSIIORequest_t *) mf;
	/* Get the request index */
	my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
	ADD_INDEX_LOG(my_idx); /* for debug */
	/* Hidden RAID members are addressed through the passthrough
	 * function with the physical disk number. */
	if (io->flags & MPT_ICFLAG_PHYS_DISK) {
		pScsiReq->TargetID = io->physDiskNum;
		pScsiReq->Bus = 0;
		pScsiReq->ChainOffset = 0;
		pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	} else {
		pScsiReq->TargetID = io->id;
		pScsiReq->Bus = io->channel;
		pScsiReq->ChainOffset = 0;
		pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	}
	pScsiReq->CDBLength = cmdLen;
	pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
	pScsiReq->Reserved = 0;
	pScsiReq->MsgFlags = mpt_msg_flags(ioc);
	/* MsgContext set in mpt_get_msg_frame call */
	int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
	if (io->flags & MPT_ICFLAG_TAGGED_CMD)
		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ);
	else
		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
	/* REQUEST SENSE must always go out untagged. */
	if (cmd == REQUEST_SENSE) {
		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
		devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
	}
	for (ii = 0; ii < 16; ii++)
		pScsiReq->CDB[ii] = CDB[ii];
	pScsiReq->DataLength = cpu_to_le32(io->size);
	pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
	    + (my_idx * MPT_SENSE_BUFFER_ALLOC));
	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
	    ioc->name, __func__, cmd, io->channel, io->id, io->lun));
	/* Single simple SGE describing the whole transfer. */
	if (dir == MPI_SCSIIO_CONTROL_READ)
		ioc->add_sge((char *) &pScsiReq->SGL,
		    MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
	else
		ioc->add_sge((char *) &pScsiReq->SGL,
		    MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
	/* Wait for mptscsih_scandv_complete() (or an IOC reset) to
	 * complete internal_cmds.done. */
	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
	    timeout*HZ);
	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		ret = MPT_SCANDV_DID_RESET;
		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
		    cmd));
		/* If an IOC reset already cleaned up, just release the
		 * frame; otherwise a true timeout forces a reset here. */
		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			mpt_free_msg_frame(ioc, mf);
			goto out;
		}
		if (!timeleft) {
			printk(MYIOC_s_WARN_FMT
			       "Issuing Reset from %s!! doorbell=0x%08xh"
			       " cmd=0x%02x\n",
			       ioc->name, __func__, mpt_GetIocState(ioc, 0),
			       cmd);
			mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
			mpt_free_msg_frame(ioc, mf);
		}
		goto out;
	}
	ret = ioc->internal_cmds.completion_code;
	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
			ioc->name, __func__, ret));
 out:
	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
	mutex_unlock(&ioc->internal_cmds.mutex);
	return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
* @hd: Pointer to a SCSI HOST structure
* @vdevice: virtual target device
*
* Uses the ISR, but with special processing.
* MUST be single-threaded.
*
*/
static void
mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
	INTERNAL_CMD iocmd;
	/* Ignore hidden raid components, this is handled when the command
	 * is sent to the volume
	 */
	if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
		return;
	/* Only flush configured, still-present disk devices. */
	if (vdevice->vtarget->type != TYPE_DISK || vdevice->vtarget->deleted ||
	    !vdevice->configured_lun)
		return;
	/* Following parameters will not change
	 * in this routine.
	 */
	iocmd.cmd = SYNCHRONIZE_CACHE;
	iocmd.flags = 0;
	iocmd.physDiskNum = -1;
	iocmd.data = NULL;
	iocmd.data_dma = -1;	/* no data transfer for SYNCHRONIZE CACHE */
	iocmd.size = 0;
	iocmd.rsvd = iocmd.rsvd2 = 0;
	iocmd.channel = vdevice->vtarget->channel;
	iocmd.id = vdevice->vtarget->id;
	iocmd.lun = vdevice->lun;
	mptscsih_do_cmd(hd, &iocmd);
}
static ssize_t
mptscsih_version_fw_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF);
}
static DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL);
/* sysfs read handler: BIOS version as four hex bytes "xx.xx.xx.xx". */
static ssize_t
mptscsih_version_bios_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
	    (ioc->biosVersion & 0xFF000000) >> 24,
	    (ioc->biosVersion & 0x00FF0000) >> 16,
	    (ioc->biosVersion & 0x0000FF00) >> 8,
	    ioc->biosVersion & 0x000000FF);
}
static DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL);
/* sysfs read handler: MPI message protocol version (three hex digits). */
static ssize_t
mptscsih_version_mpi_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
}
static DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL);
/* sysfs read handler: adapter product name string. */
static ssize_t
mptscsih_version_product_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	MPT_SCSI_HOST *hd = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%s\n", hd->ioc->prod_name);
}
static DEVICE_ATTR(version_product, S_IRUGO,
    mptscsih_version_product_show, NULL);
/* sysfs read handler: persistent NVDATA image version (hex). */
static ssize_t
mptscsih_version_nvdata_persistent_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%02xh\n",
	    ioc->nvdata_version_persistent);
}
static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
    mptscsih_version_nvdata_persistent_show, NULL);
/* sysfs read handler: default NVDATA image version (hex). */
static ssize_t
mptscsih_version_nvdata_default_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default);
}
static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
    mptscsih_version_nvdata_default_show, NULL);
/* sysfs read handler: board name string. */
static ssize_t
mptscsih_board_name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	MPT_SCSI_HOST *hd = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%s\n", hd->ioc->board_name);
}
static DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL);
/* sysfs read handler: board assembly number string. */
static ssize_t
mptscsih_board_assembly_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
}
static DEVICE_ATTR(board_assembly, S_IRUGO,
    mptscsih_board_assembly_show, NULL);
/* sysfs read handler: board tracer number string. */
static ssize_t
mptscsih_board_tracer_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
}
static DEVICE_ATTR(board_tracer, S_IRUGO,
    mptscsih_board_tracer_show, NULL);
/* sysfs read handler: I/O missing delay value. */
static ssize_t
mptscsih_io_delay_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	MPT_SCSI_HOST *hd = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%02d\n", hd->ioc->io_missing_delay);
}
static DEVICE_ATTR(io_delay, S_IRUGO,
    mptscsih_io_delay_show, NULL);
/* sysfs read handler: device missing delay value. */
static ssize_t
mptscsih_device_delay_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static DEVICE_ATTR(device_delay, S_IRUGO,
    mptscsih_device_delay_show, NULL);
/* sysfs read handler: current debug level mask (hex). */
static ssize_t
mptscsih_debug_level_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->debug_level);
}
/* sysfs write handler: set the debug level mask from a hex string.
 * NOTE(review): returns strlen(buf) rather than the conventional 'count';
 * sysfs buffers are NUL-terminated so this is usually equivalent, but
 * returning count would be the standard idiom — confirm before changing.
 */
static ssize_t
mptscsih_debug_level_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct Scsi_Host *host = class_to_shost(dev);
	MPT_SCSI_HOST	*hd = shost_priv(host);
	MPT_ADAPTER *ioc = hd->ioc;
	int val = 0;
	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;
	ioc->debug_level = val;
	printk(MYIOC_s_INFO_FMT "debug_level=%08xh\n",
	    ioc->name, ioc->debug_level);
	return strlen(buf);
}
static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
	mptscsih_debug_level_show, mptscsih_debug_level_store);
/* NULL-terminated attribute list plugged into the Scsi_Host template
 * (scsi_host_template.shost_attrs) by the LLDs built on mptscsih. */
struct device_attribute *mptscsih_host_attrs[] = {
	&dev_attr_version_fw,
	&dev_attr_version_bios,
	&dev_attr_version_mpi,
	&dev_attr_version_product,
	&dev_attr_version_nvdata_persistent,
	&dev_attr_version_nvdata_default,
	&dev_attr_board_name,
	&dev_attr_board_assembly,
	&dev_attr_board_tracer,
	&dev_attr_io_delay,
	&dev_attr_device_delay,
	&dev_attr_debug_level,
	NULL,
};
EXPORT_SYMBOL(mptscsih_host_attrs);
/* Public entry points consumed by the bus-specific LLDs
 * (mptspi, mptfc, mptsas). */
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);
#ifdef CONFIG_PM
EXPORT_SYMBOL(mptscsih_suspend);
EXPORT_SYMBOL(mptscsih_resume);
#endif
EXPORT_SYMBOL(mptscsih_proc_info);
EXPORT_SYMBOL(mptscsih_info);
EXPORT_SYMBOL(mptscsih_qcmd);
EXPORT_SYMBOL(mptscsih_slave_destroy);
EXPORT_SYMBOL(mptscsih_slave_configure);
EXPORT_SYMBOL(mptscsih_abort);
EXPORT_SYMBOL(mptscsih_dev_reset);
EXPORT_SYMBOL(mptscsih_bus_reset);
EXPORT_SYMBOL(mptscsih_host_reset);
EXPORT_SYMBOL(mptscsih_bios_param);
EXPORT_SYMBOL(mptscsih_io_done);
EXPORT_SYMBOL(mptscsih_taskmgmt_complete);
EXPORT_SYMBOL(mptscsih_scandv_complete);
EXPORT_SYMBOL(mptscsih_event_process);
EXPORT_SYMBOL(mptscsih_ioc_reset);
EXPORT_SYMBOL(mptscsih_change_queue_depth);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
| gpl-2.0 |
lenovo-a3-dev/kernel_lenovo_a3 | drivers/staging/rtl8192e/rtllib_module.c | 5238 | 7269 | /*******************************************************************************
Copyright(c) 2004 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
<jkmaline@cc.hut.fi>
Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <net/arp.h>
#include "rtllib.h"
u32 rt_global_debug_component = COMP_ERR;
EXPORT_SYMBOL(rt_global_debug_component);
/* Initialize @ptimer and bind its callback @fun with argument @data.
 * (init_timer() does not touch ->function/->data, so the order of these
 * three statements is interchangeable.)
 */
void _setup_timer(struct timer_list *ptimer, void *fun, unsigned long data)
{
	init_timer(ptimer);
	ptimer->data = data;
	ptimer->function = fun;
}
/* Allocate the zeroed array of MAX_NETWORK_COUNT network descriptors.
 * Idempotent: returns 0 immediately if the array already exists.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
{
	if (ieee->networks)
		return 0;
	/* kcalloc() zero-fills and checks count*size for overflow, unlike
	 * an open-coded kzalloc(n * size, ...). */
	ieee->networks = kcalloc(MAX_NETWORK_COUNT,
				 sizeof(struct rtllib_network), GFP_KERNEL);
	if (!ieee->networks) {
		printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
		       ieee->dev->name);
		return -ENOMEM;
	}
	return 0;
}
/* Release the network descriptor array; safe to call when it was never
 * allocated (kfree(NULL) is a no-op, so no guard is needed). */
static inline void rtllib_networks_free(struct rtllib_device *ieee)
{
	kfree(ieee->networks);
	ieee->networks = NULL;	/* defend against double free */
}
/* Reset both network lists and place every descriptor on the free list. */
static inline void rtllib_networks_initialize(struct rtllib_device *ieee)
{
	int idx;

	INIT_LIST_HEAD(&ieee->network_free_list);
	INIT_LIST_HEAD(&ieee->network_list);
	/* Every descriptor starts out unused. */
	for (idx = 0; idx < MAX_NETWORK_COUNT; idx++)
		list_add_tail(&ieee->networks[idx].list,
			      &ieee->network_free_list);
}
/* Allocate and initialize an rtllib net_device with @sizeof_priv bytes of
 * driver-private space.  Returns the net_device, or NULL on failure.
 *
 * Fix: the original returned NULL directly when the pHTInfo allocation
 * failed, leaking the net_device, the networks array and the softmac
 * state.  All failure paths now unwind what was set up before them.
 */
struct net_device *alloc_rtllib(int sizeof_priv)
{
	struct rtllib_device *ieee = NULL;
	struct net_device *dev;
	int i, err;
	RTLLIB_DEBUG_INFO("Initializing...\n");
	dev = alloc_etherdev(sizeof(struct rtllib_device) + sizeof_priv);
	if (!dev) {
		RTLLIB_ERROR("Unable to network device.\n");
		return NULL;
	}
	ieee = (struct rtllib_device *)netdev_priv_rsl(dev);
	memset(ieee, 0, sizeof(struct rtllib_device)+sizeof_priv);
	ieee->dev = dev;
	err = rtllib_networks_allocate(ieee);
	if (err) {
		RTLLIB_ERROR("Unable to allocate beacon storage: %d\n",
				err);
		goto err_free_netdev;
	}
	rtllib_networks_initialize(ieee);
	/* Default fragmentation threshold is maximum payload size */
	ieee->fts = DEFAULT_FTS;
	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
	ieee->open_wep = 1;
	/* Default to enabling full open WEP with host based encrypt/decrypt */
	ieee->host_encrypt = 1;
	ieee->host_decrypt = 1;
	ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
	ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
	spin_lock_init(&ieee->lock);
	spin_lock_init(&ieee->wpax_suitlist_lock);
	spin_lock_init(&ieee->bw_spinlock);
	spin_lock_init(&ieee->reorder_spinlock);
	atomic_set(&(ieee->atm_chnlop), 0);
	atomic_set(&(ieee->atm_swbw), 0);
	/* SAM FIXME */
	lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
	ieee->bHalfNMode = false;
	ieee->wpa_enabled = 0;
	ieee->tkip_countermeasures = 0;
	ieee->drop_unencrypted = 0;
	ieee->privacy_invoked = 0;
	ieee->ieee802_1x = 1;
	ieee->raw_tx = 0;
	ieee->hwsec_active = 0;
	memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
	rtllib_softmac_init(ieee);
	ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
	if (ieee->pHTInfo == NULL) {
		RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for HTInfo\n");
		goto err_free_softmac;	/* was: return NULL (leaked dev) */
	}
	HTUpdateDefaultSetting(ieee);
	HTInitializeHTInfo(ieee);
	TSInitialize(ieee);
	for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
	for (i = 0; i < 17; i++) {
		ieee->last_rxseq_num[i] = -1;
		ieee->last_rxfrag_num[i] = -1;
		ieee->last_packet_time[i] = 0;
	}
	return dev;

 err_free_softmac:
	rtllib_softmac_free(ieee);
	rtllib_networks_free(ieee);
 err_free_netdev:
	free_netdev(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_rtllib);
/* Tear down everything alloc_rtllib() set up, then free the net_device.
 * Teardown runs in reverse order of construction. */
void free_rtllib(struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				      netdev_priv_rsl(dev);
	kfree(ieee->pHTInfo);
	ieee->pHTInfo = NULL;
	rtllib_softmac_free(ieee);
	lib80211_crypt_info_free(&ieee->crypt_info);
	rtllib_networks_free(ieee);
	/* ieee lives inside dev's private area; this frees it too. */
	free_netdev(dev);
}
EXPORT_SYMBOL(free_rtllib);
/* Runtime debug mask, adjustable through /proc (see store_debug_level). */
u32 rtllib_debug_level;
/* Initial mask applied at module init: errors only. */
static int debug = \
	    RTLLIB_DL_ERR
	    ;
/* /proc/net/<DRV_NAME> directory created by rtllib_init(). */
static struct proc_dir_entry *rtllib_proc;
/* Legacy procfs read handler: print the debug mask in hex. */
static int show_debug_level(char *page, char **start, off_t offset,
			    int count, int *eof, void *data)
{
	return snprintf(page, count, "0x%08X\n", rtllib_debug_level);
}
/* Legacy procfs write handler: parse a hex ("0x...") or decimal value and
 * install it as the new debug mask.
 * NOTE(review): a failing copy_from_user() silently returns count, so the
 * write appears to succeed; best-effort behavior — confirm intentional.
 */
static int store_debug_level(struct file *file, const char __user *buffer,
			     unsigned long count, void *data)
{
	char buf[] = "0x00000000";
	unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
	char *p = (char *)buf;
	unsigned long val;
	if (copy_from_user(buf, buffer, len))
		return count;
	buf[len] = 0;
	/* Accept "0x..", "x..", "X.." prefixes as hex, else decimal. */
	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buf)
		printk(KERN_INFO DRV_NAME
		       ": %s is not in hex or decimal form.\n", buf);
	else
		rtllib_debug_level = val;
	return strnlen(buf, count);
}
/* Module init: create /proc/net/<DRV_NAME>/debug_level with the legacy
 * (pre-proc_ops) read/write handlers.  Returns 0 or -EIO. */
int __init rtllib_init(void)
{
	struct proc_dir_entry *e;
	rtllib_debug_level = debug;
	rtllib_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
	if (rtllib_proc == NULL) {
		RTLLIB_ERROR("Unable to create " DRV_NAME
				" proc directory\n");
		return -EIO;
	}
	e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
			      rtllib_proc);
	if (!e) {
		/* Roll back the directory so exit stays a no-op. */
		remove_proc_entry(DRV_NAME, init_net.proc_net);
		rtllib_proc = NULL;
		return -EIO;
	}
	e->read_proc = show_debug_level;
	e->write_proc = store_debug_level;
	e->data = NULL;
	return 0;
}
/* Module exit: remove the proc entries created by rtllib_init().
 * rtllib_proc is NULL whenever init failed, so this is always safe. */
void __exit rtllib_exit(void)
{
	if (rtllib_proc) {
		remove_proc_entry("debug_level", rtllib_proc);
		remove_proc_entry(DRV_NAME, init_net.proc_net);
		rtllib_proc = NULL;
	}
}
module_init(rtllib_init);
module_exit(rtllib_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
updateing/kernel-msm | drivers/s390/block/dcssblk.c | 5238 | 27413 | /*
* dcssblk.c -- the S/390 block driver for dcss memory
*
* Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
*/
#define KMSG_COMPONENT "dcssblk"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/extmem.h>
#include <asm/io.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400
#define DCSS_BUS_ID_SIZE 20
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static int dcssblk_release(struct gendisk *disk, fmode_t mode);
static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
void **kaddr, unsigned long *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
static int dcssblk_major;
static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
.open = dcssblk_open,
.release = dcssblk_release,
.direct_access = dcssblk_direct_access,
};
/* Per-device state: one block device that maps one or more contiguous
 * DCSS segments into a single disk. */
struct dcssblk_dev_info {
	struct list_head lh;		/* link in dcssblk_devices */
	struct device dev;
	char segment_name[DCSS_BUS_ID_SIZE];	/* name of first segment */
	atomic_t use_count;		/* open reference count */
	struct gendisk *gd;
	unsigned long start;		/* first byte of mapped range */
	unsigned long end;		/* last byte of mapped range */
	int segment_type;		/* combined type of all segments */
	unsigned char save_pending;	/* save requested while device open */
	unsigned char is_shared;	/* loaded shared vs. exclusive */
	struct request_queue *dcssblk_queue;
	int num_of_segments;
	struct list_head seg_list;	/* list of struct segment_info */
};
/* One DCSS segment belonging to a dcssblk device. */
struct segment_info {
	struct list_head lh;		/* link in dev_info->seg_list */
	char segment_name[DCSS_BUS_ID_SIZE];
	unsigned long start;
	unsigned long end;
	int segment_type;
};
/* sysfs attribute handlers, defined further below */
static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_save_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t dcssblk_seglist_show(struct device *dev,
				struct device_attribute *attr,
				char *buf);

/* root-device attributes: "add"/"remove" create and destroy devices */
static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
/* per-device attributes */
static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
		   dcssblk_save_store);
static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
		   dcssblk_shared_store);
static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);

static struct device *dcssblk_root_dev;

/* all registered devices; protected by dcssblk_devices_sem */
static LIST_HEAD(dcssblk_devices);
static struct rw_semaphore dcssblk_devices_sem;	/* initialized in dcssblk_init() */
/*
 * Device release callback: runs once the last reference to the embedded
 * struct device is dropped; frees all per-segment descriptors and the
 * device state itself, then drops the module reference taken on add.
 */
static void
dcssblk_release_segment(struct device *dev)
{
	struct dcssblk_dev_info *info =
		container_of(dev, struct dcssblk_dev_info, dev);
	struct segment_info *seg, *next;

	/* free every segment descriptor hanging off this device */
	list_for_each_entry_safe(seg, next, &info->seg_list, lh) {
		list_del(&seg->lh);
		kfree(seg);
	}
	kfree(info);
	module_put(THIS_MODULE);
}
/*
 * Pick the lowest unused minor number and store it in the device's
 * gendisk.  Must be called with down_write(&dcssblk_devices_sem) held,
 * and the device must be enqueued before the semaphore is released.
 *
 * Returns 0 on success, -EINVAL on NULL input, -EBUSY if all minors
 * are taken.
 */
static int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
	struct dcssblk_dev_info *entry;
	int minor;

	if (dev_info == NULL)
		return -EINVAL;
	for (minor = 0; minor < (1<<MINORBITS); minor++) {
		int in_use = 0;

		/* minor is taken if any registered device already owns it */
		list_for_each_entry(entry, &dcssblk_devices, lh)
			if (minor == entry->gd->first_minor)
				in_use = 1;
		if (!in_use) {
			dev_info->gd->first_minor = minor;
			return 0;
		}
	}
	return -EBUSY;
}
/*
* get the struct dcssblk_dev_info from dcssblk_devices
* for the given name.
* down_read(&dcssblk_devices_sem) must be held.
*/
static struct dcssblk_dev_info *
dcssblk_get_device_by_name(char *name)
{
struct dcssblk_dev_info *entry;
list_for_each_entry(entry, &dcssblk_devices, lh) {
if (!strcmp(name, entry->segment_name)) {
return entry;
}
}
return NULL;
}
/*
* get the struct segment_info from seg_list
* for the given name.
* down_read(&dcssblk_devices_sem) must be held.
*/
static struct segment_info *
dcssblk_get_segment_by_name(char *name)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
list_for_each_entry(dev_info, &dcssblk_devices, lh) {
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (!strcmp(name, entry->segment_name))
return entry;
}
}
return NULL;
}
/*
* get the highest address of the multi-segment block.
*/
static unsigned long
dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
{
unsigned long highest_addr;
struct segment_info *entry;
highest_addr = 0;
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (highest_addr < entry->end)
highest_addr = entry->end;
}
return highest_addr;
}
/*
 * Return the lowest start address over all segments of a multi-segment
 * block device (0 for an empty segment list).
 */
static unsigned long
dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
{
	struct segment_info *seg;
	unsigned long min_start = 0;
	int have_first = 0;

	list_for_each_entry(seg, &dev_info->seg_list, lh) {
		/* first entry seeds the minimum */
		if (!have_first || seg->start < min_start)
			min_start = seg->start;
		have_first = 1;
	}
	return min_start;
}
/*
 * Check that the segments attached to a multi-segment device form one
 * contiguous address range with mutually compatible segment types.
 *
 * Returns 0 if OK, -EINVAL if non-contiguous or incompatible,
 * -ENOMEM on allocation failure.
 */
static int
dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
{
	int i, j, rc;
	struct segment_info *sort_list, *entry, temp;

	if (dev_info->num_of_segments <= 1)
		return 0;

	/*
	 * Fix: use kcalloc() instead of kzalloc(n * size) so the element
	 * count * size multiplication is checked for overflow.
	 */
	sort_list = kcalloc(dev_info->num_of_segments,
			    sizeof(struct segment_info), GFP_KERNEL);
	if (sort_list == NULL)
		return -ENOMEM;
	i = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		memcpy(&sort_list[i], entry, sizeof(struct segment_info));
		i++;
	}

	/* sort segments by ascending start address (simple O(n^2) sort) */
	for (i = 0; i < dev_info->num_of_segments; i++)
		for (j = 0; j < dev_info->num_of_segments; j++)
			if (sort_list[j].start > sort_list[i].start) {
				memcpy(&temp, &sort_list[i],
					sizeof(struct segment_info));
				memcpy(&sort_list[i], &sort_list[j],
					sizeof(struct segment_info));
				memcpy(&sort_list[j], &temp,
					sizeof(struct segment_info));
			}

	/* check continuity */
	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
			pr_err("Adjacent DCSSs %s and %s are not "
				"contiguous\n", sort_list[i].segment_name,
				sort_list[i+1].segment_name);
			rc = -EINVAL;
			goto out;
		}
		/* EN and EW are allowed in a block device */
		if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
			if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
				(sort_list[i].segment_type == SEG_TYPE_ER) ||
				!(sort_list[i+1].segment_type &
					SEGMENT_EXCLUSIVE) ||
				(sort_list[i+1].segment_type == SEG_TYPE_ER)) {
				pr_err("DCSS %s and DCSS %s have "
					"incompatible types\n",
					sort_list[i].segment_name,
					sort_list[i+1].segment_name);
				rc = -EINVAL;
				goto out;
			}
		}
	}
	rc = 0;
out:
	kfree(sort_list);
	return rc;
}
/*
 * Load DCSS segment 'name' and return its freshly allocated descriptor
 * in *seg_info.
 *
 * Return value: -EEXIST if the segment is already attached to some
 * device, -ENOMEM on allocation failure, a negative error from
 * segment_load(), or - on success - the non-negative segment type.
 * NOTE(review): on segment_load() failure *seg_info is freed but left
 * dangling; callers must not touch it when rc < 0.
 */
static int
dcssblk_load_segment(char *name, struct segment_info **seg_info)
{
	int rc;

	/* already loaded? */
	down_read(&dcssblk_devices_sem);
	*seg_info = dcssblk_get_segment_by_name(name);
	up_read(&dcssblk_devices_sem);
	if (*seg_info != NULL)
		return -EEXIST;

	/* get a struct segment_info */
	*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
	if (*seg_info == NULL)
		return -ENOMEM;

	strcpy((*seg_info)->segment_name, name);

	/* load the segment */
	rc = segment_load(name, SEGMENT_SHARED,
			&(*seg_info)->start, &(*seg_info)->end);
	if (rc < 0) {
		segment_warning(rc, (*seg_info)->segment_name);
		kfree(*seg_info);
	} else {
		INIT_LIST_HEAD(&(*seg_info)->lh);
		/* non-negative rc from segment_load() is the segment type */
		(*seg_info)->segment_type = rc;
	}
	return rc;
}
/*
 * Deferred unregister, scheduled from dcssblk_shared_store(): a sysfs
 * attribute handler cannot unregister its own device while it is being
 * written, so the work is pushed to a callback.
 */
static void dcssblk_unregister_callback(struct device *dev)
{
	device_unregister(dev);
	put_device(dev);
}
/*
 * "shared" device attribute, show half (the store half is
 * dcssblk_shared_store() below): report whether the device's segments
 * are currently loaded in shared mode ("1") or exclusive mode ("0").
 */
static ssize_t
dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dcssblk_dev_info *dev_info =
		container_of(dev, struct dcssblk_dev_info, dev);

	if (dev_info->is_shared)
		return sprintf(buf, "1\n");
	return sprintf(buf, "0\n");
}
/*
 * "shared" store handler: switch all segments of the device between
 * shared ('1') and exclusive-writable ('0') mode.  Refused with -EBUSY
 * while the device is open.  If reloading any segment fails with an
 * error other than -EAGAIN, the whole device is torn down (removeseg).
 */
static ssize_t
dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry, *temp;
	int rc;

	/* accept exactly one character, optionally followed by '\n' */
	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
		return -EINVAL;
	down_write(&dcssblk_devices_sem);
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	if (atomic_read(&dev_info->use_count)) {
		rc = -EBUSY;
		goto out;
	}
	if (inbuf[0] == '1') {
		/* reload segments in shared mode */
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			rc = segment_modify_shared(entry->segment_name,
						SEGMENT_SHARED);
			if (rc < 0) {
				BUG_ON(rc == -EINVAL);
				if (rc != -EAGAIN)
					goto removeseg;
			}
		}
		dev_info->is_shared = 1;
		/* shared read-only segment types force a read-only disk */
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			set_disk_ro(dev_info->gd, 1);
		}
	} else if (inbuf[0] == '0') {
		/* reload segments in exclusive mode */
		if (dev_info->segment_type == SEG_TYPE_SC) {
			pr_err("DCSS %s is of type SC and cannot be "
				"loaded as exclusive-writable\n",
				dev_info->segment_name);
			rc = -EINVAL;
			goto out;
		}
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			rc = segment_modify_shared(entry->segment_name,
						SEGMENT_EXCLUSIVE);
			if (rc < 0) {
				BUG_ON(rc == -EINVAL);
				if (rc != -EAGAIN)
					goto removeseg;
			}
		}
		dev_info->is_shared = 0;
		set_disk_ro(dev_info->gd, 0);
	} else {
		rc = -EINVAL;
		goto out;
	}
	rc = count;
	goto out;

removeseg:
	pr_err("DCSS device %s is removed after a failed access mode "
		"change\n", dev_info->segment_name);
	/*
	 * Unload every segment except the one whose mode change just
	 * failed -- NOTE(review): presumably that one is already gone
	 * after segment_modify_shared() failed; confirm against extmem.
	 */
	temp = entry;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (entry != temp)
			segment_unload(entry->segment_name);
	}
	list_del(&dev_info->lh);
	del_gendisk(dev_info->gd);
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	/* defer device_unregister(); we still hold dcssblk_devices_sem */
	rc = device_schedule_callback(dev, dcssblk_unregister_callback);
out:
	up_write(&dcssblk_devices_sem);
	return rc;
}
/*
 * "save" device attribute, show half (store half below): report whether
 * a save of the segment contents is still pending ("1") or not ("0").
 * A pending save becomes effective when the device turns idle and can
 * be undone by writing a non-true value to this entry.
 */
static ssize_t
dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dcssblk_dev_info *dev_info =
		container_of(dev, struct dcssblk_dev_info, dev);

	if (dev_info->save_pending)
		return sprintf(buf, "1\n");
	return sprintf(buf, "0\n");
}
/*
 * "save" store handler.  Writing '1' saves all segments immediately if
 * the device is idle, otherwise marks the save as pending (it is then
 * performed on last close, see dcssblk_release()).  Writing '0' cancels
 * a pending save request.
 */
static ssize_t
dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	/* accept exactly one character, optionally followed by '\n' */
	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
		return -EINVAL;
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);

	down_write(&dcssblk_devices_sem);
	if (inbuf[0] == '1') {
		if (atomic_read(&dev_info->use_count) == 0) {
			// device is idle => we save immediately
			pr_info("All DCSSs that map to device %s are "
				"saved\n", dev_info->segment_name);
			list_for_each_entry(entry, &dev_info->seg_list, lh) {
				segment_save(entry->segment_name);
			}
		}  else {
			// device is busy => we save it when it becomes
			// idle in dcssblk_release
			pr_info("Device %s is in use, its DCSSs will be "
				"saved when it becomes idle\n",
				dev_info->segment_name);
			dev_info->save_pending = 1;
		}
	} else if (inbuf[0] == '0') {
		if (dev_info->save_pending) {
			// device is busy & the user wants to undo his save
			// request
			dev_info->save_pending = 0;
			pr_info("A pending save request for device %s "
				"has been canceled\n",
				dev_info->segment_name);
		}
	} else {
		up_write(&dcssblk_devices_sem);
		return -EINVAL;
	}
	up_write(&dcssblk_devices_sem);
	return count;
}
/*
* device attribute for showing all segments in a device
*/
static ssize_t
dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int i;
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
down_read(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
i = 0;
buf[0] = '\0';
list_for_each_entry(entry, &dev_info->seg_list, lh) {
strcpy(&buf[i], entry->segment_name);
i += strlen(entry->segment_name);
buf[i] = '\n';
i++;
}
up_read(&dcssblk_devices_sem);
return i;
}
/*
 * "add" handler on the dcssblk root device.
 *
 * Parses a colon-separated list of up to 8-character DCSS segment
 * names, loads each segment, verifies that together they form one
 * contiguous address range, and registers a new block device covering
 * that range.  Returns 'count' on success or a negative errno.
 */
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int rc, i, j, num_of_segments;
	struct dcssblk_dev_info *dev_info;
	struct segment_info *seg_info, *temp;
	char *local_buf;
	unsigned long seg_byte_size;

	dev_info = NULL;
	seg_info = NULL;
	/* only the root device carries the "add" attribute */
	if (dev != dcssblk_root_dev) {
		rc = -EINVAL;
		goto out_nobuf;
	}
	if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
		rc = -ENAMETOOLONG;
		goto out_nobuf;
	}

	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		rc = -ENOMEM;
		goto out_nobuf;
	}
	/*
	 * parse input
	 */
	num_of_segments = 0;
	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
		/* copy one ':'-delimited name, upper-cased, to local_buf */
		for (j = i; (buf[j] != ':') &&
			(buf[j] != '\0') &&
			(buf[j] != '\n') &&
			j < count; j++) {
			local_buf[j-i] = toupper(buf[j]);
		}
		local_buf[j-i] = '\0';
		/* DCSS names are limited to 8 characters */
		if (((j - i) == 0) || ((j - i) > 8)) {
			rc = -ENAMETOOLONG;
			goto seg_list_del;
		}
		rc = dcssblk_load_segment(local_buf, &seg_info);
		if (rc < 0)
			goto seg_list_del;
		/*
		 * get a struct dcssblk_dev_info
		 */
		if (num_of_segments == 0) {
			/* first segment: allocate device, name it after it */
			dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
					GFP_KERNEL);
			if (dev_info == NULL) {
				rc = -ENOMEM;
				goto out;
			}
			strcpy(dev_info->segment_name, local_buf);
			dev_info->segment_type = seg_info->segment_type;
			INIT_LIST_HEAD(&dev_info->seg_list);
		}
		list_add_tail(&seg_info->lh, &dev_info->seg_list);
		num_of_segments++;
		i = j;

		if ((buf[j] == '\0') || (buf[j] == '\n'))
			break;
	}

	/* no trailing colon at the end of the input */
	if ((i > 0) && (buf[i-1] == ':')) {
		rc = -ENAMETOOLONG;
		goto seg_list_del;
	}
	/* keep the full (possibly multi-segment) name for messages below */
	strlcpy(local_buf, buf, i + 1);
	dev_info->num_of_segments = num_of_segments;
	rc = dcssblk_is_continuous(dev_info);
	if (rc < 0)
		goto seg_list_del;

	dev_info->start = dcssblk_find_lowest_addr(dev_info);
	dev_info->end = dcssblk_find_highest_addr(dev_info);

	dev_set_name(&dev_info->dev, dev_info->segment_name);
	dev_info->dev.release = dcssblk_release_segment;
	INIT_LIST_HEAD(&dev_info->lh);
	dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
	if (dev_info->gd == NULL) {
		rc = -ENOMEM;
		goto seg_list_del;
	}
	dev_info->gd->major = dcssblk_major;
	dev_info->gd->fops = &dcssblk_devops;
	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
	dev_info->gd->queue = dev_info->dcssblk_queue;
	dev_info->gd->private_data = dev_info;
	dev_info->gd->driverfs_dev = &dev_info->dev;
	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);

	seg_byte_size = (dev_info->end - dev_info->start + 1);
	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
	pr_info("Loaded %s with total size %lu bytes and capacity %lu "
		"sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);

	dev_info->save_pending = 0;
	dev_info->is_shared = 1;	/* segments were loaded SEGMENT_SHARED */
	dev_info->dev.parent = dcssblk_root_dev;

	/*
	 *get minor, add to list
	 */
	down_write(&dcssblk_devices_sem);
	if (dcssblk_get_segment_by_name(local_buf)) {
		rc = -EEXIST;
		goto release_gd;
	}
	rc = dcssblk_assign_free_minor(dev_info);
	if (rc)
		goto release_gd;
	sprintf(dev_info->gd->disk_name, "dcssblk%d",
		dev_info->gd->first_minor);
	list_add_tail(&dev_info->lh, &dcssblk_devices);

	if (!try_module_get(THIS_MODULE)) {
		rc = -ENODEV;
		goto dev_list_del;
	}
	/*
	 * register the device
	 */
	rc = device_register(&dev_info->dev);
	if (rc) {
		module_put(THIS_MODULE);
		goto dev_list_del;
	}
	get_device(&dev_info->dev);
	rc = device_create_file(&dev_info->dev, &dev_attr_shared);
	if (rc)
		goto unregister_dev;
	rc = device_create_file(&dev_info->dev, &dev_attr_save);
	if (rc)
		goto unregister_dev;
	rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
	if (rc)
		goto unregister_dev;

	add_disk(dev_info->gd);

	/* read-only segment types cannot be written through the blockdev */
	switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			set_disk_ro(dev_info->gd,1);
			break;
		default:
			set_disk_ro(dev_info->gd,0);
			break;
	}
	up_write(&dcssblk_devices_sem);
	rc = count;
	goto out;

	/* error unwind paths below, innermost failure first */
unregister_dev:
	list_del(&dev_info->lh);
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	device_unregister(&dev_info->dev);
	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
		segment_unload(seg_info->segment_name);
	}
	put_device(&dev_info->dev);
	up_write(&dcssblk_devices_sem);
	goto out;
dev_list_del:
	list_del(&dev_info->lh);
release_gd:
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	up_write(&dcssblk_devices_sem);
seg_list_del:
	if (dev_info == NULL)
		goto out;
	/* unload and free all segments gathered so far */
	list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
		list_del(&seg_info->lh);
		segment_unload(seg_info->segment_name);
		kfree(seg_info);
	}
	kfree(dev_info);
out:
	kfree(local_buf);
out_nobuf:
	return rc;
}
/*
 * "remove" handler on the dcssblk root device: tear down the named
 * device and unload all of its segments.  Fails with -ENODEV if the
 * name is unknown and -EBUSY while the device is open.
 */
static ssize_t
dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;
	int rc, i;
	char *local_buf;

	if (dev != dcssblk_root_dev) {
		return -EINVAL;
	}
	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		return -ENOMEM;
	}
	/*
	 * parse input (upper-cased device name, max. 8 characters)
	 */
	for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
		local_buf[i] = toupper(buf[i]);
	}
	local_buf[i] = '\0';
	if ((i == 0) || (i > 8)) {
		rc = -ENAMETOOLONG;
		goto out_buf;
	}

	down_write(&dcssblk_devices_sem);
	dev_info = dcssblk_get_device_by_name(local_buf);
	if (dev_info == NULL) {
		up_write(&dcssblk_devices_sem);
		pr_warning("Device %s cannot be removed because it is not a "
			   "known device\n", local_buf);
		rc = -ENODEV;
		goto out_buf;
	}
	if (atomic_read(&dev_info->use_count) != 0) {
		up_write(&dcssblk_devices_sem);
		pr_warning("Device %s cannot be removed while it is in "
			   "use\n", local_buf);
		rc = -EBUSY;
		goto out_buf;
	}

	list_del(&dev_info->lh);
	del_gendisk(dev_info->gd);
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	device_unregister(&dev_info->dev);

	/* unload all related segments */
	list_for_each_entry(entry, &dev_info->seg_list, lh)
		segment_unload(entry->segment_name);

	/* final put; dcssblk_release_segment() frees the state */
	put_device(&dev_info->dev);
	up_write(&dcssblk_devices_sem);

	rc = count;
out_buf:
	kfree(local_buf);
	return rc;
}
/*
 * Open hook: bump the open count and pin the block size to the 4K
 * page-sized blocks this driver works with.
 */
static int
dcssblk_open(struct block_device *bdev, fmode_t mode)
{
	struct dcssblk_dev_info *dev_info = bdev->bd_disk->private_data;

	if (dev_info == NULL)
		return -ENODEV;
	atomic_inc(&dev_info->use_count);
	bdev->bd_block_size = 4096;
	return 0;
}
/*
 * Release hook: when the open count drops to zero and a save request is
 * pending (see dcssblk_save_store()), save all segments now.
 */
static int
dcssblk_release(struct gendisk *disk, fmode_t mode)
{
	struct dcssblk_dev_info *dev_info = disk->private_data;
	struct segment_info *entry;
	int rc;

	if (!dev_info) {
		rc = -ENODEV;
		goto out;
	}
	down_write(&dcssblk_devices_sem);
	if (atomic_dec_and_test(&dev_info->use_count)
	    && (dev_info->save_pending)) {
		pr_info("Device %s has become idle and is being saved "
			"now\n", dev_info->segment_name);
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			segment_save(entry->segment_name);
		}
		dev_info->save_pending = 0;
	}
	up_write(&dcssblk_devices_sem);
	rc = 0;
out:
	return rc;
}
/*
 * Bio-based I/O path: copy page-aligned data between the bio's pages
 * and the memory backing the DCSS segment(s).  Unaligned, out-of-range
 * or forbidden-direction requests fail with bio_io_error().
 */
static void
dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct dcssblk_dev_info *dev_info;
	struct bio_vec *bvec;
	unsigned long index;
	unsigned long page_addr;
	unsigned long source_addr;
	unsigned long bytes_done;
	int i;

	bytes_done = 0;
	dev_info = bio->bi_bdev->bd_disk->private_data;
	if (dev_info == NULL)
		goto fail;
	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
		/* Request is not page-aligned. */
		goto fail;
	if (((bio->bi_size >> 9) + bio->bi_sector)
			> get_capacity(bio->bi_bdev->bd_disk)) {
		/* Request beyond end of DCSS segment. */
		goto fail;
	}
	/* verify data transfer direction */
	if (dev_info->is_shared) {
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			/* cannot write to these segments */
			if (bio_data_dir(bio) == WRITE) {
				pr_warning("Writing to %s failed because it "
					   "is a read-only device\n",
					   dev_name(&dev_info->dev));
				goto fail;
			}
		}
	}

	/* 512-byte sectors -> 4K page index within the segment range */
	index = (bio->bi_sector >> 3);
	bio_for_each_segment(bvec, bio, i) {
		page_addr = (unsigned long)
			page_address(bvec->bv_page) + bvec->bv_offset;
		source_addr = dev_info->start + (index<<12) + bytes_done;
		/*
		 * Fix: unlikely() now covers the whole alignment check;
		 * previously it wrapped only the first comparison, which
		 * misstated the branch hint (same truth value though).
		 */
		if (unlikely((page_addr & 4095) != 0 ||
			     (bvec->bv_len & 4095) != 0))
			/* More paranoia. */
			goto fail;
		if (bio_data_dir(bio) == READ) {
			memcpy((void*)page_addr, (void*)source_addr,
				bvec->bv_len);
		} else {
			memcpy((void*)source_addr, (void*)page_addr,
				bvec->bv_len);
		}
		bytes_done += bvec->bv_len;
	}
	bio_endio(bio, 0);
	return;
fail:
	bio_io_error(bio);
}
/*
 * direct_access hook: translate a page-aligned sector number into the
 * kernel address and pfn of the backing segment memory.
 */
static int
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
			void **kaddr, unsigned long *pfn)
{
	struct dcssblk_dev_info *dev_info = bdev->bd_disk->private_data;
	unsigned long pgoff;

	if (dev_info == NULL)
		return -ENODEV;
	/* only whole-page offsets are addressable */
	if (secnum % (PAGE_SIZE / 512) != 0)
		return -EINVAL;
	pgoff = secnum / (PAGE_SIZE / 512);
	if ((pgoff + 1) * PAGE_SIZE - 1 > dev_info->end - dev_info->start)
		return -ERANGE;
	*kaddr = (void *)(dev_info->start + pgoff * PAGE_SIZE);
	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
	return 0;
}
/*
 * Process the "segments=" module parameter at init time: for each
 * comma-separated set, hand the colon-separated name list to
 * dcssblk_add_store(); a trailing "(local)" switches the new device to
 * exclusive (non-shared) mode via dcssblk_shared_store().
 */
static void
dcssblk_check_params(void)
{
	int rc, i, j, k;
	char buf[DCSSBLK_PARM_LEN + 1];
	struct dcssblk_dev_info *dev_info;

	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
	     i++) {
		/* copy one comma-separated set, up to '(' or end of input */
		for (j = i; (dcssblk_segments[j] != ',') &&
			(dcssblk_segments[j] != '\0') &&
			(dcssblk_segments[j] != '(') &&
			(j < DCSSBLK_PARM_LEN); j++)
		{
			buf[j-i] = dcssblk_segments[j];
		}
		buf[j-i] = '\0';
		rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
			/* device name is the upper-cased first segment name */
			for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
				buf[k] = toupper(buf[k]);
			buf[k] = '\0';
			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
				down_read(&dcssblk_devices_sem);
				dev_info = dcssblk_get_device_by_name(buf);
				up_read(&dcssblk_devices_sem);
				if (dev_info)
					dcssblk_shared_store(&dev_info->dev,
							     NULL, "0\n", 2);
			}
		}
		/* skip to the start of the next set */
		while ((dcssblk_segments[j] != ',') &&
		       (dcssblk_segments[j] != '\0'))
		{
			j++;
		}
		if (dcssblk_segments[j] == '\0')
			break;
		i = j;
	}
}
/*
 * Suspend / Resume
 */

/*
 * PM freeze callback: suspending is refused (-EINVAL) unless every
 * dcssblk device is a shared, read-only segment type, since writable
 * segment contents cannot be carried across a suspend.
 */
static int dcssblk_freeze(struct device *dev)
{
	struct dcssblk_dev_info *dev_info;
	int rc = 0;

	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			/* read-only types are OK only while shared */
			if (!dev_info->is_shared)
				rc = -EINVAL;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		if (rc)
			break;
	}
	if (rc)
		/* dev_info still points at the offending device here */
		pr_err("Suspending the system failed because DCSS device %s "
		       "is writable\n",
		       dev_info->segment_name);
	return rc;
}
/*
 * PM restore callback: reload every segment and verify it came back at
 * exactly the same address range.  Any failure is fatal (panic), since
 * the block devices reference the old addresses.
 */
static int dcssblk_restore(struct device *dev)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;
	unsigned long start, end;
	int rc = 0;

	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			segment_unload(entry->segment_name);
			rc = segment_load(entry->segment_name, SEGMENT_SHARED,
					  &start, &end);
			if (rc < 0) {
// TODO in_use check ?
				segment_warning(rc, entry->segment_name);
				goto out_panic;
			}
			if (start != entry->start || end != entry->end) {
				pr_err("The address range of DCSS %s changed "
				       "while the system was suspended\n",
				       entry->segment_name);
				goto out_panic;
			}
		}
	}
	return 0;
out_panic:
	panic("fatal dcssblk resume error\n");
}
/* PM thaw callback: nothing to do, the segments stayed loaded. */
static int dcssblk_thaw(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops dcssblk_pm_ops = {
	.freeze		= dcssblk_freeze,
	.thaw		= dcssblk_thaw,
	.restore	= dcssblk_restore,
};

/* dummy platform driver/device pair, needed only to attach the PM ops */
static struct platform_driver dcssblk_pdrv = {
	.driver = {
		.name	= "dcssblk",
		.owner	= THIS_MODULE,
		.pm	= &dcssblk_pm_ops,
	},
};

static struct platform_device *dcssblk_pdev;
/*
 * The init/exit functions.
 */

/* Module exit: tear everything down in reverse order of dcssblk_init(). */
static void __exit
dcssblk_exit(void)
{
	platform_device_unregister(dcssblk_pdev);
	platform_driver_unregister(&dcssblk_pdrv);
	root_device_unregister(dcssblk_root_dev);
	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}
/*
 * Module init: register the platform driver/device (for the PM ops),
 * the "dcssblk" root device with its "add"/"remove" attributes and the
 * block major; finally process the "segments=" module parameter.
 */
static int __init
dcssblk_init(void)
{
	int rc;

	/*
	 * Fix: initialize dcssblk_devices_sem before anything below can
	 * expose the sysfs "add"/"remove" files -- their handlers take
	 * this semaphore, and it was previously initialized only after
	 * device_create_file(), leaving a startup window in which a
	 * write to those files used an uninitialized semaphore.
	 */
	init_rwsem(&dcssblk_devices_sem);

	rc = platform_driver_register(&dcssblk_pdrv);
	if (rc)
		return rc;

	dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL,
							0);
	if (IS_ERR(dcssblk_pdev)) {
		rc = PTR_ERR(dcssblk_pdev);
		goto out_pdrv;
	}

	dcssblk_root_dev = root_device_register("dcssblk");
	if (IS_ERR(dcssblk_root_dev)) {
		rc = PTR_ERR(dcssblk_root_dev);
		goto out_pdev;
	}
	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
	if (rc)
		goto out_root;
	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
	if (rc)
		goto out_root;
	rc = register_blkdev(0, DCSSBLK_NAME);
	if (rc < 0)
		goto out_root;
	dcssblk_major = rc;

	dcssblk_check_params();
	return 0;

out_root:
	root_device_unregister(dcssblk_root_dev);
out_pdev:
	platform_device_unregister(dcssblk_pdev);
out_pdrv:
	platform_driver_unregister(&dcssblk_pdrv);
	return rc;
}
module_init(dcssblk_init);
module_exit(dcssblk_exit);
module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
"comma-separated list, names in each set separated "
"by commas are separated by colons, each set contains "
"names of contiguous segments and each name max. 8 chars.\n"
"Adding \"(local)\" to the end of each set equals echoing 0 "
"to /sys/devices/dcssblk/<device name>/shared after loading "
"the contiguous segments - \n"
"e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Nautical-Rom/android_kernel_moto_shamu | drivers/net/ethernet/cisco/enic/vnic_intr.c | 9590 | 2028 | /*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
/* Detach the interrupt from its control-register mapping. */
void vnic_intr_free(struct vnic_intr *intr)
{
	intr->ctrl = NULL;
}
/*
 * Bind interrupt 'index' of the vNIC to its control-register resource.
 * Returns 0 on success, -EINVAL if the resource cannot be found.
 */
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
	unsigned int index)
{
	intr->index = index;
	intr->vdev = vdev;

	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
	if (intr->ctrl)
		return 0;

	pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
	return -EINVAL;
}
/*
 * Program the interrupt control registers: coalescing timer and type,
 * mask-on-assertion behavior, and a cleared credit count.
 */
void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
	unsigned int coalescing_type, unsigned int mask_on_assertion)
{
	vnic_intr_coalescing_timer_set(intr, coalescing_timer);
	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
	iowrite32(0, &intr->ctrl->int_credits);
}

/*
 * Set the coalescing timer, converting from microseconds to the
 * device's hardware units first.
 */
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
	u32 coalescing_timer)
{
	iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
		coalescing_timer), &intr->ctrl->coalescing_timer);
}

/* Reset the interrupt credit counter. */
void vnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}
| gpl-2.0 |
Andorreta/android_kernel_google_msm | fs/ocfs2/cluster/masklog.c | 11126 | 3894 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include "masklog.h"
/*
 * Global log mask state: "allow" bits and "deny" bits, toggled per mask
 * through the sysfs attributes below (see mlog_mask_show/store).
 */
struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(mlog_and_bits);
struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(mlog_not_bits);
/*
 * Render the state of one log mask bit for sysfs: "allow" if set in the
 * and-bits, "deny" if set in the not-bits, "off" otherwise.
 */
static ssize_t mlog_mask_show(u64 mask, char *buf)
{
	const char *state = "off";

	if (__mlog_test_u64(mask, mlog_and_bits))
		state = "allow";
	else if (__mlog_test_u64(mask, mlog_not_bits))
		state = "deny";

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
/*
 * Parse a mask state written through sysfs: "allow" sets the and-bit
 * and clears the not-bit, "deny" does the opposite, "off" clears both.
 * Returns 'count' on success, -EINVAL for anything else.
 */
static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
{
	if (!strnicmp(buf, "allow", 5)) {
		__mlog_set_u64(mask, mlog_and_bits);
		__mlog_clear_u64(mask, mlog_not_bits);
	} else if (!strnicmp(buf, "deny", 4)) {
		__mlog_set_u64(mask, mlog_not_bits);
		__mlog_clear_u64(mask, mlog_and_bits);
	} else if (!strnicmp(buf, "off", 3)) {
		__mlog_clear_u64(mask, mlog_not_bits);
		__mlog_clear_u64(mask, mlog_and_bits);
	} else
		return -EINVAL;

	return count;
}
/* One sysfs attribute per log mask bit. */
struct mlog_attribute {
	struct attribute attr;
	u64 mask;	/* the ML_* bit this attribute controls */
};

#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)

/* Build a world-readable, root-writable attribute named after the mask. */
#define define_mask(_name) {			\
	.attr = {				\
		.name = #_name,			\
		.mode = S_IRUGO | S_IWUSR,	\
	},					\
	.mask = ML_##_name,			\
}
/*
 * Attribute table, one entry per mask bit.  The remaining zero-filled
 * entries (mode == 0) act as the terminator scanned by mlog_sys_init().
 */
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
	define_mask(TCP),
	define_mask(MSG),
	define_mask(SOCKET),
	define_mask(HEARTBEAT),
	define_mask(HB_BIO),
	define_mask(DLMFS),
	define_mask(DLM),
	define_mask(DLM_DOMAIN),
	define_mask(DLM_THREAD),
	define_mask(DLM_MASTER),
	define_mask(DLM_RECOVERY),
	define_mask(DLM_GLUE),
	define_mask(VOTE),
	define_mask(CONN),
	define_mask(QUORUM),
	define_mask(BASTS),
	define_mask(CLUSTER),
	define_mask(ERROR),
	define_mask(NOTICE),
	define_mask(KTHREAD),
};

/* NULL-terminated pointer array handed to sysfs via mlog_ktype */
static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
char *buf)
{
struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
return mlog_mask_show(mlog_attr->mask, buf);
}
/* sysfs store dispatcher: forward to mlog_mask_store() for this bit. */
static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
			  const char *buf, size_t count)
{
	return mlog_mask_store(to_mlog_attr(attr)->mask, buf, count);
}
/* Dispatch sysfs show/store to the per-mask handlers above. */
static const struct sysfs_ops mlog_attr_ops = {
	.show  = mlog_show,
	.store = mlog_store,
};

static struct kobj_type mlog_ktype = {
	.default_attrs = mlog_attr_ptrs,
	.sysfs_ops     = &mlog_attr_ops,
};

/* Registered as the "logmask" kset under the o2cb kset. */
static struct kset mlog_kset = {
	.kobj  = {.ktype = &mlog_ktype},
};
/*
 * Populate the NULL-terminated attribute pointer array and register the
 * "logmask" kset under the given o2cb kset.
 */
int mlog_sys_init(struct kset *o2cb_kset)
{
	int i;

	/* entries past the initialized ones have mode == 0 */
	for (i = 0; mlog_attrs[i].attr.mode != 0; i++)
		mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
	mlog_attr_ptrs[i] = NULL;

	kobject_set_name(&mlog_kset.kobj, "logmask");
	mlog_kset.kobj.kset = o2cb_kset;
	return kset_register(&mlog_kset);
}
/* Remove the "logmask" directory registered by mlog_sys_init(). */
void mlog_sys_shutdown(void)
{
	kset_unregister(&mlog_kset);
}
| gpl-2.0 |
linusw/linux-bfq | net/sched/sch_hhf.c | 119 | 21948 | /* net/sched/sch_hhf.c Heavy-Hitter Filter (HHF)
*
* Copyright (C) 2013 Terry Lam <vtlam@google.com>
* Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
*/
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
/* Heavy-Hitter Filter (HHF)
*
* Principles :
* Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
* buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
* as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
* The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
* in which the heavy-hitter bucket is served with less weight.
* In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
* are isolated from heavy-hitters (e.g., persistent bulk traffic) and also have
* higher share of bandwidth.
*
* To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
* following paper:
* [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
* Accounting", in ACM SIGCOMM, 2002.
*
* Conceptually, a multi-stage filter comprises k independent hash functions
* and k counter arrays. Packets are indexed into k counter arrays by k hash
* functions, respectively. The counters are then increased by the packet sizes.
* Therefore,
* - For a heavy-hitter flow: *all* of its k array counters must be large.
* - For a non-heavy-hitter flow: some of its k array counters can be large
* due to hash collision with other small flows; however, with high
* probability, not *all* k counters are large.
*
* By the design of the multi-stage filter algorithm, the false negative rate
* (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
* susceptible to false positives (non-heavy-hitters mistakenly classified as
* heavy-hitters).
* Therefore, we also implement the following optimizations to reduce false
* positives by avoiding unnecessary increment of the counter values:
* - Optimization O1: once a heavy-hitter is identified, its bytes are not
* accounted in the array counters. This technique is called "shielding"
* in Section 3.3.1 of [EV02].
* - Optimization O2: conservative update of counters
* (Section 3.3.2 of [EV02]),
* New counter value = max {old counter value,
* smallest counter value + packet bytes}
*
* Finally, we refresh the counters periodically since otherwise the counter
* values will keep accumulating.
*
* Once a flow is classified as heavy-hitter, we also save its per-flow state
* in an exact-matching flow table so that its subsequent packets can be
* dispatched to the heavy-hitter bucket accordingly.
*
*
* At a high level, this qdisc works as follows:
* Given a packet p:
* - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
* heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
* bucket.
* - Otherwise, forward p to the multi-stage filter, denoted filter F
* + If F decides that p belongs to a non-heavy-hitter flow, then send p
* to the non-heavy-hitter bucket.
* + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
* then set up a new flow entry for the flow-id of p in the table T and
* send p to the heavy-hitter bucket.
*
* In this implementation:
* - T is a fixed-size hash-table with 1024 entries. Hash collision is
* resolved by linked-list chaining.
* - F has four counter arrays, each array containing 1024 32-bit counters.
* That means 4 * 1024 * 32 bits = 16KB of memory.
* - Since each array in F contains 1024 counters, 10 bits are sufficient to
* index into each array.
* Hence, instead of having four hash functions, we chop the 32-bit
* skb-hash into three 10-bit chunks, and the remaining 10-bit chunk is
* computed as XOR sum of those three chunks.
* - We need to clear the counter arrays periodically; however, directly
* memsetting 16KB of memory can lead to cache eviction and unwanted delay.
* So by representing each counter by a valid bit, we only need to reset
* 4K of 1 bit (i.e. 512 bytes) instead of 16KB of memory.
* - The Deficit Round Robin engine is taken from fq_codel implementation
* (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
* fq_codel_flow in fq_codel implementation.
*
*/
/* Non-configurable parameters */
#define HH_FLOWS_CNT 1024 /* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT 4 /* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN 1024 /* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10 /* masking 10 bits */
#define HHF_BIT_MASK 0x3FF /* bitmask of 10 bits */
#define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */
WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */
};
#define hhf_time_before(a, b) \
(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))
/* Heavy-hitter per-flow state.  Entries live in the chains of the
 * exact-matching table T (hhf_sched_data::hh_flows) and are evicted or
 * recycled once idle for longer than hhf_evict_timeout (see seek_list()
 * and alloc_new_hh()).
 */
struct hh_flow_state {
	u32 hash_id;			/* hash of flow-id (e.g. TCP 5-tuple) */
	u32 hit_timestamp;		/* last time heavy-hitter was seen */
	struct list_head flowchain;	/* chaining under hash collision */
};
/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
	struct sk_buff *head;		/* first skb of this bucket's FIFO */
	struct sk_buff *tail;		/* last skb of this bucket's FIFO */
	struct list_head bucketchain;	/* links into new_buckets/old_buckets */
	int deficit;			/* DRR byte credit left this round */
};
/* Per-qdisc private state for the HHF scheduler. */
struct hhf_sched_data {
	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
	u32		   perturbation;   /* hash perturbation */
	u32		   quantum;        /* psched_mtu(qdisc_dev(sch)); */
	u32		   drop_overlimit; /* number of times max qdisc packet
					    * limit was hit
					    */
	struct list_head   *hh_flows;       /* table T (currently active HHs) */
	u32		   hh_flows_limit;            /* max active HH allocs */
	u32		   hh_flows_overlimit; /* num of disallowed HH allocs */
	u32		   hh_flows_total_cnt;          /* total admitted HHs */
	u32		   hh_flows_current_cnt;        /* total current HHs */
	u32		   *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
	u32		   hhf_arrays_reset_timestamp;  /* last time hhf_arrays
							 * was reset
							 */
	unsigned long	   *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
							     * of hhf_arrays
							     */
	/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
	struct list_head   new_buckets; /* list of new buckets */
	struct list_head   old_buckets; /* list of old buckets */

	/* Configurable HHF parameters */
	u32		   hhf_reset_timeout; /* interval to reset counter
					       * arrays in filter F
					       * (default 40ms)
					       */
	u32		   hhf_admit_bytes;   /* counter thresh to classify as
					       * HH (default 128KB).
					       * With these default values,
					       * 128KB / 40ms = 25 Mbps
					       * i.e., we expect to capture HHs
					       * sending > 25 Mbps.
					       */
	u32		   hhf_evict_timeout; /* aging threshold to evict idle
					       * HHs out of table T. This should
					       * be large enough to avoid
					       * reordering during HH eviction.
					       * (default 1s)
					       */
	u32		   hhf_non_hh_weight; /* WDRR weight for non-HHs
					       * (default 2,
					       * i.e., non-HH : HH = 2 : 1)
					       */
};
/* Current time in jiffies, truncated to 32 bits so it can be compared
 * with the wrap-safe hhf_time_before() macro.
 */
static u32 hhf_time_stamp(void)
{
	u32 now = jiffies;

	return now;
}
/* Looks up a heavy-hitter flow in one hash chain of table T.
 *
 * Expired entries encountered during the walk are reclaimed on the spot,
 * except that the last entry of a chain is always preserved so the next
 * hit on this slot can reuse it without a kzalloc().
 *
 * Returns the matching flow, or NULL if no live entry matches @hash.
 */
static struct hh_flow_state *seek_list(const u32 hash,
				       struct list_head *head,
				       struct hhf_sched_data *q)
{
	struct hh_flow_state *pos, *tmp;
	u32 now = hhf_time_stamp();

	if (list_empty(head))
		return NULL;

	list_for_each_entry_safe(pos, tmp, head, flowchain) {
		u32 expiry = pos->hit_timestamp + q->hhf_evict_timeout;

		if (!hhf_time_before(expiry, now)) {
			/* Entry is still live: report it on a flow-id match. */
			if (pos->hash_id == hash)
				return pos;
			continue;
		}

		/* Expired heavy-hitter: delete it, but keep one entry per
		 * chain to avoid kzalloc() when next time this slot is hit.
		 */
		if (list_is_last(&pos->flowchain, head))
			return NULL;
		list_del(&pos->flowchain);
		kfree(pos);
		q->hh_flows_current_cnt--;
	}
	return NULL;
}
/* Returns a flow state entry for a new heavy-hitter on chain @head.
 *
 * An expired entry already on the chain is recycled when one exists;
 * otherwise a fresh entry is allocated (GFP_ATOMIC), subject to the
 * hh_flows_limit cap.  Returns NULL on cap overflow or allocation failure.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
					  struct hhf_sched_data *q)
{
	struct hh_flow_state *entry;
	u32 now = hhf_time_stamp();

	/* Prefer recycling an expired entry already on this chain
	 * (a no-op walk when the chain is empty).
	 */
	list_for_each_entry(entry, head, flowchain) {
		u32 expiry = entry->hit_timestamp + q->hhf_evict_timeout;

		if (hhf_time_before(expiry, now))
			return entry;
	}

	if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
		q->hh_flows_overlimit++;
		return NULL;
	}

	/* No reusable entry: create a new one. */
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	q->hh_flows_current_cnt++;
	INIT_LIST_HEAD(&entry->flowchain);
	list_add_tail(&entry->flowchain, head);

	return entry;
}
/* Assigns packets to WDRR buckets.  Implements a multi-stage filter to
 * classify heavy-hitters.
 *
 * Returns WDRR_BUCKET_FOR_HH when the packet belongs to an established or
 * newly admitted heavy-hitter flow, WDRR_BUCKET_FOR_NON_HH otherwise.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	u32 tmp_hash, hash;
	u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
	struct hh_flow_state *flow;
	u32 pkt_len, min_hhf_val;
	int i;
	u32 prev;
	u32 now = hhf_time_stamp();

	/* Reset the HHF counter arrays if this is the right time.
	 * Only the valid bitmaps are cleared (512 bytes), not the 16KB of
	 * counters themselves; stale counters are lazily re-zeroed below.
	 */
	prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
	if (hhf_time_before(prev, now)) {
		for (i = 0; i < HHF_ARRAYS_CNT; i++)
			bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
		q->hhf_arrays_reset_timestamp = now;
	}

	/* Get hashed flow-id of the skb. */
	hash = skb_get_hash_perturb(skb, q->perturbation);

	/* Check if this packet belongs to an already established HH flow. */
	flow_pos = hash & HHF_BIT_MASK;
	flow = seek_list(hash, &q->hh_flows[flow_pos], q);
	if (flow) { /* found its HH flow */
		flow->hit_timestamp = now;
		return WDRR_BUCKET_FOR_HH;
	}

	/* Now pass the packet through the multi-stage filter. */
	tmp_hash = hash;
	xorsum = 0;
	for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
		/* Split the skb_hash into three 10-bit chunks. */
		filter_pos[i] = tmp_hash & HHF_BIT_MASK;
		xorsum ^= filter_pos[i];
		tmp_hash >>= HHF_BIT_MASK_LEN;
	}
	/* The last chunk is computed as XOR sum of other chunks. */
	filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;

	pkt_len = qdisc_pkt_len(skb);
	min_hhf_val = ~0U;
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		u32 val;

		/* Lazily zero a counter whose valid bit was cleared by the
		 * periodic reset above.
		 */
		if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
			q->hhf_arrays[i][filter_pos[i]] = 0;
			__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
		}

		val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
		if (min_hhf_val > val)
			min_hhf_val = val;
	}

	/* Found a new HH iff all counter values > HH admit threshold. */
	if (min_hhf_val > q->hhf_admit_bytes) {
		/* Just captured a new heavy-hitter. */
		flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
		if (!flow) /* memory alloc problem */
			return WDRR_BUCKET_FOR_NON_HH;
		flow->hash_id = hash;
		flow->hit_timestamp = now;
		q->hh_flows_total_cnt++;

		/* By returning without updating counters in q->hhf_arrays,
		 * we implicitly implement "shielding" (see Optimization O1).
		 */
		return WDRR_BUCKET_FOR_HH;
	}

	/* Conservative update of HHF arrays (see Optimization O2): raise each
	 * counter only up to the smallest prospective value, never beyond.
	 */
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
			q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
	}
	return WDRR_BUCKET_FOR_NON_HH;
}
/* Unlinks and returns the skb at the head of @bucket's FIFO.
 * The caller must ensure the bucket is non-empty.
 */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
	struct sk_buff *first = bucket->head;

	bucket->head = first->next;
	first->next = NULL;
	return first;
}
/* Appends @skb to the tail of @bucket's FIFO. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
	skb->next = NULL;
	if (bucket->head)
		bucket->tail->next = skb;
	else
		bucket->head = skb;
	bucket->tail = skb;
}
/* Drops one packet, preferring the heavy-hitter bucket, and updates the
 * qdisc's qlen/backlog/drop statistics accordingly.
 *
 * Returns the index (into q->buckets) of the bucket that was targeted,
 * whether or not a packet was actually available to drop.
 */
static unsigned int hhf_drop(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct wdrr_bucket *bucket;

	/* Always try to drop from heavy-hitters first. */
	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
	if (!bucket->head)
		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

	if (bucket->head) {
		struct sk_buff *skb = dequeue_head(bucket);

		sch->q.qlen--;
		qdisc_qstats_drop(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
	}

	/* Return id of the bucket from which the packet was dropped. */
	return bucket - q->buckets;
}
/* Qdisc_ops ->drop hook: drop one packet and report how many backlog
 * bytes were freed by the drop.
 */
static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
{
	unsigned int backlog_before = sch->qstats.backlog;

	hhf_drop(sch);
	return backlog_before - sch->qstats.backlog;
}
/* Qdisc_ops ->enqueue hook: classify @skb into a WDRR bucket, append it,
 * and enforce the qdisc packet limit by dropping (preferably from the
 * heavy-hitter bucket) when over limit.
 *
 * Returns NET_XMIT_SUCCESS, or NET_XMIT_CN when a packet was dropped from
 * the same bucket this skb went to.
 */
static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	enum wdrr_bucket_idx idx;
	struct wdrr_bucket *bucket;
	unsigned int prev_backlog;

	idx = hhf_classify(skb, sch);

	bucket = &q->buckets[idx];
	bucket_add(bucket, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	/* An empty bucketchain means the bucket was idle: (re)activate it. */
	if (list_empty(&bucket->bucketchain)) {
		unsigned int weight;

		/* The logic of new_buckets vs. old_buckets is the same as
		 * new_flows vs. old_flows in the implementation of fq_codel,
		 * i.e., short bursts of non-HHs should have strict priority.
		 */
		if (idx == WDRR_BUCKET_FOR_HH) {
			/* Always move heavy-hitters to old bucket. */
			weight = 1;
			list_add_tail(&bucket->bucketchain, &q->old_buckets);
		} else {
			weight = q->hhf_non_hh_weight;
			list_add_tail(&bucket->bucketchain, &q->new_buckets);
		}
		bucket->deficit = weight * q->quantum;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet from this
	 * bucket.
	 */
	if (hhf_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this. */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}
/* Qdisc_ops ->dequeue hook: Weighted Deficit Round Robin over the two
 * buckets, with new_buckets serviced before old_buckets (same rotation
 * scheme as fq_codel's new_flows/old_flows).
 *
 * Returns the next skb to transmit, or NULL if the qdisc is empty.
 */
static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct wdrr_bucket *bucket;
	struct list_head *head;

begin:
	/* Service new buckets first; fall back to old ones. */
	head = &q->new_buckets;
	if (list_empty(head)) {
		head = &q->old_buckets;
		if (list_empty(head))
			return NULL;
	}
	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

	/* Out of byte credit: top up the deficit by the bucket's weighted
	 * quantum and rotate it to the back of the old list.
	 */
	if (bucket->deficit <= 0) {
		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
			      1 : q->hhf_non_hh_weight;

		bucket->deficit += weight * q->quantum;
		list_move_tail(&bucket->bucketchain, &q->old_buckets);
		goto begin;
	}

	if (bucket->head) {
		skb = dequeue_head(bucket);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
	}

	if (!skb) {
		/* Force a pass through old_buckets to prevent starvation. */
		if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
			list_move_tail(&bucket->bucketchain, &q->old_buckets);
		else
			list_del_init(&bucket->bucketchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	bucket->deficit -= qdisc_pkt_len(skb);

	return skb;
}
/* Qdisc_ops ->reset hook: drain and free every queued packet, letting
 * hhf_dequeue() keep the qlen/backlog counters consistent as it goes.
 */
static void hhf_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	for (skb = hhf_dequeue(sch); skb; skb = hhf_dequeue(sch))
		kfree_skb(skb);
}
/* Zeroed allocation that tries kmalloc first (quietly, __GFP_NOWARN) and
 * falls back to vmalloc when contiguous pages are unavailable.
 * Pair with hhf_free()/kvfree().
 */
static void *hhf_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	return ptr ? ptr : vzalloc(sz);
}
/* Counterpart of hhf_zalloc(): kvfree() handles both kmalloc'd and
 * vmalloc'd memory (and is a no-op on NULL).
 */
static void hhf_free(void *addr)
{
	kvfree(addr);
}
/* Qdisc_ops ->destroy hook: release the filter arrays, their valid-bit
 * shadows, every heavy-hitter flow entry, and table T itself.
 *
 * Must tolerate a partially initialized qdisc: if hhf_init() fails early
 * (e.g. hhf_change() rejects the netlink config before q->hh_flows is
 * allocated), the qdisc core can still call ->destroy.  hhf_free()/kvfree()
 * accepts NULL array pointers, but the flow-table walk below would
 * dereference a NULL q->hh_flows, so bail out before it.
 */
static void hhf_destroy(struct Qdisc *sch)
{
	int i;
	struct hhf_sched_data *q = qdisc_priv(sch);

	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		hhf_free(q->hhf_arrays[i]);
		hhf_free(q->hhf_valid_bits[i]);
	}

	if (!q->hh_flows)
		return;

	for (i = 0; i < HH_FLOWS_CNT; i++) {
		struct hh_flow_state *flow, *next;
		struct list_head *head = &q->hh_flows[i];

		if (list_empty(head))
			continue;
		list_for_each_entry_safe(flow, next, head, flowchain) {
			list_del(&flow->flowchain);
			kfree(flow);
		}
	}
	hhf_free(q->hh_flows);
}
/* Netlink attribute validation policy for hhf_change(): every TCA_HHF_*
 * attribute is a 32-bit unsigned value.
 */
static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
	[TCA_HHF_BACKLOG_LIMIT]	 = { .type = NLA_U32 },
	[TCA_HHF_QUANTUM]	 = { .type = NLA_U32 },
	[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
	[TCA_HHF_RESET_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_ADMIT_BYTES]	 = { .type = NLA_U32 },
	[TCA_HHF_EVICT_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
};
/* Qdisc_ops ->change hook: apply a netlink configuration update.
 *
 * quantum * non_hh_weight is range-checked (against INT_MAX, since
 * wdrr_bucket::deficit is a signed int) before anything is committed, so
 * a rejected update leaves the qdisc untouched.  All state mutation
 * happens under sch_tree_lock(); excess packets are drained afterwards if
 * the new limit is smaller than the current queue length.
 *
 * Returns 0 on success or a negative errno.
 */
static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HHF_MAX + 1];
	unsigned int qlen, prev_backlog;
	int err;
	u64 non_hh_quantum;
	u32 new_quantum = q->quantum;
	u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HHF_QUANTUM])
		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

	if (tb[TCA_HHF_NON_HH_WEIGHT])
		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

	/* Validate in u64 to avoid overflow before the INT_MAX check. */
	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
	if (non_hh_quantum > INT_MAX)
		return -EINVAL;

	sch_tree_lock(sch);

	if (tb[TCA_HHF_BACKLOG_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);

	q->quantum = new_quantum;
	q->hhf_non_hh_weight = new_hhf_non_hh_weight;

	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);

	/* Timeouts arrive in microseconds and are stored in jiffies. */
	if (tb[TCA_HHF_RESET_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

		q->hhf_reset_timeout = usecs_to_jiffies(us);
	}

	if (tb[TCA_HHF_ADMIT_BYTES])
		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);

	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

		q->hhf_evict_timeout = usecs_to_jiffies(us);
	}

	/* Drop packets until within the (possibly lowered) limit, then tell
	 * the parent qdiscs how much queue/backlog disappeared.
	 */
	qlen = sch->q.qlen;
	prev_backlog = sch->qstats.backlog;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = hhf_dequeue(sch);

		kfree_skb(skb);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
				  prev_backlog - sch->qstats.backlog);

	sch_tree_unlock(sch);
	return 0;
}
/* Qdisc_ops ->init hook: set defaults, apply an optional initial netlink
 * config, then allocate table T, the filter arrays, their valid-bit
 * shadows, and the two WDRR buckets.
 *
 * On partial allocation failure this calls hhf_destroy() to unwind;
 * hhf_free()/kvfree() copes with the not-yet-allocated (NULL) members.
 * Returns 0 on success or a negative errno.
 */
static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 1000;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_buckets);
	INIT_LIST_HEAD(&q->old_buckets);

	/* Configurable HHF parameters */
	q->hhf_reset_timeout = HZ / 25; /* 40  ms */
	q->hhf_admit_bytes = 131072;    /* 128 KB */
	q->hhf_evict_timeout = HZ;      /* 1  sec */
	q->hhf_non_hh_weight = 2;

	if (opt) {
		int err = hhf_change(sch, opt);

		if (err)
			return err;
	}

	/* Guard makes init idempotent: skip allocation on a re-init when the
	 * tables already exist.
	 */
	if (!q->hh_flows) {
		/* Initialize heavy-hitter flow table. */
		q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
					 sizeof(struct list_head));
		if (!q->hh_flows)
			return -ENOMEM;
		for (i = 0; i < HH_FLOWS_CNT; i++)
			INIT_LIST_HEAD(&q->hh_flows[i]);

		/* Cap max active HHs at twice len of hh_flows table. */
		q->hh_flows_limit = 2 * HH_FLOWS_CNT;
		q->hh_flows_overlimit = 0;
		q->hh_flows_total_cnt = 0;
		q->hh_flows_current_cnt = 0;

		/* Initialize heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
						      sizeof(u32));
			if (!q->hhf_arrays[i]) {
				hhf_destroy(sch);
				return -ENOMEM;
			}
		}
		q->hhf_arrays_reset_timestamp = hhf_time_stamp();

		/* Initialize valid bits of heavy-hitter filter arrays:
		 * one bit per counter, i.e. HHF_ARRAYS_LEN bits per array.
		 */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
							  BITS_PER_BYTE);
			if (!q->hhf_valid_bits[i]) {
				hhf_destroy(sch);
				return -ENOMEM;
			}
		}

		/* Initialize Weighted DRR buckets. */
		for (i = 0; i < WDRR_BUCKET_CNT; i++) {
			struct wdrr_bucket *bucket = q->buckets + i;

			INIT_LIST_HEAD(&bucket->bucketchain);
		}
	}

	return 0;
}
/* Qdisc_ops ->dump hook: emit the current configuration as nested
 * TCA_HHF_* netlink attributes (timeouts converted back from jiffies to
 * microseconds).  Returns the nest size on success, -1 on buffer overrun.
 */
static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
			jiffies_to_usecs(q->hhf_reset_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
			jiffies_to_usecs(q->hhf_evict_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
/* Qdisc_ops ->dump_stats hook: copy the HHF-specific counters out to
 * userspace as a tc_hhf_xstats blob.
 */
static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct tc_hhf_xstats st;

	/* Zero first so no uninitialized padding reaches userspace. */
	memset(&st, 0, sizeof(st));
	st.drop_overlimit = q->drop_overlimit;
	st.hh_overlimit	  = q->hh_flows_overlimit;
	st.hh_tot_count	  = q->hh_flows_total_cnt;
	st.hh_cur_count	  = q->hh_flows_current_cnt;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
/* Qdisc operations vector registering the "hhf" scheduler with the
 * traffic-control core.
 */
static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
	.id		=	"hhf",
	.priv_size	=	sizeof(struct hhf_sched_data),

	.enqueue	=	hhf_enqueue,
	.dequeue	=	hhf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	hhf_qdisc_drop,
	.init		=	hhf_init,
	.reset		=	hhf_reset,
	.destroy	=	hhf_destroy,
	.change		=	hhf_change,
	.dump		=	hhf_dump,
	.dump_stats	=	hhf_dump_stats,
	.owner		=	THIS_MODULE,
};
/* Module entry point: register the hhf qdisc with the TC core. */
static int __init hhf_module_init(void)
{
	return register_qdisc(&hhf_qdisc_ops);
}

/* Module exit point: unregister the hhf qdisc. */
static void __exit hhf_module_exit(void)
{
	unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
| gpl-2.0 |
XirXes/PyramidION | arch/arm/mach-msm/platsmp.c | 119 | 4112 | /*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/hardware/gic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <mach/socinfo.h>
#include <mach/smp.h>
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
#include "pm.h"
#include "scm-boot.h"
/* Holding-pen handshake word shared with the secondary-CPU boot path.
 * boot_secondary() writes the cpu number to be released here; the wait
 * loop there then polls for 0xFFFFFFFF, which the released secondary is
 * presumably expected to write back once it is up — TODO confirm against
 * the MSM secondary startup code.  -1 means "nobody released yet".
 */
int pen_release = -1;

/* Initialize the present map (cpu_set(i, cpu_present_map)). */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* Mark CPUs 0..max_cpus-1 as present so they can be brought up. */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, cpu_present_map);
}
/* Populate the possible-CPU map from the SoC core count and install the
 * GIC softirq raiser as the cross-call (IPI) mechanism.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	for (i = 0; i < ncores; i++)
		cpu_set(i, cpu_possible_map);

	set_smp_cross_call(gic_raise_softirq);
}
/* Power up / release a secondary core from reset by poking SoC power and
 * reset control registers.  The register addresses, values, and delays
 * below are MSM-specific magic; do not reorder them.
 *
 * NOTE(review): the cpuid check distinguishes the core family — part
 * field 0x2D appears to select the Scorpion path (second branch) versus
 * Krait (first branch); confirm against Qualcomm documentation.
 */
static void __cpuinit release_secondary(unsigned int cpu)
{
	void *base_ptr;

	BUG_ON(cpu >= get_core_count());

	/* KraitMP or ScorpionMP ? */
	if ((read_cpuid_id() & 0xFF0) >> 4 != 0x2D) {
		/* Map the (presumed) per-core power/reset control block. */
		base_ptr = ioremap_nocache(0x02098000, SZ_4K);
		if (base_ptr) {
			if (machine_is_msm8960_sim() ||
			    machine_is_msm8960_rumi3()) {
				writel_relaxed(0x10, base_ptr+0x04);
				writel_relaxed(0x80, base_ptr+0x04);
			} else if (get_core_count() == 2) {
				/* Timed power-up sequence; the udelay/ndelay
				 * gaps between writes are required.
				 */
				writel_relaxed(0x109, base_ptr+0x04);
				writel_relaxed(0x101, base_ptr+0x04);
				ndelay(300);

				writel_relaxed(0x121, base_ptr+0x04);
				udelay(2);

				writel_relaxed(0x020, base_ptr+0x04);
				udelay(2);

				writel_relaxed(0x000, base_ptr+0x04);
				udelay(100);

				writel_relaxed(0x080, base_ptr+0x04);
			}
			/* Ensure all relaxed writes have hit the device
			 * before unmapping.
			 */
			mb();
			iounmap(base_ptr);
		}
	} else {
		base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
		if (base_ptr) {
			writel_relaxed(0x0, base_ptr+0x15A0);
			dmb();
			writel_relaxed(0x0, base_ptr+0xD80);
			writel_relaxed(0x3, base_ptr+0xE64);
			mb();
			iounmap(base_ptr);
		}
	}
}
/* Tracks whether each CPU has been through the SCM cold-boot setup once;
 * the boot address only needs to be programmed on the first bring-up.
 */
DEFINE_PER_CPU(int, cold_boot_done);

/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	int ret;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	/* First bring-up only: program the secondary entry point via the
	 * secure monitor, then run the SoC power/reset release sequence.
	 */
	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					SCM_FLAG_COLDBOOT_CPU1);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
			       "address\n");
		per_cpu(cold_boot_done, cpu) = true;
	}

	/* Publish the cpu number through the holding pen; the explicit cache
	 * flush + SEV makes it visible to the (possibly still non-coherent)
	 * secondary and wakes it from WFE.
	 */
	pen_release = cpu;
	dmac_flush_range((void *)&pen_release,
			 (void *)(&pen_release + sizeof(pen_release)));
	__asm__("sev");
	mb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	/* Bounded wait (~10 * 500us) for the secondary to acknowledge by
	 * writing 0xFFFFFFFF back into pen_release; give up silently after
	 * the timeout.  NOTE(review): return value is 0 either way —
	 * callers apparently detect failure elsewhere; confirm.
	 */
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release+sizeof(pen_release)));
		usleep(500);
		if (cnt++ >= 10)
			break;
	}

	return 0;
}
/* Initialization routine for secondary CPUs after they are brought out of
 * reset, running on the secondary itself: per-CPU PM hookup and local GIC
 * configuration before the CPU enters the scheduler.
 */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

#ifdef CONFIG_HOTPLUG_CPU
	WARN_ON(msm_pm_platform_secondary_init(cpu));
#endif

	trace_hardirqs_off();

	/* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
	writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);

	/* RUMI does not adhere to GIC spec by enabling STIs by default.
	 * Enable/clear is supposed to be RO for STIs, but is RW on RUMI.
	 */
	if (!machine_is_msm8x60_sim())
		writel(0x0000FFFF, MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_SET);

	gic_secondary_init(0);
}
| gpl-2.0 |
codeaurora-unoffical/linux-msm | arch/sparc/crypto/sha1_glue.c | 119 | 4598 | // SPDX-License-Identifier: GPL-2.0-only
/* Glue code for SHA1 hashing optimized for sparc64 crypto opcodes.
*
* This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include "opcodes.h"
asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data,
unsigned int rounds);
static int sha1_sparc64_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
/* Core update step: absorb @len bytes of @data into the hash state.
 * @partial is the number of bytes already buffered from a previous call;
 * the caller guarantees partial + len covers at least one full block.
 */
static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data,
				  unsigned int len, unsigned int partial)
{
	unsigned int consumed = 0;
	unsigned int nblocks;

	sctx->count += len;

	if (partial) {
		/* Complete and hash the previously buffered partial block. */
		consumed = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, consumed);
		sha1_sparc64_transform(sctx->state, sctx->buffer, 1);
	}

	/* Hash whole blocks straight out of the caller's buffer. */
	nblocks = (len - consumed) / SHA1_BLOCK_SIZE;
	if (nblocks) {
		sha1_sparc64_transform(sctx->state, data + consumed, nblocks);
		consumed += nblocks * SHA1_BLOCK_SIZE;
	}

	/* Stash the remaining tail for a later update/final. */
	memcpy(sctx->buffer, data + consumed, len - consumed);
}
/* shash ->update: buffer sub-block input, or hand off to the core update
 * once a full block's worth of data is available.
 */
static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;

	if (partial + len >= SHA1_BLOCK_SIZE) {
		__sha1_sparc64_update(sctx, data, len, partial);
		return 0;
	}

	/* Not enough for a full block yet: just buffer the bytes. */
	sctx->count += len;
	memcpy(sctx->buffer + partial, data, len);

	return 0;
}
/* Add padding and return the message digest.
 *
 * Standard Merkle-Damgård finalization: append 0x80, zero-pad so the
 * total length is congruent to 56 mod 64, append the big-endian 64-bit
 * bit count, then emit the state words big-endian and wipe the context.
 */
static int sha1_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	/* Capture the length in bits before padding mutates sctx->count. */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);

	/* We need to fill a whole block for __sha1_sparc64_update() */
	if (padlen <= 56) {
		/* Padding fits in the current block: buffer it directly
		 * (count is bumped manually since no transform runs yet).
		 */
		sctx->count += padlen;
		memcpy(sctx->buffer + index, padding, padlen);
	} else {
		/* Padding spills into a second block: run the core update. */
		__sha1_sparc64_update(sctx, padding, padlen, index);
	}
	/* Buffer now holds exactly 56 bytes; the 8-byte length completes it. */
	__sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
static int sha1_sparc64_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
/* shash ->import: restore a hash state previously saved by ->export. */
static int sha1_sparc64_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}
/* Registration record for the hardware-accelerated SHA-1 shash; the
 * elevated cra_priority lets it win over the generic C implementation.
 */
static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_sparc64_init,
	.update		=	sha1_sparc64_update,
	.final		=	sha1_sparc64_final,
	.export		=	sha1_sparc64_export,
	.import		=	sha1_sparc64_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};
/* Detect the sparc64 SHA-1 crypto opcode: the ELF hwcap must advertise the
 * crypto unit, and the Configuration Feature Register (%asr26) must set
 * the SHA-1 bit.
 */
static bool __init sparc64_has_sha1_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

	return (cfr & CFR_SHA1) != 0;
}
/* Module entry point: register the accelerated SHA-1 only when the CPU
 * actually implements the sha1 opcode, otherwise decline with -ENODEV.
 */
static int __init sha1_sparc64_mod_init(void)
{
	if (!sparc64_has_sha1_opcode()) {
		pr_info("sparc64 sha1 opcode not available.\n");
		return -ENODEV;
	}

	pr_info("Using sparc64 sha1 opcode optimized SHA-1 implementation\n");
	return crypto_register_shash(&alg);
}
/* Module exit point: unregister the accelerated SHA-1 shash. */
static void __exit sha1_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_sparc64_mod_init);
module_exit(sha1_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");

MODULE_ALIAS_CRYPTO("sha1");

#include "crop_devid.c"
taoguan/linux | drivers/gpu/drm/radeon/r600_cp.c | 375 | 80218 | /*
* Copyright 2008-2009 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Dave Airlie <airlied@redhat.com>
* Alex Deucher <alexander.deucher@amd.com>
*
* ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_drv.h"
/* Command-processor microcode sizes for the R6xx/R7xx PFP and PM4 (ME)
 * engines — presumably in 32-bit words; TODO confirm against the firmware
 * loading code.
 */
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");

/* Legacy command-stream checker, implemented elsewhere. */
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l);
void r600_cs_legacy_init(void);

# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
# define ATI_PCIGART_PAGE_MASK		(~(ATI_PCIGART_PAGE_SIZE-1))

/* GART page-table entry flag bits (64-bit PTEs, see r600_page_table_init). */
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

/* MAX values used for gfx init */
#define R6XX_MAX_SH_GPRS           256
#define R6XX_MAX_TEMP_GPRS         16
#define R6XX_MAX_SH_THREADS        256
#define R6XX_MAX_SH_STACK_ENTRIES  4096
#define R6XX_MAX_BACKENDS          8
#define R6XX_MAX_BACKENDS_MASK     0xff
#define R6XX_MAX_SIMDS             8
#define R6XX_MAX_SIMDS_MASK        0xff
#define R6XX_MAX_PIPES             8
#define R6XX_MAX_PIPES_MASK        0xff

#define R7XX_MAX_SH_GPRS           256
#define R7XX_MAX_TEMP_GPRS         16
#define R7XX_MAX_SH_THREADS        256
#define R7XX_MAX_SH_STACK_ENTRIES  4096
#define R7XX_MAX_BACKENDS          8
#define R7XX_MAX_BACKENDS_MASK     0xff
#define R7XX_MAX_SIMDS             16
#define R7XX_MAX_SIMDS_MASK        0xffff
#define R7XX_MAX_PIPES             8
#define R7XX_MAX_PIPES_MASK        0xff
/* Poll GRBM_STATUS until the command FIFO reports at least @entries free
 * slots (the available-slots field lives at a different mask on RV770+).
 *
 * Returns 0 on success, -EBUSY after usec_timeout microseconds.
 */
static int r600_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries)
{
	int i;

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		int slots;
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
			slots = (RADEON_READ(R600_GRBM_STATUS)
				 & R700_CMDFIFO_AVAIL_MASK);
		else
			slots = (RADEON_READ(R600_GRBM_STATUS)
				 & R600_CMDFIFO_AVAIL_MASK);
		if (slots >= entries)
			return 0;
		DRM_UDELAY(1);
	}
	DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
		 RADEON_READ(R600_GRBM_STATUS),
		 RADEON_READ(R600_GRBM_STATUS2));

	return -EBUSY;
}
/* Wait for the GPU graphics engine to go fully idle: first drain the
 * command FIFO (8 slots on RV770+, 16 before), then poll GRBM_STATUS
 * until the GUI-active bit clears.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int r600_do_wait_for_idle(drm_radeon_private_t *dev_priv)
{
	int i, ret;

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
		ret = r600_do_wait_for_fifo(dev_priv, 8);
	else
		ret = r600_do_wait_for_fifo(dev_priv, 16);
	if (ret)
		return ret;
	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(RADEON_READ(R600_GRBM_STATUS) & R600_GUI_ACTIVE))
			return 0;
		DRM_UDELAY(1);
	}
	DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
		 RADEON_READ(R600_GRBM_STATUS),
		 RADEON_READ(R600_GRBM_STATUS2));

	return -EBUSY;
}
/* Tear down the GART page table built by r600_page_table_init(): unmap
 * every scatter-gather page previously pci_map_page()'d (stopping at the
 * first hole) and, for a main-memory table, drop the recorded bus address.
 */
void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
	struct drm_sg_mem *entry = dev->sg;
	int max_pages;
	int pages;
	int i;

	if (!entry)
		return;

	if (gart_info->bus_addr) {
		max_pages = (gart_info->table_size / sizeof(u64));
		pages = (entry->pages <= max_pages)
		  ? entry->pages : max_pages;

		for (i = 0; i < pages; i++) {
			if (!entry->busaddr[i])
				break;
			pci_unmap_page(dev->pdev, entry->busaddr[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}

		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
			gart_info->bus_addr = 0;
	}
}
/* R600 has page table setup */
/* Build the R600 GART page table: DMA-map each scatter-gather page and
 * write one 64-bit PTE per ATI_PCIGART_PAGE_SIZE (4K) chunk, flagged
 * valid/system/snooped/readable/writeable.
 *
 * Returns 1 on success, 0 on a DMA mapping failure (note: NOT the usual
 * 0/-errno convention) — on failure the partially built table is cleaned
 * up via r600_page_table_cleanup().
 */
int r600_page_table_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
	struct drm_local_map *map = &gart_info->mapping;
	struct drm_sg_mem *entry = dev->sg;
	int ret = 0;
	int i, j;
	int pages;
	u64 page_base;
	dma_addr_t entry_addr;
	int max_ati_pages, max_real_pages, gart_idx;

	/* okay page table is available - lets rock */
	max_ati_pages = (gart_info->table_size / sizeof(u64));
	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);

	pages = (entry->pages <= max_real_pages) ?
		entry->pages : max_real_pages;

	memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64));

	gart_idx = 0;
	for (i = 0; i < pages; i++) {
		entry->busaddr[i] = pci_map_page(dev->pdev,
						 entry->pagelist[i], 0,
						 PAGE_SIZE,
						 PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
			DRM_ERROR("unable to map PCIGART pages!\n");
			r600_page_table_cleanup(dev, gart_info);
			goto done;
		}
		entry_addr = entry->busaddr[i];
		/* A CPU page may span several 4K GART pages. */
		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
			page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK;
			page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
			page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE;

			DRM_WRITE64(map, gart_idx * sizeof(u64), page_base);

			gart_idx++;

			if ((i % 128) == 0)
				DRM_DEBUG("page entry %d: 0x%016llx\n",
				    i, (unsigned long long)page_base);
			entry_addr += ATI_PCIGART_PAGE_SIZE;
		}
	}
	ret = 1;
done:
	return ret;
}
/* Request a VM context-0 TLB invalidation over the whole GART aperture
 * and poll (bounded by ~1000us) for the hardware's response bits.
 */
static void r600_vm_flush_gart_range(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 resp, countdown = 1000;

	/* Addresses are programmed in 4K-page units (>> 12). */
	RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
	RADEON_WRITE(R600_VM_CONTEXT0_REQUEST_RESPONSE, 2);

	/* Best-effort wait: gives up silently when the countdown expires. */
	do {
		resp = RADEON_READ(R600_VM_CONTEXT0_REQUEST_RESPONSE);
		countdown--;
		DRM_UDELAY(1);
	} while (((resp & 0xf0) == 0) && countdown);
}
/*
 * Enable the R6xx VM (GART translation) for context 0: program the
 * system aperture, the MC L1 TLB for every memory-controller client,
 * the VM L2 cache, and point context 0 at the page table built by
 * r600_page_table_init().  Contexts 1-7 are left disabled.  Finishes
 * with a full GART-range TLB flush.
 */
static void r600_vm_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	/* initialise the VM to use the page table we constructed up there */
	u32 vm_c0, i;
	u32 mc_rd_a;
	u32 vm_l2_cntl, vm_l2_cntl3;

	/* okay set up the PCIE aperture type thingo */
	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);

	/* setup MC RD a */
	mc_rd_a = R600_MCD_L1_TLB | R600_MCD_L1_FRAG_PROC | R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS |
		R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | R600_MCD_EFFECTIVE_L1_TLB_SIZE(5) |
		R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(5) | R600_MCD_WAIT_L2_QUERY;

	/* The same L1 TLB configuration is applied to each MC client;
	 * the HDP read path additionally enables strict ordering and the
	 * semaphore read path enables semaphore mode. */
	RADEON_WRITE(R600_MCD_RD_A_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_RD_B_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_WR_A_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_B_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_GFX_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_GFX_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_SYS_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_SYS_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_HDP_CNTL, mc_rd_a | R600_MCD_L1_STRICT_ORDERING);
	RADEON_WRITE(R600_MCD_WR_HDP_CNTL, mc_rd_a /*| R600_MCD_L1_STRICT_ORDERING*/);

	RADEON_WRITE(R600_MCD_RD_PDMA_CNTL, mc_rd_a);
	RADEON_WRITE(R600_MCD_WR_PDMA_CNTL, mc_rd_a);

	RADEON_WRITE(R600_MCD_RD_SEM_CNTL, mc_rd_a | R600_MCD_SEMAPHORE_MODE);
	RADEON_WRITE(R600_MCD_WR_SEM_CNTL, mc_rd_a);

	/* VM L2 cache: enabled, fragment processing, LRU write policy. */
	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
	vm_l2_cntl |= R600_VM_L2_CNTL_QUEUE_SIZE(7);
	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
	vm_l2_cntl3 = (R600_VM_L2_CNTL3_BANK_SELECT_0(0) |
		       R600_VM_L2_CNTL3_BANK_SELECT_1(1) |
		       R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(2));
	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);

	/* Enable context 0 with a flat (single-level) page table... */
	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);

	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);

	/* Page table base and covered GPU address range, in 4K pages. */
	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);

	r600_vm_flush_gart_range(dev);
}
/*
 * Fetch the CP microcode (PFP and ME/PM4 images) for the detected ASIC
 * family via the kernel firmware loader and validate their sizes.
 *
 * A throw-away platform device is registered purely to give
 * request_firmware() a struct device to attach the request to; it is
 * unregistered before returning.
 *
 * Returns 0 on success or a negative errno.  On any failure both
 * firmware handles are released (release_firmware(NULL) is a no-op)
 * and the pointers cleared so callers see a consistent state.
 */
static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	pdev = platform_device_register_simple("r600_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "r600_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	/* Map the ASIC family to its firmware file prefix.  Some
	 * families share one image (RS780/RS880 -> RS780,
	 * RV730/RV740 -> RV730). */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_R600:  chip_name = "R600";  break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default:         BUG();
	}

	/* Expected sizes in bytes.  The r6xx ME (PM4) image is stored as
	 * PM4_UCODE_SIZE three-dword entries, hence the factor of 12
	 * (3 dwords x 4 bytes); cf. the PM4_UCODE_SIZE * 3 upload loop in
	 * r600_cp_load_microcode(). */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&dev_priv->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (dev_priv->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       dev_priv->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (dev_priv->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       dev_priv->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		/* -EINVAL means a bad size, already reported above. */
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(dev_priv->pfp_fw);
		dev_priv->pfp_fw = NULL;
		release_firmware(dev_priv->me_fw);
		dev_priv->me_fw = NULL;
	}
	return err;
}
/*
 * Upload the previously fetched CP microcode into the r6xx CP: stop
 * the CP, soft-reset it, then stream the big-endian ME (PM4) image
 * into CP_ME_RAM and the PFP image into the PFP ucode RAM.  Silently
 * returns if either firmware image is missing.
 */
static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
{
	const __be32 *fw_data;
	int i;

	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
		return;

	r600_do_cp_stop(dev_priv);

	/* Ring buffer control: no read-pointer writeback during upload. */
	RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
		     R600_BUF_SWAP_32BIT |
#endif
		     R600_RB_NO_UPDATE |
		     R600_RB_BLKSZ(15) |
		     R600_RB_BUFSZ(3));

	/* Pulse a CP soft reset; the read back flushes the write. */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	mdelay(15);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);

	/* ME (PM4) image: PM4_UCODE_SIZE entries of 3 dwords each,
	 * stored big-endian in the firmware file. */
	fw_data = (const __be32 *)dev_priv->me_fw->data;
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		RADEON_WRITE(R600_CP_ME_RAM_DATA,
			     be32_to_cpup(fw_data++));

	/* PFP (prefetch parser) image. */
	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		RADEON_WRITE(R600_CP_PFP_UCODE_DATA,
			     be32_to_cpup(fw_data++));

	/* Reset the ucode address pointers after the upload. */
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
}
/*
 * R7xx variant of r600_vm_init(): enable GART translation for VM
 * context 0 using the r7xx MC register layout (per-TLB-instance MD/MB
 * L1 control registers instead of the r6xx per-client ones), then
 * flush the GART range.  Contexts 1-7 are left disabled.
 */
static void r700_vm_init(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	/* initialise the VM to use the page table we constructed up there */
	u32 vm_c0, i;
	u32 mc_vm_md_l1;
	u32 vm_l2_cntl, vm_l2_cntl3;

	/* okay set up the PCIE aperture type thingo */
	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);

	/* One L1 TLB configuration, applied to all MD and MB instances. */
	mc_vm_md_l1 = R700_ENABLE_L1_TLB |
	    R700_ENABLE_L1_FRAGMENT_PROCESSING |
	    R700_SYSTEM_ACCESS_MODE_IN_SYS |
	    R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
	    R700_EFFECTIVE_L1_TLB_SIZE(5) |
	    R700_EFFECTIVE_L1_QUEUE_SIZE(5);

	RADEON_WRITE(R700_MC_VM_MD_L1_TLB0_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MD_L1_TLB1_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MD_L1_TLB2_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB0_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB1_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB2_CNTL, mc_vm_md_l1);
	RADEON_WRITE(R700_MC_VM_MB_L1_TLB3_CNTL, mc_vm_md_l1);

	/* VM L2 cache configuration (shared register names with r6xx). */
	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
	vm_l2_cntl |= R700_VM_L2_CNTL_QUEUE_SIZE(7);
	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
	vm_l2_cntl3 = R700_VM_L2_CNTL3_BANK_SELECT(0) | R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(2);
	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);

	/* Enable context 0 with a flat page table... */
	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);

	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);

	/* Page table base and covered GPU address range, in 4K pages. */
	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);

	r600_vm_flush_gart_range(dev);
}
/*
 * R7xx variant of the CP microcode upload.  Same stop/reset/stream
 * sequence as r600_cp_load_microcode(), but the PFP image is written
 * before the ME image, the r7xx sizes are used, and the r7xx ME image
 * is a plain dword stream (no *3 entry expansion).
 */
static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
{
	const __be32 *fw_data;
	int i;

	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
		return;

	r600_do_cp_stop(dev_priv);

	/* Ring buffer control: no read-pointer writeback during upload. */
	RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
		     R600_BUF_SWAP_32BIT |
#endif
		     R600_RB_NO_UPDATE |
		     R600_RB_BLKSZ(15) |
		     R600_RB_BUFSZ(3));

	/* Pulse a CP soft reset; the read back flushes the write. */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	mdelay(15);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);

	/* PFP (prefetch parser) image, big-endian dwords. */
	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);

	/* ME (PM4) image. */
	fw_data = (const __be32 *)dev_priv->me_fw->data;
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);

	/* Reset the ucode address pointers after the upload. */
	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
}
/*
 * Probe whether scratch-register writeback to system memory works on
 * this board: write a magic value to a scratch register and poll the
 * ring-rptr writeback slot for it.  Sets dev_priv->writeback_works
 * accordingly.  The radeon_no_wb module parameter forces writeback
 * off; if writeback is unusable, the ring is reconfigured to stop
 * the (pointless) bus-master updates.
 */
static void r600_test_writeback(drm_radeon_private_t *dev_priv)
{
	u32 tmp;

	/* Start with assuming that writeback doesn't work */
	dev_priv->writeback_works = 0;

	/* Writeback doesn't seem to work everywhere, test it here and possibly
	 * enable it if it appears to work
	 */
	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
	RADEON_WRITE(R600_SCRATCH_REG1, 0xdeadbeef);

	/* Poll for up to usec_timeout microseconds; tmp doubles as the
	 * elapsed-time counter. */
	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
		u32 val;

		val = radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(1));
		if (val == 0xdeadbeef)
			break;
		DRM_UDELAY(1);
	}

	if (tmp < dev_priv->usec_timeout) {
		dev_priv->writeback_works = 1;
		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
	} else {
		dev_priv->writeback_works = 0;
		DRM_INFO("writeback test failed\n");
	}
	/* Module parameter override takes precedence over the probe. */
	if (radeon_no_wb == 1) {
		dev_priv->writeback_works = 0;
		DRM_INFO("writeback forced off\n");
	}

	if (!dev_priv->writeback_works) {
		/* Disable writeback to avoid unnecessary bus master transfer */
		RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
			     R600_BUF_SWAP_32BIT |
#endif
			     RADEON_READ(R600_CP_RB_CNTL) |
			     R600_RB_NO_UPDATE);
		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
	}
}
/*
 * Full GPU engine reset for r6xx: halt the micro-engine, soft-reset
 * all GRBM blocks, restore the ring pointers and control registers,
 * reset the CP ring state, and flush any queued buffers.  The CP is
 * left stopped; the caller must restart it.  Always returns 0.
 */
int r600_do_engine_reset(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 cp_ptr, cp_me_cntl, cp_rb_cntl;

	DRM_INFO("Resetting GPU\n");

	/* Save state we must restore after the soft reset. */
	cp_ptr = RADEON_READ(R600_CP_RB_WPTR);
	cp_me_cntl = RADEON_READ(R600_CP_ME_CNTL);
	RADEON_WRITE(R600_CP_ME_CNTL, R600_CP_ME_HALT);

	/* Soft-reset every GRBM block; the reads flush the writes. */
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0x7fff);
	RADEON_READ(R600_GRBM_SOFT_RESET);
	DRM_UDELAY(50);
	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
	RADEON_READ(R600_GRBM_SOFT_RESET);

	/* Re-sync the read pointer to the saved write pointer via the
	 * RPTR_WR mechanism, then restore the original ring control. */
	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
	RADEON_WRITE(R600_CP_RB_CNTL,
#ifdef __BIG_ENDIAN
		     R600_BUF_SWAP_32BIT |
#endif
		     R600_RB_RPTR_WR_ENA);

	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
	RADEON_WRITE(R600_CP_RB_CNTL, cp_rb_cntl);
	RADEON_WRITE(R600_CP_ME_CNTL, cp_me_cntl);

	/* Reset the CP ring */
	r600_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	/* Reset any pending vertex, indirect buffers */
	radeon_freelist_reset(dev);

	return 0;
}
/*
 * Build the r6xx tile-pipe -> render-backend mapping register value.
 *
 * Each tile pipe gets a 2-bit backend id at bit position
 * (swizzled_pipe * 2).  Backends listed in backend_disable_mask are
 * skipped; enabled backends are assigned round-robin.  Pipe and
 * backend counts are clamped to the hardware limits, and if every
 * backend is disabled, backend 0 is used as a fallback.
 */
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	/* Swizzled pipe orderings indexed by tile-pipe count (1-8);
	 * counts of five or fewer use the identity ordering. */
	static const u32 swz_tbl[9][8] = {
		[1] = { 0 },
		[2] = { 0, 1 },
		[3] = { 0, 1, 2 },
		[4] = { 0, 1, 2, 3 },
		[5] = { 0, 1, 2, 3, 4 },
		[6] = { 0, 2, 4, 5, 1, 3 },
		[7] = { 0, 2, 4, 6, 1, 3, 5 },
		[8] = { 0, 2, 4, 6, 1, 3, 5, 7 },
	};
	u32 backend_map = 0;
	u32 be_mask = 0;
	u32 be_count = 0;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 pipe, be, i;

	/* Clamp the requested counts to the hardware limits. */
	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	else if (num_tile_pipes < 1)
		num_tile_pipes = 1;

	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	else if (num_backends < 1)
		num_backends = 1;

	/* Collect up to num_backends backends that are not disabled. */
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (!((backend_disable_mask >> i) & 1)) {
			be_mask |= 1 << i;
			++be_count;
		}
		if (be_count == num_backends)
			break;
	}

	/* If everything was fused off, fall back to backend 0. */
	if (be_count == 0) {
		be_mask = 1;
		be_count = 1;
	}

	if (be_count != num_backends)
		num_backends = be_count;

	/* Pick the swizzled pipe ordering; counts outside the table
	 * (impossible after clamping unless the max grows) stay zero. */
	memset(swizzle_pipe, 0, sizeof(swizzle_pipe));
	if (num_tile_pipes <= 8) {
		for (i = 0; i < num_tile_pipes; i++)
			swizzle_pipe[i] = swz_tbl[num_tile_pipes][i];
	}

	/* Round-robin the enabled backends over the swizzled pipes. */
	be = 0;
	for (pipe = 0; pipe < num_tile_pipes; ++pipe) {
		while (!((1 << be) & be_mask))
			be = (be + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)((be & 3) << (swizzle_pipe[pipe] * 2));
		be = (be + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
/* Population count: number of pipes/backends set in a hardware mask. */
static int r600_count_pipe_bits(uint32_t val)
{
	int set_bits = hweight32(val);

	return set_bits;
}
/*
 * One-time 3D-engine setup for r6xx ASICs.
 *
 * Fills in the per-family capability limits in dev_priv, derives the
 * tiling / pipe / backend configuration from RAMCFG and the disable
 * fuses, and programs conservative defaults for the CP, TA, SX, DB,
 * SQ, SPI, VGT and CB blocks.  Most SQ/VGT values are defaults that
 * the 2D/3D drivers are expected to retune.
 */
static void r600_gfx_init(struct drm_device *dev,
			  drm_radeon_private_t *dev_priv)
{
	int i, j, num_qd_pipes;
	u32 sx_debug_1;
	u32 tc_cntl;
	u32 arb_pop;
	u32 num_gs_verts_per_thread;
	u32 vgt_gs_per_es;
	u32 gs_prim_buffer_depth = 0;
	u32 sq_ms_fifo_sizes;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 hdp_host_path_cntl;
	u32 backend_map;
	u32 gb_tiling_config = 0;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 ramcfg;

	/* setup chip specs */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_R600:
		dev_priv->r600_max_pipes = 4;
		dev_priv->r600_max_tile_pipes = 8;
		dev_priv->r600_max_simds = 4;
		dev_priv->r600_max_backends = 4;
		dev_priv->r600_max_gprs = 256;
		dev_priv->r600_max_threads = 192;
		dev_priv->r600_max_stack_entries = 256;
		dev_priv->r600_max_hw_contexts = 8;
		dev_priv->r600_max_gs_threads = 16;
		dev_priv->r600_sx_max_export_size = 128;
		dev_priv->r600_sx_max_export_pos_size = 16;
		dev_priv->r600_sx_max_export_smx_size = 128;
		dev_priv->r600_sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		dev_priv->r600_max_pipes = 2;
		dev_priv->r600_max_tile_pipes = 2;
		dev_priv->r600_max_simds = 3;
		dev_priv->r600_max_backends = 1;
		dev_priv->r600_max_gprs = 128;
		dev_priv->r600_max_threads = 192;
		dev_priv->r600_max_stack_entries = 128;
		dev_priv->r600_max_hw_contexts = 8;
		dev_priv->r600_max_gs_threads = 4;
		dev_priv->r600_sx_max_export_size = 128;
		dev_priv->r600_sx_max_export_pos_size = 16;
		dev_priv->r600_sx_max_export_smx_size = 128;
		dev_priv->r600_sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV620:
		dev_priv->r600_max_pipes = 1;
		dev_priv->r600_max_tile_pipes = 1;
		dev_priv->r600_max_simds = 2;
		dev_priv->r600_max_backends = 1;
		dev_priv->r600_max_gprs = 128;
		dev_priv->r600_max_threads = 192;
		dev_priv->r600_max_stack_entries = 128;
		dev_priv->r600_max_hw_contexts = 4;
		dev_priv->r600_max_gs_threads = 4;
		dev_priv->r600_sx_max_export_size = 128;
		dev_priv->r600_sx_max_export_pos_size = 16;
		dev_priv->r600_sx_max_export_smx_size = 128;
		dev_priv->r600_sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		dev_priv->r600_max_pipes = 4;
		dev_priv->r600_max_tile_pipes = 4;
		dev_priv->r600_max_simds = 4;
		dev_priv->r600_max_backends = 4;
		dev_priv->r600_max_gprs = 192;
		dev_priv->r600_max_threads = 192;
		dev_priv->r600_max_stack_entries = 256;
		dev_priv->r600_max_hw_contexts = 8;
		dev_priv->r600_max_gs_threads = 16;
		dev_priv->r600_sx_max_export_size = 128;
		dev_priv->r600_sx_max_export_pos_size = 16;
		dev_priv->r600_sx_max_export_smx_size = 128;
		dev_priv->r600_sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	j = 0;
	for (i = 0; i < 32; i++) {
		RADEON_WRITE((0x2c14 + j), 0x00000000);
		RADEON_WRITE((0x2c18 + j), 0x00000000);
		RADEON_WRITE((0x2c1c + j), 0x00000000);
		RADEON_WRITE((0x2c20 + j), 0x00000000);
		RADEON_WRITE((0x2c24 + j), 0x00000000);
		j += 0x18;
	}

	RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));

	/* setup tiling, simd, pipe config */
	ramcfg = RADEON_READ(R600_RAMCFG);

	/* Log2-encode the tile-pipe count into the tiling config. */
	switch (dev_priv->r600_max_tile_pipes) {
	case 1:
		gb_tiling_config |= R600_PIPE_TILING(0);
		break;
	case 2:
		gb_tiling_config |= R600_PIPE_TILING(1);
		break;
	case 4:
		gb_tiling_config |= R600_PIPE_TILING(2);
		break;
	case 8:
		gb_tiling_config |= R600_PIPE_TILING(3);
		break;
	default:
		break;
	}

	/* Bank/row layout comes from the memory controller's RAMCFG. */
	gb_tiling_config |= R600_BANK_TILING((ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK);

	gb_tiling_config |= R600_GROUP_SIZE(0);

	/* Row tiling and sample split are capped at 3. */
	if (((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK) > 3) {
		gb_tiling_config |= R600_ROW_TILING(3);
		gb_tiling_config |= R600_SAMPLE_SPLIT(3);
	} else {
		gb_tiling_config |=
			R600_ROW_TILING(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
		gb_tiling_config |=
			R600_SAMPLE_SPLIT(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
	}

	gb_tiling_config |= R600_BANK_SWAPS(1);

	/* Mark all backends/pipes/SIMDs beyond the family's maximum as
	 * disabled, preserving the fuse bits already set in hardware. */
	cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);

	/* Map tile pipes onto the backends that remain enabled. */
	backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	gb_tiling_config |= R600_BACKEND_MAP(backend_map);

	/* The DCP/HDP blocks only consume the low 16 bits. */
	RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
	RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	/* Cache decoded tiling parameters for later use by the driver. */
	if (gb_tiling_config & 0xc0) {
		dev_priv->r600_group_size = 512;
	} else {
		dev_priv->r600_group_size = 256;
	}
	dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
	if (gb_tiling_config & 0x30) {
		dev_priv->r600_nbanks = 8;
	} else {
		dev_priv->r600_nbanks = 4;
	}

	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	/* VGT deallocation distances scale with the active pipe count. */
	num_qd_pipes =
		R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
	RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
	RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);

	/* set HW defaults for 3D engine */
	RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
						R600_ROQ_IB2_START(0x2b)));

	RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, (R600_MEQ_END(0x40) |
					      R600_ROQ_END(0x40)));

	RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
					R600_SYNC_GRADIENT |
					R600_SYNC_WALKER |
					R600_SYNC_ALIGNER));

	/* RV670-specific arbiter workaround. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670)
		RADEON_WRITE(R600_ARB_GDEC_RD_CNTL, 0x00000021);

	sx_debug_1 = RADEON_READ(R600_SX_DEBUG_1);
	sx_debug_1 |= R600_SMX_EVENT_RELEASE;
	/* All families after the original R600 support the new SMX
	 * addressing mode. */
	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600))
		sx_debug_1 |= R600_ENABLE_NEW_SMX_ADDRESS;
	RADEON_WRITE(R600_SX_DEBUG_1, sx_debug_1);

	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
		RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	else
		RADEON_WRITE(R600_DB_DEBUG, 0);

	RADEON_WRITE(R600_DB_WATERMARKS, (R600_DEPTH_FREE(4) |
					  R600_DEPTH_FLUSH(16) |
					  R600_DEPTH_PENDING_FREE(4) |
					  R600_DEPTH_CACHELINE_FREE(16)));
	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
	RADEON_WRITE(R600_VGT_NUM_INSTANCES, 0);

	RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
	RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(0));

	/* FIFO sizing differs between the small (RV610-class) parts and
	 * the R600/RV630 parts; other families keep the reset value. */
	sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES);
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
		sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) |
				    R600_FETCH_FIFO_HIWATER(0xa) |
				    R600_DONE_FIFO_HIWATER(0xe0) |
				    R600_ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630)) {
		sq_ms_fifo_sizes &= ~R600_DONE_FIFO_HIWATER(0xff);
		sq_ms_fifo_sizes |= R600_DONE_FIFO_HIWATER(0x4);
	}
	RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RADEON_READ(R600_SQ_CONFIG);
	sq_config &= ~(R600_PS_PRIO(3) |
		       R600_VS_PRIO(3) |
		       R600_GS_PRIO(3) |
		       R600_ES_PRIO(3));
	sq_config |= (R600_DX9_CONSTS |
		      R600_VC_ENABLE |
		      R600_PS_PRIO(0) |
		      R600_VS_PRIO(1) |
		      R600_GS_PRIO(2) |
		      R600_ES_PRIO(3));

	/* Per-family GPR/thread/stack partitioning between shader types. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(124) |
					  R600_NUM_VS_GPRS(124) |
					  R600_NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(0) |
					  R600_NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(136) |
					   R600_NUM_VS_THREADS(48) |
					   R600_NUM_GS_THREADS(4) |
					   R600_NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(128) |
					    R600_NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(0) |
					    R600_NUM_ES_STACK_ENTRIES(0));
	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~R600_VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
					  R600_NUM_VS_GPRS(44) |
					  R600_NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
					  R600_NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
					   R600_NUM_VS_THREADS(78) |
					   R600_NUM_GS_THREADS(4) |
					   R600_NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
					    R600_NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
					    R600_NUM_ES_STACK_ENTRIES(16));
	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
					  R600_NUM_VS_GPRS(44) |
					  R600_NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(18) |
					  R600_NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
					   R600_NUM_VS_THREADS(78) |
					   R600_NUM_GS_THREADS(4) |
					   R600_NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
					    R600_NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
					    R600_NUM_ES_STACK_ENTRIES(16));
	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
					  R600_NUM_VS_GPRS(44) |
					  R600_NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
					  R600_NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
					   R600_NUM_VS_THREADS(78) |
					   R600_NUM_GS_THREADS(4) |
					   R600_NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(64) |
					    R600_NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(64) |
					    R600_NUM_ES_STACK_ENTRIES(64));
	}

	RADEON_WRITE(R600_SQ_CONFIG, sq_config);
	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	/* Parts without a vertex cache only invalidate the texture cache. */
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY));
	else
		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC));

	/* Default multisample sample positions (2x, 4x, 8x). */
	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S, (R600_S0_X(0xc) |
						    R600_S0_Y(0x4) |
						    R600_S1_X(0x4) |
						    R600_S1_Y(0xc)));
	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_4S, (R600_S0_X(0xe) |
						    R600_S0_Y(0xe) |
						    R600_S1_X(0x2) |
						    R600_S1_Y(0x2) |
						    R600_S2_X(0xa) |
						    R600_S2_Y(0x6) |
						    R600_S3_X(0x6) |
						    R600_S3_Y(0xa)));
	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0, (R600_S0_X(0xe) |
							R600_S0_Y(0xb) |
							R600_S1_X(0x4) |
							R600_S1_Y(0xc) |
							R600_S2_X(0x1) |
							R600_S2_Y(0x6) |
							R600_S3_X(0xa) |
							R600_S3_Y(0xe)));
	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1, (R600_S4_X(0x6) |
							R600_S4_Y(0x1) |
							R600_S5_X(0x0) |
							R600_S5_Y(0x0) |
							R600_S6_X(0xb) |
							R600_S6_Y(0x4) |
							R600_S7_X(0x7) |
							R600_S7_Y(0x8)));

	/* GS primitive buffer depth feeds the GS-per-ES sizing below. */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_R600:
	case CHIP_RV630:
	case CHIP_RV635:
		gs_prim_buffer_depth = 0;
		break;
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV620:
		gs_prim_buffer_depth = 32;
		break;
	case CHIP_RV670:
		gs_prim_buffer_depth = 128;
		break;
	default:
		break;
	}

	num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
	/* Max value for this is 256 */
	if (vgt_gs_per_es > 256)
		vgt_gs_per_es = 256;

	RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
	RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
	RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
	RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
	RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
	RADEON_WRITE(R600_SX_MISC, 0);
	RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
	RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
	RADEON_WRITE(R600_SPI_INPUT_Z, 0);
	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
	RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);

	/* clear render buffer base addresses */
	RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
	RADEON_WRITE(R600_CB_COLOR7_BASE, 0);

	/* Per-family texture L2 size. */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV620:
		tc_cntl = R600_TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tc_cntl = R600_TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tc_cntl = R600_TC_L2_SIZE(0) | R600_L2_DISABLE_LATE_HIT;
		break;
	default:
		tc_cntl = R600_TC_L2_SIZE(0);
		break;
	}

	RADEON_WRITE(R600_TC_CNTL, tc_cntl);

	/* Read-modify-write with no change: the write itself latches the
	 * current HDP host path configuration. */
	hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
	RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	arb_pop = RADEON_READ(R600_ARB_POP);
	arb_pop |= R600_ENABLE_TC128;
	RADEON_WRITE(R600_ARB_POP, arb_pop);

	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
	RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
					  R600_NUM_CLIP_SEQ(3)));
	RADEON_WRITE(R600_PA_SC_ENHANCE, R600_FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Build the r7xx tile-pipe -> render-backend mapping register value.
 *
 * Same scheme as the r6xx version — a 2-bit backend id per swizzled
 * pipe, round-robined over the enabled backends — but the swizzle is
 * family dependent: RV770/RV730 use a swizzled ordering while
 * RV710/RV740 (and unknown families) use the identity ordering.
 */
static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
					     u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	/* Swizzled pipe orderings indexed by tile-pipe count (1-8). */
	static const u32 swz_tbl[9][8] = {
		[1] = { 0 },
		[2] = { 0, 1 },
		[3] = { 0, 2, 1 },
		[4] = { 0, 2, 3, 1 },
		[5] = { 0, 2, 4, 1, 3 },
		[6] = { 0, 2, 4, 5, 3, 1 },
		[7] = { 0, 2, 4, 6, 3, 1, 5 },
		[8] = { 0, 2, 4, 6, 3, 1, 7, 5 },
	};
	u32 backend_map = 0;
	u32 be_mask = 0;
	u32 be_count = 0;
	u32 swizzle_pipe[R7XX_MAX_PIPES];
	u32 pipe, be, i;
	bool force_no_swizzle;

	/* Clamp the requested counts to the hardware limits. */
	if (num_tile_pipes > R7XX_MAX_PIPES)
		num_tile_pipes = R7XX_MAX_PIPES;
	else if (num_tile_pipes < 1)
		num_tile_pipes = 1;

	if (num_backends > R7XX_MAX_BACKENDS)
		num_backends = R7XX_MAX_BACKENDS;
	else if (num_backends < 1)
		num_backends = 1;

	/* Collect up to num_backends backends that are not disabled. */
	for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
		if (!((backend_disable_mask >> i) & 1)) {
			be_mask |= 1 << i;
			++be_count;
		}
		if (be_count == num_backends)
			break;
	}

	/* If everything was fused off, fall back to backend 0. */
	if (be_count == 0) {
		be_mask = 1;
		be_count = 1;
	}

	if (be_count != num_backends)
		num_backends = be_count;

	/* Only RV770/RV730 use the swizzled ordering. */
	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
	case CHIP_RV770:
	case CHIP_RV730:
		force_no_swizzle = false;
		break;
	case CHIP_RV710:
	case CHIP_RV740:
	default:
		force_no_swizzle = true;
		break;
	}

	/* Pick the pipe ordering: identity when swizzling is forced off,
	 * otherwise the table entry for this pipe count. */
	memset(swizzle_pipe, 0, sizeof(swizzle_pipe));
	if (num_tile_pipes <= 8) {
		for (i = 0; i < num_tile_pipes; i++)
			swizzle_pipe[i] = force_no_swizzle ?
				i : swz_tbl[num_tile_pipes][i];
	}

	/* Round-robin the enabled backends over the swizzled pipes. */
	be = 0;
	for (pipe = 0; pipe < num_tile_pipes; ++pipe) {
		while (!((1 << be) & be_mask))
			be = (be + 1) % R7XX_MAX_BACKENDS;

		backend_map |= (u32)((be & 3) << (swizzle_pipe[pipe] * 2));
		be = (be + 1) % R7XX_MAX_BACKENDS;
	}

	return backend_map;
}
/*
 * r700_gfx_init - one-time 3D engine bring-up for the R7xx family
 * (RV770/RV730/RV710/RV740) on the legacy UMS CP path.
 *
 * First fills in the per-chip limits in @dev_priv (pipe/SIMD/backend
 * counts, GPR/thread/stack budgets, SX export sizes, SC FIFO depths),
 * then programs tiling configuration, the tile-pipe-to-backend map and
 * the SQ/SX/SC/VGT hardware defaults.  Several of the values written
 * here are only sane defaults; the 2D/3D client drivers are expected to
 * re-tune them (see the inline comments below).
 *
 * NOTE(review): the register write ORDER matters on this hardware; do
 * not reorder statements in this function.
 */
static void r700_gfx_init(struct drm_device *dev,
drm_radeon_private_t *dev_priv)
{
int i, j, num_qd_pipes;
u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
u32 sq_ms_fifo_sizes;
u32 sq_config;
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 mc_arb_ramcfg;
u32 db_debug4;
/* setup chip specs */
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
dev_priv->r600_max_pipes = 4;
dev_priv->r600_max_tile_pipes = 8;
dev_priv->r600_max_simds = 10;
dev_priv->r600_max_backends = 4;
dev_priv->r600_max_gprs = 256;
dev_priv->r600_max_threads = 248;
dev_priv->r600_max_stack_entries = 512;
dev_priv->r600_max_hw_contexts = 8;
dev_priv->r600_max_gs_threads = 16 * 2;
dev_priv->r600_sx_max_export_size = 128;
dev_priv->r600_sx_max_export_pos_size = 16;
dev_priv->r600_sx_max_export_smx_size = 112;
dev_priv->r600_sq_num_cf_insts = 2;
dev_priv->r700_sx_num_of_sets = 7;
dev_priv->r700_sc_prim_fifo_size = 0xF9;
dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
/* NOTE(review): "fize" is a long-standing typo in the struct field
 * name itself, not a bug introduced here. */
dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
break;
case CHIP_RV730:
dev_priv->r600_max_pipes = 2;
dev_priv->r600_max_tile_pipes = 4;
dev_priv->r600_max_simds = 8;
dev_priv->r600_max_backends = 2;
dev_priv->r600_max_gprs = 128;
dev_priv->r600_max_threads = 248;
dev_priv->r600_max_stack_entries = 256;
dev_priv->r600_max_hw_contexts = 8;
dev_priv->r600_max_gs_threads = 16 * 2;
dev_priv->r600_sx_max_export_size = 256;
dev_priv->r600_sx_max_export_pos_size = 32;
dev_priv->r600_sx_max_export_smx_size = 224;
dev_priv->r600_sq_num_cf_insts = 2;
dev_priv->r700_sx_num_of_sets = 7;
dev_priv->r700_sc_prim_fifo_size = 0xf9;
dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
/* Trade unused position-export space for extra SMX export space. */
if (dev_priv->r600_sx_max_export_pos_size > 16) {
dev_priv->r600_sx_max_export_pos_size -= 16;
dev_priv->r600_sx_max_export_smx_size += 16;
}
break;
case CHIP_RV710:
dev_priv->r600_max_pipes = 2;
dev_priv->r600_max_tile_pipes = 2;
dev_priv->r600_max_simds = 2;
dev_priv->r600_max_backends = 1;
dev_priv->r600_max_gprs = 256;
dev_priv->r600_max_threads = 192;
dev_priv->r600_max_stack_entries = 256;
dev_priv->r600_max_hw_contexts = 4;
dev_priv->r600_max_gs_threads = 8 * 2;
dev_priv->r600_sx_max_export_size = 128;
dev_priv->r600_sx_max_export_pos_size = 16;
dev_priv->r600_sx_max_export_smx_size = 112;
dev_priv->r600_sq_num_cf_insts = 1;
dev_priv->r700_sx_num_of_sets = 7;
dev_priv->r700_sc_prim_fifo_size = 0x40;
dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
break;
case CHIP_RV740:
dev_priv->r600_max_pipes = 4;
dev_priv->r600_max_tile_pipes = 4;
dev_priv->r600_max_simds = 8;
dev_priv->r600_max_backends = 4;
dev_priv->r600_max_gprs = 256;
dev_priv->r600_max_threads = 248;
dev_priv->r600_max_stack_entries = 512;
dev_priv->r600_max_hw_contexts = 8;
dev_priv->r600_max_gs_threads = 16 * 2;
dev_priv->r600_sx_max_export_size = 256;
dev_priv->r600_sx_max_export_pos_size = 32;
dev_priv->r600_sx_max_export_smx_size = 224;
dev_priv->r600_sq_num_cf_insts = 2;
dev_priv->r700_sx_num_of_sets = 7;
dev_priv->r700_sc_prim_fifo_size = 0x100;
dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
/* Same position-export/SMX-export trade as RV730 above. */
if (dev_priv->r600_sx_max_export_pos_size > 16) {
dev_priv->r600_sx_max_export_pos_size -= 16;
dev_priv->r600_sx_max_export_smx_size += 16;
}
break;
default:
break;
}
/* Initialize HDP */
/* Clears 32 register groups of 5 registers each, 0x18 bytes apart. */
j = 0;
for (i = 0; i < 32; i++) {
RADEON_WRITE((0x2c14 + j), 0x00000000);
RADEON_WRITE((0x2c18 + j), 0x00000000);
RADEON_WRITE((0x2c1c + j), 0x00000000);
RADEON_WRITE((0x2c20 + j), 0x00000000);
RADEON_WRITE((0x2c24 + j), 0x00000000);
j += 0x18;
}
RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RADEON_READ(R700_MC_ARB_RAMCFG);
/* Encode the tile-pipe count (power of two, 1..8) into the tiling config. */
switch (dev_priv->r600_max_tile_pipes) {
case 1:
gb_tiling_config |= R600_PIPE_TILING(0);
break;
case 2:
gb_tiling_config |= R600_PIPE_TILING(1);
break;
case 4:
gb_tiling_config |= R600_PIPE_TILING(2);
break;
case 8:
gb_tiling_config |= R600_PIPE_TILING(3);
break;
default:
break;
}
/* RV770 always uses bank tiling 1; the others derive it from RAMCFG. */
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
gb_tiling_config |= R600_BANK_TILING(1);
else
gb_tiling_config |= R600_BANK_TILING((mc_arb_ramcfg >> R700_NOOFBANK_SHIFT) & R700_NOOFBANK_MASK);
gb_tiling_config |= R600_GROUP_SIZE(0);
/* Row tiling / sample split are capped at 3 by the hardware field width. */
if (((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK) > 3) {
gb_tiling_config |= R600_ROW_TILING(3);
gb_tiling_config |= R600_SAMPLE_SPLIT(3);
} else {
gb_tiling_config |=
R600_ROW_TILING(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
gb_tiling_config |=
R600_SAMPLE_SPLIT(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
}
gb_tiling_config |= R600_BANK_SWAPS(1);
/* Disable the render backends beyond r600_max_backends (bits above the
 * enabled count within the R7XX_MAX_BACKENDS_MASK field). */
cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
/* Likewise mark the unused QD pipes and SIMDs inactive. */
cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
/* RV740 uses a fixed backend map; the others compute one from the set
 * of enabled backends. */
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
dev_priv->r600_max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
/* Decode group size, pipe count and bank count back out of the tiling
 * config word for later use by the CS checker / blit code.
 * NOTE(review): 0xc0 / bits 1-3 / 0x30 are the GROUP_SIZE, PIPE_TILING
 * and BANK_TILING fields respectively -- confirm against the register
 * spec before changing. */
if (gb_tiling_config & 0xc0) {
dev_priv->r600_group_size = 512;
} else {
dev_priv->r600_group_size = 256;
}
dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
if (gb_tiling_config & 0x30) {
dev_priv->r600_nbanks = 8;
} else {
dev_priv->r600_nbanks = 4;
}
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
/* Enable all texture caches. */
RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
/* set HW defaults for 3D engine */
RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
R600_ROQ_IB2_START(0x2b)));
RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
RADEON_WRITE(R700_SX_DEBUG_1, sx_debug_1);
/* SMX data cache depth is sized from the per-chip set count (64 lines
 * per set, minus one for the register encoding). */
smx_dc_ctl0 = RADEON_READ(R600_SMX_DC_CTL0);
smx_dc_ctl0 &= ~R700_CACHE_DEPTH(0x1ff);
smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
R700_GS_FLUSH_CTL(4) |
R700_ACK_FLUSH_CTL(3) |
R700_SYNC_FLUSH_CTL));
db_debug3 = RADEON_READ(R700_DB_DEBUG3);
db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
case CHIP_RV740:
db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
break;
case CHIP_RV710:
case CHIP_RV730:
default:
db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
break;
}
RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
}
/* SX export buffers and SC FIFOs are programmed from the per-chip sizes
 * set up at the top of this function. */
RADEON_WRITE(R600_SX_EXPORT_BUFFER_SIZES, (R600_COLOR_BUFFER_SIZE((dev_priv->r600_sx_max_export_size / 4) - 1) |
R600_POSITION_BUFFER_SIZE((dev_priv->r600_sx_max_export_pos_size / 4) - 1) |
R600_SMX_BUFFER_SIZE((dev_priv->r600_sx_max_export_smx_size / 4) - 1)));
RADEON_WRITE(R700_PA_SC_FIFO_SIZE_R7XX, (R700_SC_PRIM_FIFO_SIZE(dev_priv->r700_sc_prim_fifo_size) |
R700_SC_HIZ_TILE_FIFO_SIZE(dev_priv->r700_sc_hiz_tile_fifo_size) |
R700_SC_EARLYZ_TILE_FIFO_SIZE(dev_priv->r700_sc_earlyz_tile_fifo_fize)));
RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
RADEON_WRITE(R600_VGT_NUM_INSTANCES, 1);
RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(4));
RADEON_WRITE(R600_CP_PERFMON_CNTL, 0);
sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(16 * dev_priv->r600_sq_num_cf_insts) |
R600_DONE_FIFO_HIWATER(0xe0) |
R600_ALU_UPDATE_FIFO_HIWATER(0x8));
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
break;
}
RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
 */
sq_config = RADEON_READ(R600_SQ_CONFIG);
sq_config &= ~(R600_PS_PRIO(3) |
R600_VS_PRIO(3) |
R600_GS_PRIO(3) |
R600_ES_PRIO(3));
sq_config |= (R600_DX9_CONSTS |
R600_VC_ENABLE |
R600_EXPORT_SRC_C |
R600_PS_PRIO(0) |
R600_VS_PRIO(1) |
R600_GS_PRIO(2) |
R600_ES_PRIO(3));
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
/* no vertex cache */
sq_config &= ~R600_VC_ENABLE;
RADEON_WRITE(R600_SQ_CONFIG, sq_config);
/* Split the GPR file: 24/64 each to PS and VS, 7/64 each to GS and ES;
 * thread budget 4:2:1:GS eighths of r600_max_threads. */
RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, (R600_NUM_PS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
R600_NUM_VS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
R600_NUM_CLAUSE_TEMP_GPRS(((dev_priv->r600_max_gprs * 24)/64)/2)));
RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, (R600_NUM_GS_GPRS((dev_priv->r600_max_gprs * 7)/64) |
R600_NUM_ES_GPRS((dev_priv->r600_max_gprs * 7)/64)));
sq_thread_resource_mgmt = (R600_NUM_PS_THREADS((dev_priv->r600_max_threads * 4)/8) |
R600_NUM_VS_THREADS((dev_priv->r600_max_threads * 2)/8) |
R600_NUM_ES_THREADS((dev_priv->r600_max_threads * 1)/8));
/* Cap the GS thread count at the per-chip maximum. */
if (((dev_priv->r600_max_threads * 1) / 8) > dev_priv->r600_max_gs_threads)
sq_thread_resource_mgmt |= R600_NUM_GS_THREADS(dev_priv->r600_max_gs_threads);
else
sq_thread_resource_mgmt |= R600_NUM_GS_THREADS((dev_priv->r600_max_gs_threads * 1)/8);
RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, (R600_NUM_PS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
R600_NUM_VS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, (R600_NUM_GS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
R600_NUM_ES_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
/* Same dynamic GPR sizing is replicated across all eight SIMD pairs. */
sq_dyn_gpr_size_simd_ab_0 = (R700_SIMDA_RING0((dev_priv->r600_max_gprs * 38)/64) |
R700_SIMDA_RING1((dev_priv->r600_max_gprs * 38)/64) |
R700_SIMDB_RING0((dev_priv->r600_max_gprs * 38)/64) |
R700_SIMDB_RING1((dev_priv->r600_max_gprs * 38)/64));
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
RADEON_WRITE(R700_PA_SC_FORCE_EOV_MAX_CNTS, (R700_FORCE_EOV_MAX_CLK_CNT(4095) |
R700_FORCE_EOV_MAX_REZ_CNT(255)));
/* RV710 has no vertex cache, so only invalidate the texture cache. */
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_TC_ONLY) |
R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
else
RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_VC_AND_TC) |
R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV740:
gs_prim_buffer_depth = 384;
break;
case CHIP_RV710:
gs_prim_buffer_depth = 128;
break;
default:
break;
}
num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
/* Max value for this is 256 */
if (vgt_gs_per_es > 256)
vgt_gs_per_es = 256;
RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
/* more default values. 2D/3D driver should adjust as needed */
RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
RADEON_WRITE(R600_SX_MISC, 0);
RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
RADEON_WRITE(R700_PA_SC_EDGERULE, 0xaaaaaaaa);
RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
RADEON_WRITE(R600_PA_SC_CLIPRECT_RULE, 0xffff);
RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
RADEON_WRITE(R600_SPI_INPUT_Z, 0);
RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
/* clear render buffer base addresses */
RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
RADEON_WRITE(R700_TCP_CNTL, 0);
/* Read-modify-write with no modification: flushes/latches the HDP host
 * path control register. */
hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
R600_NUM_CLIP_SEQ(3)));
}
/*
 * r600_cp_init_ring_buffer - bring up the CP ring buffer.
 *
 * Runs the per-family gfx init, soft-resets the CP, programs the ring
 * buffer control/base/read-write pointers (with the correct byte-swap
 * mode on big-endian hosts), points the scratch registers behind the
 * ring read pointer, enables bus mastering, and clears the frame /
 * dispatch / clear scratch values mirrored in the SAREA.
 */
static void r600_cp_init_ring_buffer(struct drm_device *dev,
drm_radeon_private_t *dev_priv,
struct drm_file *file_priv)
{
struct drm_radeon_master_private *master_priv;
u32 ring_start;
u64 rptr_addr;
if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
r700_gfx_init(dev, dev_priv);
else
r600_gfx_init(dev, dev_priv);
/* Pulse the CP soft reset; the read flushes the write before the delay. */
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
R600_BUF_SWAP_32BIT |
R600_RB_NO_UPDATE |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
/* NOTE(review): this branch uses RADEON_RB_NO_UPDATE while the
 * big-endian one uses R600_RB_NO_UPDATE -- presumably the same bit;
 * verify the two defines before touching this. */
RADEON_WRITE(R600_CP_RB_CNTL,
RADEON_RB_NO_UPDATE |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
/* Temporarily enable RPTR writes so the pointers below can be zeroed. */
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
R600_BUF_SWAP_32BIT |
R600_RB_NO_UPDATE |
R600_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(R600_CP_RB_CNTL,
R600_RB_NO_UPDATE |
R600_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
/* Initialize the ring buffer's read and write pointers */
RADEON_WRITE(R600_CP_RB_RPTR_WR, 0);
RADEON_WRITE(R600_CP_RB_WPTR, 0);
SET_RING_HEAD(dev_priv, 0);
dev_priv->ring.tail = 0;
/* The GPU-visible address of the read-pointer writeback location
 * depends on whether the ring lives in AGP or SG (PCI GART) memory. */
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
rptr_addr = dev_priv->ring_rptr->offset
- dev->agp->base +
dev_priv->gart_vm_start;
} else
#endif
{
rptr_addr = dev_priv->ring_rptr->offset
- ((unsigned long) dev->sg->virtual)
+ dev_priv->gart_vm_start;
}
RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
/* Drop RB_RPTR_WR_ENA (and NO_UPDATE) now that the pointers are set. */
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
RADEON_BUF_SWAP_32BIT |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(R600_CP_RB_CNTL,
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
/* XXX */
radeon_write_agp_base(dev_priv, dev->agp->base);
/* XXX */
radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
ring_start = (dev_priv->cp_ring->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
} else
#endif
ring_start = (dev_priv->cp_ring->offset
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
/* RB_BASE holds the ring address in 256-byte units. */
RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8);
RADEON_WRITE(R600_CP_ME_CNTL, 0xff);
RADEON_WRITE(R600_CP_DEBUG, (1 << 27) | (1 << 28));
/* Initialize the scratch register pointer. This will cause
 * the scratch register values to be written out to memory
 * whenever they are updated.
 *
 * We simply put this behind the ring read pointer, this works
 * with PCI GART as well as (whatever kind of) AGP GART
 */
{
u64 scratch_addr;
scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
scratch_addr += R600_SCRATCH_REG_OFFSET;
/* SCRATCH_ADDR is programmed in 256-byte units. */
scratch_addr >>= 8;
scratch_addr &= 0xffffffff;
RADEON_WRITE(R600_SCRATCH_ADDR, (uint32_t)scratch_addr);
}
/* Enable writeback for scratch registers 0-2. */
RADEON_WRITE(R600_SCRATCH_UMSK, 0x7);
/* Turn on bus mastering */
radeon_enable_bm(dev_priv);
/* Zero the writeback copies and the registers they shadow. */
radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0);
RADEON_WRITE(R600_LAST_FRAME_REG, 0);
radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
RADEON_WRITE(R600_LAST_DISPATCH_REG, 0);
radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0);
RADEON_WRITE(R600_LAST_CLEAR_REG, 0);
/* reset sarea copies of these */
master_priv = file_priv->master->driver_priv;
if (master_priv->sarea_priv) {
master_priv->sarea_priv->last_frame = 0;
master_priv->sarea_priv->last_dispatch = 0;
master_priv->sarea_priv->last_clear = 0;
}
r600_do_wait_for_idle(dev_priv);
}
/*
 * r600_do_cleanup_cp - tear down CP state for this device.
 *
 * Disables interrupts, releases the AGP mappings or the PCI GART page
 * table (whichever is in use), and wipes dev_priv up to (but not
 * including) the 'flags' member so the persistent flags survive a
 * re-init.  Always returns 0.
 */
int r600_do_cleanup_cp(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
/* Make sure interrupts are disabled here because the uninstall ioctl
 * may not have been called from userspace and after dev_private
 * is freed, it's too late.
 */
if (dev->irq_enabled)
drm_irq_uninstall(dev);
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
/* Unmap the three AGP regions; NULL the pointers so a second
 * cleanup call is safe. */
if (dev_priv->cp_ring != NULL) {
drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
dev_priv->cp_ring = NULL;
}
if (dev_priv->ring_rptr != NULL) {
drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
dev_priv->ring_rptr = NULL;
}
if (dev->agp_buffer_map != NULL) {
drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
} else
#endif
{
/* PCI(E) path: free the GART page table and its FB mapping. */
if (dev_priv->gart_info.bus_addr)
r600_page_table_cleanup(dev, &dev_priv->gart_info);
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr = NULL;
}
}
/* only clear to the start of flags */
memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
return 0;
}
/*
 * r600_do_init_cp - initialize the Command Processor from userspace
 * parameters (RADEON_CP_INIT ioctl path for R6xx/R7xx).
 *
 * Validates the init request, locates the SAREA and the ring / read
 * pointer / DMA buffer maps, computes the framebuffer and GART address
 * layout, sets up the ring bookkeeping, initializes the GART page table
 * and VM, loads the CP microcode, and finally brings up the ring.
 *
 * Returns 0 on success or a negative errno; every failure path calls
 * r600_do_cleanup_cp() before returning so no partial state is left
 * behind.
 */
int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
DRM_DEBUG("\n");
mutex_init(&dev_priv->cs_mutex);
r600_cs_legacy_init();
/* if we require new memory map but we don't have it fail */
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
DRM_DEBUG("Forcing AGP card to PCI mode\n");
dev_priv->flags &= ~RADEON_IS_AGP;
/* The writeback test succeeds, but when writeback is enabled,
 * the ring buffer read ptr update fails after first 128 bytes.
 */
radeon_no_wb = 1;
} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
&& !init->is_pci) {
DRM_DEBUG("Restoring AGP flag\n");
dev_priv->flags |= RADEON_IS_AGP;
}
dev_priv->usec_timeout = init->usec_timeout;
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
/* Enable vblank on CRTC1 for older X servers
 */
dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
dev_priv->do_boxes = 0;
dev_priv->cp_mode = init->cp_mode;
/* We don't support anything other than bus-mastering ring mode,
 * but the ring can be in either AGP or PCI space for the ring
 * read pointer.
 */
if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
r600_do_cleanup_cp(dev);
return -EINVAL;
}
/* Only 16bpp (RGB565) and 32bpp (ARGB8888) framebuffers are supported. */
switch (init->fb_bpp) {
case 16:
dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
break;
case 32:
default:
dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
break;
}
/* Copy the buffer layout handed in by userspace. */
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
/* Resolve the userspace-provided map handles into DRM maps. */
master_priv->sarea = drm_legacy_getsarea(dev);
if (!master_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset);
if (!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
if (init->gart_textures_offset) {
dev_priv->gart_textures =
drm_legacy_findmap(dev, init->gart_textures_offset);
if (!dev_priv->gart_textures) {
DRM_ERROR("could not find GART texture region!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
}
#if __OS_HAS_AGP
/* XXX */
if (dev_priv->flags & RADEON_IS_AGP) {
/* AGP: the maps must be ioremapped to be CPU-accessible. */
drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
if (!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
} else
#endif
{
/* PCI: the map offsets are already kernel virtual addresses. */
dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
dev_priv->ring_rptr->handle =
(void *)(unsigned long)dev_priv->ring_rptr->offset;
dev->agp_buffer_map->handle =
(void *)(unsigned long)dev->agp_buffer_map->offset;
DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
dev_priv->cp_ring->handle);
DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
dev_priv->ring_rptr->handle);
DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
dev->agp_buffer_map->handle);
}
/* Decode framebuffer base/size from the MC register (16MB granules). */
dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
dev_priv->fb_size =
(((radeon_read_fb_location(dev_priv) & 0xffff0000u) << 8) + 0x1000000)
- dev_priv->fb_location;
/* pitch_offset: pitch in 64-byte units (bits 22+), offset in 1KB units. */
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
((dev_priv->front_offset
+ dev_priv->fb_location) >> 10));
dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
((dev_priv->back_offset
+ dev_priv->fb_location) >> 10));
dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
((dev_priv->depth_offset
+ dev_priv->fb_location) >> 10));
dev_priv->gart_size = init->gart_size;
/* New let's set the memory map ... */
if (dev_priv->new_memmap) {
u32 base = 0;
DRM_INFO("Setting GART location based on new memory map\n");
/* If using AGP, try to locate the AGP aperture at the same
 * location in the card and on the bus, though we have to
 * align it down.
 */
#if __OS_HAS_AGP
/* XXX */
if (dev_priv->flags & RADEON_IS_AGP) {
base = dev->agp->base;
/* Check if valid */
if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
dev->agp->base);
base = 0;
}
}
#endif
/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
if (base == 0) {
base = dev_priv->fb_location + dev_priv->fb_size;
if (base < dev_priv->fb_location ||
((base + dev_priv->gart_size) & 0xfffffffful) < base)
base = dev_priv->fb_location
- dev_priv->gart_size;
}
/* GART base must be 4MB aligned. */
dev_priv->gart_vm_start = base & 0xffc00000u;
if (dev_priv->gart_vm_start != base)
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
base, dev_priv->gart_vm_start);
}
#if __OS_HAS_AGP
/* XXX */
if (dev_priv->flags & RADEON_IS_AGP)
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
else
#endif
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
DRM_DEBUG("fb 0x%08x size %d\n",
(unsigned int) dev_priv->fb_location,
(unsigned int) dev_priv->fb_size);
DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
DRM_DEBUG("dev_priv->gart_vm_start 0x%08x\n",
(unsigned int) dev_priv->gart_vm_start);
DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
dev_priv->gart_buffers_offset);
/* Ring bookkeeping: sizes are stored as log2 of quad-words (hence /8). */
dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
/* XXX turn off pcie gart */
} else
#endif
{
/* PCI(E): map and initialize the GART page table in the FB. */
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
/* if we have an offset set from userspace */
if (!dev_priv->pcigart_offset_set) {
DRM_ERROR("Need gart offset from userspace\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
DRM_DEBUG("Using gart offset 0x%08lx\n", dev_priv->pcigart_offset);
dev_priv->gart_info.bus_addr =
dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset =
dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
dev_priv->gart_info.mapping.size =
dev_priv->gart_info.table_size;
drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev);
if (!dev_priv->gart_info.mapping.handle) {
DRM_ERROR("ioremap failed.\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;
DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
if (!r600_page_table_init(dev)) {
DRM_ERROR("Failed to init GART table\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
r700_vm_init(dev);
else
r600_vm_init(dev);
}
/* Load the CP microcode (PFP + ME) if not already resident. */
if (!dev_priv->me_fw || !dev_priv->pfp_fw) {
int err = r600_cp_init_microcode(dev_priv);
if (err) {
DRM_ERROR("Failed to load firmware!\n");
r600_do_cleanup_cp(dev);
return err;
}
}
if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
r700_cp_load_microcode(dev_priv);
else
r600_cp_load_microcode(dev_priv);
r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
dev_priv->last_buf = 0;
r600_do_engine_reset(dev);
r600_test_writeback(dev_priv);
return 0;
}
/*
 * r600_do_resume_cp - re-initialize the CP after suspend/resume.
 *
 * Rebuilds the GPU virtual memory setup and reloads the CP microcode
 * for the detected chip family, then brings the ring buffer back up and
 * resets the engine.  Always returns 0.
 */
int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int is_r7xx = (dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770;
DRM_DEBUG("\n");
/* Pick the family-specific VM init and microcode loader. */
if (!is_r7xx) {
r600_vm_init(dev);
r600_cp_load_microcode(dev_priv);
} else {
r700_vm_init(dev);
r700_cp_load_microcode(dev_priv);
}
r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
r600_do_engine_reset(dev);
return 0;
}
/* Wait for the CP to go idle.
 */
/*
 * Emits a cache flush/invalidate event plus a WAIT_UNTIL for 3D idle
 * clean, then polls until the engine reports idle.  Returns the result
 * of r600_do_wait_for_idle() (0 on success, negative errno on timeout).
 */
int r600_do_cp_idle(drm_radeon_private_t *dev_priv)
{
RING_LOCALS;
DRM_DEBUG("\n");
BEGIN_RING(5);
OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
/* wait for 3D idle clean */
OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
/* Config-reg writes are addressed relative to the config-reg window. */
OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
COMMIT_RING();
return r600_do_wait_for_idle(dev_priv);
}
/* Start the Command Processor.
 */
/*
 * Emits the ME_INITIALIZE packet (word 2 is 3 on R6xx parts and 0 on
 * R7xx), then clears the HALT bit in CP_ME_CNTL while keeping the mux
 * field at 0xff, and marks the CP as running.
 */
void r600_do_cp_start(drm_radeon_private_t *dev_priv)
{
int is_r6xx = (dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770;
RING_LOCALS;
DRM_DEBUG("\n");
BEGIN_RING(7);
OUT_RING(CP_PACKET3(R600_IT_ME_INITIALIZE, 5));
OUT_RING(0x00000001);
OUT_RING(is_r6xx ? 0x00000003 : 0x00000000);
OUT_RING(dev_priv->r600_max_hw_contexts - 1);
OUT_RING(R600_ME_INITIALIZE_DEVICE_ID(1));
OUT_RING(0x00000000);
OUT_RING(0x00000000);
ADVANCE_RING();
COMMIT_RING();
/* set the mux and reset the halt bit */
RADEON_WRITE(R600_CP_ME_CNTL, 0xff);
dev_priv->cp_running = 1;
}
/*
 * r600_do_cp_reset - drain the ring without disturbing the hardware.
 *
 * Snaps the write pointer (and our software head/tail bookkeeping) to
 * the current hardware read pointer so any queued-but-unfetched
 * commands are discarded.
 */
void r600_do_cp_reset(drm_radeon_private_t *dev_priv)
{
u32 rptr;
DRM_DEBUG("\n");
rptr = RADEON_READ(R600_CP_RB_RPTR);
RADEON_WRITE(R600_CP_RB_WPTR, rptr);
SET_RING_HEAD(dev_priv, rptr);
dev_priv->ring.tail = rptr;
}
/*
 * r600_do_cp_stop - halt the Command Processor.
 *
 * Asserts the HALT bit in CP_ME_CNTL (keeping the ME mux field at 0xff)
 * and marks the CP as stopped.
 */
void r600_do_cp_stop(drm_radeon_private_t *dev_priv)
{
DRM_DEBUG("\n");
RADEON_WRITE(R600_CP_ME_CNTL, (uint32_t)(0xff | R600_CP_ME_HALT));
dev_priv->cp_running = 0;
}
/*
 * r600_cp_dispatch_indirect - fire an indirect buffer at the CP.
 *
 * Pads the client's command data in @buf (between @start and @end byte
 * offsets) up to a multiple of 16 dwords with type-2 NOP packets, then
 * emits an INDIRECT_BUFFER packet pointing at it in GART space.
 * A zero-length range (start == end) is a no-op.  Always returns 0.
 */
int r600_cp_dispatch_indirect(struct drm_device *dev,
struct drm_buf *buf, int start, int end)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
if (start != end) {
/* GPU-visible address of the command data. */
unsigned long offset = (dev_priv->gart_buffers_offset
+ buf->offset + start);
/* Byte count rounded up to whole dwords. */
int dwords = (end - start + 3) / sizeof(u32);
DRM_DEBUG("dwords:%d\n", dwords);
DRM_DEBUG("offset 0x%lx\n", offset);
/* Indirect buffer data must be a multiple of 16 dwords.
 * pad the data with a Type-2 CP packet.
 */
while (dwords & 0xf) {
u32 *data = (u32 *)
((char *)dev->agp_buffer_map->handle
+ buf->offset + start);
data[dwords++] = RADEON_CP_PACKET2;
}
/* Fire off the indirect buffer */
BEGIN_RING(4);
OUT_RING(CP_PACKET3(R600_IT_INDIRECT_BUFFER, 2));
OUT_RING((offset & 0xfffffffc));
OUT_RING((upper_32_bits(offset) & 0xff));
OUT_RING(dwords);
ADVANCE_RING();
}
return 0;
}
/*
 * r600_cp_dispatch_swap - blit the finished buffer to the display.
 *
 * Copies each clip rectangle in the SAREA from the rendered buffer to
 * the displayed one (direction depends on the current page-flip state),
 * then bumps the frame-age counter so throttled clients may proceed.
 * Returns silently if the blit state cannot be set up.
 */
void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
struct drm_clip_rect *rects = sarea_priv->boxes;
int nrects = sarea_priv->nbox;
int idx, bytes_pp, spitch, dpitch;
uint64_t src_addr, dst_addr;
RING_LOCALS;
DRM_DEBUG("\n");
bytes_pp = (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888) ? 4 : 2;
/* Page-flip state selects which buffer is the blit source. */
if (sarea_priv->pfCurrentPage == 0) {
spitch = dev_priv->back_pitch;
dpitch = dev_priv->front_pitch;
src_addr = dev_priv->back_offset + dev_priv->fb_location;
dst_addr = dev_priv->front_offset + dev_priv->fb_location;
} else {
spitch = dev_priv->front_pitch;
dpitch = dev_priv->back_pitch;
src_addr = dev_priv->front_offset + dev_priv->fb_location;
dst_addr = dev_priv->back_offset + dev_priv->fb_location;
}
if (r600_prepare_blit_copy(dev, file_priv)) {
DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
return;
}
/* Blit every dirty clip rectangle. */
for (idx = 0; idx < nrects; idx++) {
int rx = rects[idx].x1;
int ry = rects[idx].y1;
int rw = rects[idx].x2 - rx;
int rh = rects[idx].y2 - ry;
DRM_DEBUG("%d,%d-%d,%d\n", rx, ry, rw, rh);
r600_blit_swap(dev,
src_addr, dst_addr,
rx, ry, rx, ry, rw, rh,
spitch, dpitch, bytes_pp);
}
r600_done_blit_copy(dev);
/* Increment the frame counter. The client-side 3D driver must
 * throttle the framerate by waiting for this value before
 * performing the swapbuffer ioctl.
 */
sarea_priv->last_frame++;
BEGIN_RING(3);
R600_FRAME_AGE(sarea_priv->last_frame);
ADVANCE_RING();
}
/*
 * r600_cp_dispatch_texture - upload texture data from userspace into VRAM.
 *
 * The user image is streamed through freelist DMA buffers in passes of at
 * most buf->total bytes each; every pass is blitted from GART to the
 * destination offset and the buffer is recycled.  On -EAGAIN the updated
 * @image (advanced data pointer) is copied back so userspace can retry.
 *
 * Returns 0 on success or a negative errno.
 */
int r600_cp_dispatch_texture(struct drm_device *dev,
			     struct drm_file *file_priv,
			     drm_radeon_texture_t *tex,
			     drm_radeon_tex_image_t *image)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;
	u32 *buffer;
	const u8 __user *data;
	unsigned int size, pass_size;
	u64 src_offset, dst_offset;

	/* validate both ends of the destination range against the aperture */
	if (!radeon_check_offset(dev_priv, tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");
		return -EINVAL;
	}

	/* this might fail for zero-sized uploads - are those illegal? */
	if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
		DRM_ERROR("Invalid final destination offset\n");
		return -EINVAL;
	}

	size = tex->height * tex->pitch;

	if (size == 0)
		return 0;

	dst_offset = tex->offset;

	if (r600_prepare_blit_copy(dev, file_priv)) {
		DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
		return -EAGAIN;
	}
	do {
		data = (const u8 __user *)image->data;
		pass_size = size;

		buf = radeon_freelist_get(dev);
		if (!buf) {
			DRM_DEBUG("EAGAIN\n");
			/* hand the advanced image state back so the
			 * caller can resume where we stopped */
			if (copy_to_user(tex->image, image, sizeof(*image)))
				return -EFAULT;
			return -EAGAIN;
		}

		if (pass_size > buf->total)
			pass_size = buf->total;

		/* Dispatch the indirect buffer.
		 */
		buffer =
		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

		/* NOTE(review): on this error path @buf is neither
		 * dispatched nor returned to the freelist, and
		 * r600_done_blit_copy() is skipped — looks like a
		 * buffer leak; confirm against upstream r600_cp.c */
		if (copy_from_user(buffer, data, pass_size)) {
			DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
			return -EFAULT;
		}

		buf->file_priv = file_priv;
		buf->used = pass_size;
		src_offset = dev_priv->gart_buffers_offset + buf->offset;

		r600_blit_copy(dev, src_offset, dst_offset, pass_size);

		radeon_cp_discard_buffer(dev, file_priv->master, buf);

		/* Update the input parameters for next time */
		image->data = (const u8 __user *)image->data + pass_size;
		dst_offset += pass_size;
		size -= pass_size;
	} while (size > 0);
	r600_done_blit_copy(dev);

	return 0;
}
/*
* Legacy cs ioctl
*/
/*
 * Produce the next command-stream id.  The low 24 bits are a sequence
 * counter; when it wraps, the high 8 bits (wrap counter) are bumped and
 * the sequence restarts at 1 so a valid id is never zero.
 */
static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
{
	/* FIXME: check if wrap affect last reported wrap & sequence */
	u32 seq = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;

	if (seq == 0) {
		/* sequence wrapped: increment wrap counter, restart at 1 */
		radeon->cs_id_wcnt += 0x01000000;
		seq = 1;
	}
	radeon->cs_id_scnt = seq;

	return seq | radeon->cs_id_wcnt;
}
/*
 * r600_cs_id_emit - write the next CS id into the R600 age scratch reg.
 * @id: out parameter; receives the id that was emitted.
 *
 * Emits via the ring (R600_CLEAR_AGE targets SCRATCH 2) and commits so
 * userspace can poll the scratch register for completion.
 */
static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
{
	RING_LOCALS;

	*id = radeon_cs_id_get(dev_priv);

	/* SCRATCH 2 */
	BEGIN_RING(3);
	R600_CLEAR_AGE(*id);
	ADVANCE_RING();
	COMMIT_RING();
}
/*
 * Grab a free DMA buffer to use as an indirect buffer.
 * On success *buffer holds the buffer (owner set to @fpriv) and 0 is
 * returned; otherwise *buffer is NULL and -EBUSY is returned.
 */
static int r600_ib_get(struct drm_device *dev,
		       struct drm_file *fpriv,
		       struct drm_buf **buffer)
{
	struct drm_buf *ib_buf;

	*buffer = NULL;

	ib_buf = radeon_freelist_get(dev);
	if (ib_buf == NULL)
		return -EBUSY;

	ib_buf->file_priv = fpriv;
	*buffer = ib_buf;
	return 0;
}
/*
 * r600_ib_free - dispatch (on success) and recycle an indirect buffer.
 * @l: parsed length in dwords; @r: parse result (0 means success).
 *
 * If parsing succeeded the IB is submitted to the CP (l dwords = l*4
 * bytes); either way the buffer goes back to the freelist and the ring
 * is committed.  A NULL @buf is tolerated (nothing to do).
 */
static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
			 struct drm_file *fpriv, int l, int r)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (buf) {
		if (!r)
			r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
		radeon_cp_discard_buffer(dev, fpriv->master, buf);
		COMMIT_RING();
	}
}
/*
 * r600_cs_legacy_ioctl - legacy (non-KMS) command-stream submission.
 *
 * Validates that the chip is R6xx/R7xx, takes the CS mutex, grabs an
 * indirect buffer, parses the user command stream into it, then
 * dispatches/frees the IB and emits a CS id into the age scratch
 * register for userspace fencing.
 *
 * Fix: @l was passed to r600_ib_free() uninitialized when r600_ib_get()
 * failed (buf is NULL there so it was unread, but passing an
 * indeterminate value is still wrong) — initialize it to 0.
 */
int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
{
	struct drm_radeon_private *dev_priv = dev->dev_private;
	struct drm_radeon_cs *cs = data;
	struct drm_buf *buf;
	unsigned family;
	int l = 0, r = 0;	/* l must be defined on the early-error path */
	u32 *ib, cs_id = 0;

	if (dev_priv == NULL) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	family = dev_priv->flags & RADEON_FAMILY_MASK;
	if (family < CHIP_R600) {
		DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
		return -EINVAL;
	}

	mutex_lock(&dev_priv->cs_mutex);

	/* get ib */
	r = r600_ib_get(dev, fpriv, &buf);
	if (r) {
		DRM_ERROR("ib_get failed\n");
		goto out;
	}
	ib = dev->agp_buffer_map->handle + buf->offset;

	/* now parse command stream */
	r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
	if (r) {
		goto out;
	}

out:
	/* dispatches on success, recycles the buffer either way */
	r600_ib_free(dev, buf, fpriv, l, r);

	/* emit cs id sequence */
	r600_cs_id_emit(dev_priv, &cs_id);
	cs->cs_id = cs_id;

	mutex_unlock(&dev_priv->cs_mutex);
	return r;
}
/* Report the R6xx tiling configuration cached in the device private. */
void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
{
	struct drm_radeon_private *priv = dev->dev_private;

	*npipes     = priv->r600_npipes;
	*nbanks     = priv->r600_nbanks;
	*group_size = priv->r600_group_size;
}
| gpl-2.0 |
CandyDevices/kernel_mediatek_sprout | drivers/usb/chipidea/debug.c | 887 | 7393 | #include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "debug.h"
/**
 * ci_device_show: prints information about device capabilities and status
 */
static int ci_device_show(struct seq_file *s, void *data)
{
	struct ci13xxx *ci = s->private;
	struct usb_gadget *g = &ci->gadget;

	seq_printf(s, "speed = %d\n", g->speed);
	seq_printf(s, "max_speed = %d\n", g->max_speed);
	seq_printf(s, "is_otg = %d\n", g->is_otg);
	seq_printf(s, "is_a_peripheral = %d\n", g->is_a_peripheral);
	seq_printf(s, "b_hnp_enable = %d\n", g->b_hnp_enable);
	seq_printf(s, "a_hnp_support = %d\n", g->a_hnp_support);
	seq_printf(s, "a_alt_hnp_support = %d\n", g->a_alt_hnp_support);
	seq_printf(s, "name = %s\n", g->name ? g->name : "");

	/* the remaining lines only make sense with a bound gadget driver */
	if (!ci->driver)
		return 0;

	seq_printf(s, "gadget function = %s\n",
		   ci->driver->function ? ci->driver->function : "");
	seq_printf(s, "gadget max speed = %d\n", ci->driver->max_speed);

	return 0;
}
/* seq_file boilerplate: bind ci_device_show to the "device" debugfs file */
static int ci_device_open(struct inode *inode, struct file *file)
{
	return single_open(file, ci_device_show, inode->i_private);
}

static const struct file_operations ci_device_fops = {
	.open		= ci_device_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/**
 * ci_port_test_show: reads port test mode
 */
static int ci_port_test_show(struct seq_file *s, void *data)
{
	struct ci13xxx *ci = s->private;
	unsigned long flags;
	unsigned cur_mode;

	/* sample the test mode under the controller lock */
	spin_lock_irqsave(&ci->lock, flags);
	cur_mode = hw_port_test_get(ci);
	spin_unlock_irqrestore(&ci->lock, flags);

	seq_printf(s, "mode = %u\n", cur_mode);

	return 0;
}
/**
 * ci_port_test_write: writes port test mode
 *
 * Parses an unsigned test-mode number from userspace and programs it via
 * hw_port_test_set() under the controller lock.  Returns @count on
 * success, -EFAULT on a bad copy, -EINVAL on unparsable input, or the
 * error from hw_port_test_set().
 *
 * Fix: the buffer handed to sscanf() was never NUL-terminated, so
 * sscanf could read uninitialized stack bytes past the copied data.
 */
static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct ci13xxx *ci = s->private;
	unsigned long flags;
	unsigned mode;
	char buf[32];
	size_t n = min_t(size_t, sizeof(buf) - 1, count);
	int ret;

	if (copy_from_user(buf, ubuf, n))
		return -EFAULT;
	buf[n] = '\0';	/* copy_from_user() does not terminate the string */

	if (sscanf(buf, "%u", &mode) != 1)
		return -EINVAL;

	spin_lock_irqsave(&ci->lock, flags);
	ret = hw_port_test_set(ci, mode);
	spin_unlock_irqrestore(&ci->lock, flags);

	return ret ? ret : count;
}
/* seq_file boilerplate: "port_test" debugfs file (readable and writable) */
static int ci_port_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, ci_port_test_show, inode->i_private);
}

static const struct file_operations ci_port_test_fops = {
	.open		= ci_port_test_open,
	.write		= ci_port_test_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/**
 * ci_qheads_show: DMA contents of all queue heads
 *
 * Endpoints are laid out as hw_ep_max/2 RX entries followed by
 * hw_ep_max/2 TX entries; each line pairs the RX and TX queue head for
 * one endpoint number and then dumps both QHs word by word.
 */
static int ci_qheads_show(struct seq_file *s, void *data)
{
	struct ci13xxx *ci = s->private;
	unsigned long flags;
	unsigned i, j;

	if (ci->role != CI_ROLE_GADGET) {
		seq_printf(s, "not in gadget mode\n");
		return 0;
	}

	/* hold the lock so the dump is a consistent snapshot */
	spin_lock_irqsave(&ci->lock, flags);
	for (i = 0; i < ci->hw_ep_max/2; i++) {
		struct ci13xxx_ep *mEpRx = &ci->ci13xxx_ep[i];
		struct ci13xxx_ep *mEpTx =
			&ci->ci13xxx_ep[i + ci->hw_ep_max/2];
		seq_printf(s, "EP=%02i: RX=%08X TX=%08X\n",
			   i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++)
			seq_printf(s, " %04X: %08X %08X\n", j,
				   *((u32 *)mEpRx->qh.ptr + j),
				   *((u32 *)mEpTx->qh.ptr + j));
	}
	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}
/* seq_file boilerplate: read-only "qheads" debugfs file */
static int ci_qheads_open(struct inode *inode, struct file *file)
{
	return single_open(file, ci_qheads_show, inode->i_private);
}

static const struct file_operations ci_qheads_fops = {
	.open		= ci_qheads_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/**
 * ci_requests_show: DMA contents of all requests currently queued (all endpts)
 *
 * Walks every endpoint's queue-head request list and dumps each queued
 * transfer descriptor word by word.  The RX/TX label follows the same
 * split as ci_qheads_show: the first hw_ep_max/2 endpoints are RX.
 */
static int ci_requests_show(struct seq_file *s, void *data)
{
	struct ci13xxx *ci = s->private;
	unsigned long flags;
	struct list_head *ptr = NULL;
	struct ci13xxx_req *req = NULL;
	unsigned i, j, qsize = sizeof(struct ci13xxx_td)/sizeof(u32);

	if (ci->role != CI_ROLE_GADGET) {
		seq_printf(s, "not in gadget mode\n");
		return 0;
	}

	/* lock out the irq path while traversing the request lists */
	spin_lock_irqsave(&ci->lock, flags);
	for (i = 0; i < ci->hw_ep_max; i++)
		list_for_each(ptr, &ci->ci13xxx_ep[i].qh.queue) {
			req = list_entry(ptr, struct ci13xxx_req, queue);

			seq_printf(s, "EP=%02i: TD=%08X %s\n",
				   i % (ci->hw_ep_max / 2), (u32)req->dma,
				   ((i < ci->hw_ep_max/2) ? "RX" : "TX"));

			for (j = 0; j < qsize; j++)
				seq_printf(s, " %04X: %08X\n", j,
					   *((u32 *)req->ptr + j));
		}
	spin_unlock_irqrestore(&ci->lock, flags);

	return 0;
}
/* seq_file boilerplate: read-only "requests" debugfs file */
static int ci_requests_open(struct inode *inode, struct file *file)
{
	return single_open(file, ci_requests_show, inode->i_private);
}

static const struct file_operations ci_requests_fops = {
	.open		= ci_requests_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Show the name of the controller's currently active role. */
static int ci_role_show(struct seq_file *s, void *data)
{
	struct ci13xxx *ci = s->private;
	const char *role_name = ci_role(ci)->name;

	seq_printf(s, "%s\n", role_name);

	return 0;
}
/*
 * ci_role_write - switch the controller to the role named by userspace.
 *
 * Matches the written string against the registered role names, rejects
 * unknown names and the already-active role (-EINVAL), then stops the
 * current role and starts the new one.  Returns @count on success.
 *
 * Fix: the buffer was compared with strncmp() without being
 * NUL-terminated after copy_from_user(), so uninitialized stack bytes
 * could take part in the comparison.
 */
static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct ci13xxx *ci = s->private;
	enum ci_role role;
	char buf[8];
	size_t n = min_t(size_t, sizeof(buf) - 1, count);
	int ret;

	if (copy_from_user(buf, ubuf, n))
		return -EFAULT;
	buf[n] = '\0';	/* copy_from_user() does not terminate the string */

	for (role = CI_ROLE_HOST; role < CI_ROLE_END; role++)
		if (ci->roles[role] &&
		    !strncmp(buf, ci->roles[role]->name,
			     strlen(ci->roles[role]->name)))
			break;

	if (role == CI_ROLE_END || role == ci->role)
		return -EINVAL;

	ci_role_stop(ci);
	ret = ci_role_start(ci, role);

	return ret ? ret : count;
}
/* seq_file boilerplate: "role" debugfs file (readable and writable) */
static int ci_role_open(struct inode *inode, struct file *file)
{
	return single_open(file, ci_role_show, inode->i_private);
}

static const struct file_operations ci_role_fops = {
	.open		= ci_role_open,
	.write		= ci_role_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/**
 * dbg_create_files: initializes the attribute interface
 * @ci: device
 *
 * Creates the per-controller debugfs directory and its attribute files;
 * on any failure everything created so far is removed again.
 *
 * This function returns an error code
 */
int dbg_create_files(struct ci13xxx *ci)
{
	struct dentry *dent;

	ci->debugfs = debugfs_create_dir(dev_name(ci->dev), NULL);
	if (!ci->debugfs)
		return -ENOMEM;

	dent = debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
				   &ci_device_fops);
	if (!dent)
		goto err;

	dent = debugfs_create_file("port_test", S_IRUGO | S_IWUSR, ci->debugfs,
				   ci, &ci_port_test_fops);
	if (!dent)
		goto err;

	dent = debugfs_create_file("qheads", S_IRUGO, ci->debugfs, ci,
				   &ci_qheads_fops);
	if (!dent)
		goto err;

	dent = debugfs_create_file("requests", S_IRUGO, ci->debugfs, ci,
				   &ci_requests_fops);
	if (!dent)
		goto err;

	/* same error-check style as the files above (was inverted) */
	dent = debugfs_create_file("role", S_IRUGO | S_IWUSR, ci->debugfs, ci,
				   &ci_role_fops);
	if (!dent)
		goto err;

	return 0;
err:
	debugfs_remove_recursive(ci->debugfs);
	return -ENOMEM;
}
/**
 * dbg_remove_files: destroys the attribute interface
 * @ci: device
 *
 * Removes the whole debugfs directory created by dbg_create_files().
 */
void dbg_remove_files(struct ci13xxx *ci)
{
	debugfs_remove_recursive(ci->debugfs);
}
| gpl-2.0 |
sakuramilk/sc02c_kernel | sound/soc/blackfin/bf5xx-i2s.c | 887 | 7846 | /*
* File: sound/soc/blackfin/bf5xx-i2s.c
* Author: Cliff Cai <Cliff.Cai@analog.com>
*
* Created: Tue June 06 2008
* Description: Blackfin I2S CPU DAI driver
*
* Modified:
* Copyright 2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <asm/irq.h>
#include <asm/portmux.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include "bf5xx-sport.h"
#include "bf5xx-i2s.h"
/* Cached SPORT transmit/receive configuration registers, plus a flag
 * recording whether the SPORT has already been programmed (TX and RX
 * are configured together on first use, see bf5xx_i2s_hw_params()). */
struct bf5xx_i2s_port {
	u16 tcr1;	/* transmit configuration 1 */
	u16 rcr1;	/* receive configuration 1 */
	u16 tcr2;	/* transmit configuration 2 (word length) */
	u16 rcr2;	/* receive configuration 2 (word length) */
	int configured;	/* nonzero once both directions are set up */
};

static struct bf5xx_i2s_port bf5xx_i2s;
/* which SPORT instance to use, chosen at build time */
static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM;

/* DMA channels, error IRQ and register base for each SPORT */
static struct sport_param sport_params[2] = {
	{
		.dma_rx_chan	= CH_SPORT0_RX,
		.dma_tx_chan	= CH_SPORT0_TX,
		.err_irq	= IRQ_SPORT0_ERROR,
		.regs		= (struct sport_register *)SPORT0_TCR1,
	},
	{
		.dma_rx_chan	= CH_SPORT1_RX,
		.dma_tx_chan	= CH_SPORT1_TX,
		.err_irq	= IRQ_SPORT1_ERROR,
		.regs		= (struct sport_register *)SPORT1_TCR1,
	}
};

/*
 * Setting the TFS pin selector for SPORT 0 based on whether the selected
 * port id F or G. If the port is F then no conflict should exist for the
 * TFS. When Port G is selected and EMAC then there is a conflict between
 * the PHY interrupt line and TFS. Current settings prevent the conflict
 * by ignoring the TFS pin when Port G is selected. This allows both
 * codecs and EMAC using Port G concurrently.
 */
#ifdef CONFIG_BF527_SPORT0_PORTG
#define LOCAL_SPORT0_TFS (0)
#else
#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
#endif

/* zero-terminated pin lists passed to peripheral_request_list() */
static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
		P_SPORT0_DRPRI, P_SPORT0_RSCLK, LOCAL_SPORT0_TFS, 0},
		{P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI,
		P_SPORT1_RSCLK, P_SPORT1_TFS, 0} };
/*
 * Configure the DAI format.  Only I2S and DSP-A framing are accepted,
 * and the SPORT can only run as clock/frame slave (codec is master).
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int bf5xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	unsigned int format = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
	unsigned int master = fmt & SND_SOC_DAIFMT_MASTER_MASK;
	int err = 0;

	/* interface format: support I2S and DSP-A, slave mode only */
	if (format == SND_SOC_DAIFMT_I2S) {
		bf5xx_i2s.tcr1 |= TFSR | TCKFE;
		bf5xx_i2s.rcr1 |= RFSR | RCKFE;
		bf5xx_i2s.tcr2 |= TSFSE;
		bf5xx_i2s.rcr2 |= RSFSE;
	} else if (format == SND_SOC_DAIFMT_DSP_A) {
		bf5xx_i2s.tcr1 |= TFSR;
		bf5xx_i2s.rcr1 |= RFSR;
	} else if (format == SND_SOC_DAIFMT_LEFT_J) {
		err = -EINVAL;
	} else {
		printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
		err = -EINVAL;
	}

	if (master == SND_SOC_DAIFMT_CBM_CFM) {
		/* codec drives bit clock and frame sync: supported */
	} else if (master == SND_SOC_DAIFMT_CBS_CFS ||
		   master == SND_SOC_DAIFMT_CBM_CFS ||
		   master == SND_SOC_DAIFMT_CBS_CFM) {
		err = -EINVAL;
	} else {
		printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
		err = -EINVAL;
	}

	return err;
}
static int bf5xx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
int ret = 0;
bf5xx_i2s.tcr2 &= ~0x1f;
bf5xx_i2s.rcr2 &= ~0x1f;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bf5xx_i2s.tcr2 |= 15;
bf5xx_i2s.rcr2 |= 15;
sport_handle->wdsize = 2;
break;
case SNDRV_PCM_FORMAT_S24_LE:
bf5xx_i2s.tcr2 |= 23;
bf5xx_i2s.rcr2 |= 23;
sport_handle->wdsize = 3;
break;
case SNDRV_PCM_FORMAT_S32_LE:
bf5xx_i2s.tcr2 |= 31;
bf5xx_i2s.rcr2 |= 31;
sport_handle->wdsize = 4;
break;
}
if (!bf5xx_i2s.configured) {
/*
* TX and RX are not independent,they are enabled at the
* same time, even if only one side is running. So, we
* need to configure both of them at the time when the first
* stream is opened.
*
* CPU DAI:slave mode.
*/
bf5xx_i2s.configured = 1;
ret = sport_config_rx(sport_handle, bf5xx_i2s.rcr1,
bf5xx_i2s.rcr2, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
ret = sport_config_tx(sport_handle, bf5xx_i2s.tcr1,
bf5xx_i2s.tcr2, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
}
return 0;
}
/* Allow the SPORT to be reconfigured once the last stream has closed. */
static void bf5xx_i2s_shutdown(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	pr_debug("%s enter\n", __func__);
	if (dai->active)
		return;
	/* No active stream, SPORT is allowed to be configured again. */
	bf5xx_i2s.configured = 0;
}
/*
 * bf5xx_i2s_probe - claim the SPORT pins and initialize the SPORT/DMA
 * handle for the selected SPORT instance.  On sport_init() failure the
 * pins are released again.
 */
static int bf5xx_i2s_probe(struct platform_device *pdev,
			   struct snd_soc_dai *dai)
{
	pr_debug("%s enter\n", __func__);
	if (peripheral_request_list(&sport_req[sport_num][0], "soc-audio")) {
		pr_err("Requesting Peripherals failed\n");
		return -EFAULT;
	}

	/* request DMA for SPORT */
	sport_handle = sport_init(&sport_params[sport_num], 4, \
			2 * sizeof(u32), NULL);
	if (!sport_handle) {
		peripheral_free_list(&sport_req[sport_num][0]);
		return -ENODEV;
	}

	return 0;
}
/* Release the SPORT pins claimed in bf5xx_i2s_probe(). */
static void bf5xx_i2s_remove(struct platform_device *pdev,
			     struct snd_soc_dai *dai)
{
	pr_debug("%s enter\n", __func__);
	peripheral_free_list(&sport_req[sport_num][0]);
}
#ifdef CONFIG_PM
/* Stop whichever SPORT directions are active before system sleep. */
static int bf5xx_i2s_suspend(struct snd_soc_dai *dai)
{
	pr_debug("%s : sport %d\n", __func__, dai->id);
	if (dai->capture.active)
		sport_rx_stop(sport_handle);
	if (dai->playback.active)
		sport_tx_stop(sport_handle);
	return 0;
}

/* Reprogram both SPORT directions from the cached register values. */
static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
{
	int ret;

	pr_debug("%s : sport %d\n", __func__, dai->id);
	ret = sport_config_rx(sport_handle, bf5xx_i2s.rcr1,
			      bf5xx_i2s.rcr2, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		return -EBUSY;
	}

	ret = sport_config_tx(sport_handle, bf5xx_i2s.tcr1,
			      bf5xx_i2s.tcr2, 0, 0);
	if (ret) {
		pr_err("SPORT is busy!\n");
		return -EBUSY;
	}

	return 0;
}

#else
#define bf5xx_i2s_suspend	NULL
#define bf5xx_i2s_resume	NULL
#endif
/* Sample rates and formats this DAI advertises to the ASoC core;
 * they mirror what bf5xx_i2s_hw_params() can actually program. */
#define BF5XX_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
		SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
		SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
		SNDRV_PCM_RATE_96000)

#define BF5XX_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |\
	SNDRV_PCM_FMTBIT_S32_LE)

static struct snd_soc_dai_ops bf5xx_i2s_dai_ops = {
	.shutdown	= bf5xx_i2s_shutdown,
	.hw_params	= bf5xx_i2s_hw_params,
	.set_fmt	= bf5xx_i2s_set_dai_fmt,
};

/* CPU DAI description registered with the ASoC core */
struct snd_soc_dai bf5xx_i2s_dai = {
	.name = "bf5xx-i2s",
	.id = 0,
	.probe = bf5xx_i2s_probe,
	.remove = bf5xx_i2s_remove,
	.suspend = bf5xx_i2s_suspend,
	.resume = bf5xx_i2s_resume,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = BF5XX_I2S_RATES,
		.formats = BF5XX_I2S_FORMATS,},
	.capture = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = BF5XX_I2S_RATES,
		.formats = BF5XX_I2S_FORMATS,},
	.ops = &bf5xx_i2s_dai_ops,
};
EXPORT_SYMBOL_GPL(bf5xx_i2s_dai);
/* Register the CPU DAI with the ASoC core on module load. */
static int __init bfin_i2s_init(void)
{
	return snd_soc_register_dai(&bf5xx_i2s_dai);
}
module_init(bfin_i2s_init);

/* Unregister the DAI on module unload. */
static void __exit bfin_i2s_exit(void)
{
	snd_soc_unregister_dai(&bf5xx_i2s_dai);
}
module_exit(bfin_i2s_exit);

/* Module information */
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("I2S driver for ADI Blackfin");
MODULE_LICENSE("GPL");
| gpl-2.0 |
IndieBeto/Xiaomi_Kernel_OpenSource | fs/efs/dir.c | 2167 | 2664 | /*
* dir.c
*
* Copyright (c) 1999 Al Smith
*/
#include <linux/buffer_head.h>
#include "efs.h"
/* forward declaration: the fops table below references efs_readdir() */
static int efs_readdir(struct file *, void *, filldir_t);

const struct file_operations efs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= efs_readdir,
};

const struct inode_operations efs_dir_inode_operations = {
	.lookup		= efs_lookup,
};
/*
 * efs_readdir - emit the next directory entry at/after f_pos.
 *
 * f_pos encodes (block << EFS_DIRBSIZE_BITS) | slot.  Blocks are walked
 * in order; the first non-empty slot is handed to @filldir and f_pos is
 * advanced to the following slot.
 *
 * Fix: the bounds sanity check on the entry name used to run AFTER
 * filldir() had already copied the name, so a corrupted on-disk namelen
 * could make filldir read past the directory block.  Validate the entry
 * before using it.
 */
static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
	struct inode *inode = file_inode(filp);
	struct buffer_head *bh;

	struct efs_dir		*dirblock;
	struct efs_dentry	*dirslot;
	efs_ino_t		inodenum;
	efs_block_t		block;
	int			slot, namelen;
	char			*nameptr;

	if (inode->i_size & (EFS_DIRBSIZE-1))
		printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n");

	/* work out where this entry can be found */
	block = filp->f_pos >> EFS_DIRBSIZE_BITS;

	/* each block contains at most 256 slots */
	slot  = filp->f_pos & 0xff;

	/* look at all blocks */
	while (block < inode->i_blocks) {
		/* read the dir block */
		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));

		if (!bh) {
			printk(KERN_ERR "EFS: readdir(): failed to read dir block %d\n", block);
			break;
		}

		dirblock = (struct efs_dir *) bh->b_data;

		if (be16_to_cpu(dirblock->magic) != EFS_DIRBLK_MAGIC) {
			printk(KERN_ERR "EFS: readdir(): invalid directory block\n");
			brelse(bh);
			break;
		}

		while (slot < dirblock->slots) {
			if (dirblock->space[slot] == 0) {
				slot++;
				continue;
			}

			dirslot  = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot));

			inodenum = be32_to_cpu(dirslot->inode);
			namelen  = dirslot->namelen;
			nameptr  = dirslot->name;

#ifdef DEBUG
			printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen);
#endif
			/* sanity check: entry must lie entirely within the
			 * directory block, BEFORE the name is read */
			if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
				printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
				slot++;
				continue;
			}
			if (namelen > 0) {
				/* found the next entry */
				filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;

				/* copy filename and data in dirslot */
				filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN);

				/* store position of next slot */
				if (++slot == dirblock->slots) {
					slot = 0;
					block++;
				}
				brelse(bh);
				filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
				goto out;
			}
			slot++;
		}
		brelse(bh);

		slot = 0;
		block++;
	}

	filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
out:
	return 0;
}
| gpl-2.0 |
auras76/32.1.A.1.163 | tools/perf/tests/open-syscall-tp-fields.c | 2167 | 2535 | #include "perf.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
/*
 * Self-test: verify that the raw-sample fields of the
 * syscalls:sys_enter_open tracepoint can be parsed back.
 *
 * Opens /etc/passwd with O_RDONLY|O_DIRECTORY (the open itself fails,
 * which is fine — only the syscall-entry tracepoint matters), then mmap-
 * reads the ring buffers and checks the recorded "flags" field matches.
 * Returns 0 on success, negative on failure.
 */
int test__syscall_open_tp_fields(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay   = true,
		.freq	    = 1,
		.mmap_pages = 256,
		.raw_samples = true,	/* tracepoints need raw samples */
	};
	const char *filename = "/etc/passwd";
	int flags = O_RDONLY | O_DIRECTORY;
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, nr_polls = 0;

	if (evlist == NULL) {
		pr_debug("%s: perf_evlist__new\n", __func__);
		goto out;
	}

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL) {
		pr_debug("%s: perf_evsel__newtp\n", __func__);
		goto out_delete_evlist;
	}

	perf_evlist__add(evlist, evsel);

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("%s: perf_evlist__create_maps\n", __func__);
		goto out_delete_evlist;
	}

	perf_evsel__config(evsel, &opts);

	/* monitor only the test process itself */
	evlist->threads->map[0] = getpid();

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_maps;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__enable(evlist);

	/*
	 * Generate the event:
	 */
	open(filename, flags);

	while (1) {
		int before = nr_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				int tp_flags;
				struct perf_sample sample;

				++nr_events;

				if (type != PERF_RECORD_SAMPLE)
					continue;

				err = perf_evsel__parse_sample(evsel, event, &sample);
				if (err) {
					pr_err("Can't parse sample, err = %d\n", err);
					goto out_munmap;
				}

				/* the recorded flags must equal what we passed */
				tp_flags = perf_evsel__intval(evsel, &sample, "flags");

				if (flags != tp_flags) {
					pr_debug("%s: Expected flags=%#x, got %#x\n",
						 __func__, flags, tp_flags);
					goto out_munmap;
				}

				goto out_ok;
			}
		}

		/* nothing new: poll briefly, give up after 5 empty rounds */
		if (nr_events == before)
			poll(evlist->pollfd, evlist->nr_fds, 10);

		if (++nr_polls > 5) {
			pr_debug("%s: no events!\n", __func__);
			goto out_munmap;
		}
	}
out_ok:
	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
| gpl-2.0 |
lollipop-og/F93_LGE975_KK_Kernel | sound/soc/msm/msm7k-pcm.c | 3447 | 18311 | /* linux/sound/soc/msm/msm7k-pcm.c
*
* Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved.
*
* All source code in this file is licensed under the following license except
* where indicated.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org.
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include "msm-pcm.h"
#define SND_DRIVER "snd_msm"
#define MAX_PCM_DEVICES SNDRV_CARDS
#define MAX_PCM_SUBSTREAMS 1

/* per-card bookkeeping for this sound driver */
struct snd_msm {
	struct snd_card *card;
	struct snd_pcm *pcm;
};

/* number of periods handed to the DSP so far (diagnostics) */
int copy_count;

/* shared locks/waitqueues used by the PCM and DSP glue code */
struct audio_locks the_locks;
EXPORT_SYMBOL(the_locks);
/* volume control state shared with the mixer controls */
struct msm_volume msm_vol_ctl;
EXPORT_SYMBOL(msm_vol_ctl);
/*
 * Map a sample rate in Hz to the DSP's AUDREC sample-rate index.
 * Unknown rates fall back to the 44.1 kHz index, as before.
 */
static unsigned convert_dsp_samp_index(unsigned index)
{
	static const struct {
		unsigned hz;
		unsigned dsp_index;
	} tbl[] = {
		{ 48000, AUDREC_CMD_SAMP_RATE_INDX_48000 },
		{ 44100, AUDREC_CMD_SAMP_RATE_INDX_44100 },
		{ 32000, AUDREC_CMD_SAMP_RATE_INDX_32000 },
		{ 24000, AUDREC_CMD_SAMP_RATE_INDX_24000 },
		{ 22050, AUDREC_CMD_SAMP_RATE_INDX_22050 },
		{ 16000, AUDREC_CMD_SAMP_RATE_INDX_16000 },
		{ 12000, AUDREC_CMD_SAMP_RATE_INDX_12000 },
		{ 11025, AUDREC_CMD_SAMP_RATE_INDX_11025 },
		{ 8000,  AUDREC_CMD_SAMP_RATE_INDX_8000 },
	};
	unsigned i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (tbl[i].hz == index)
			return tbl[i].dsp_index;

	return AUDREC_CMD_SAMP_RATE_INDX_44100;
}
/*
 * Map a sample rate in Hz to the audmgr RPC rate constant.
 * Unknown rates fall back to the 44.1 kHz constant, as before.
 */
static unsigned convert_samp_rate(unsigned hz)
{
	static const struct {
		unsigned hz;
		unsigned rpc_rate;
	} tbl[] = {
		{ 48000, RPC_AUD_DEF_SAMPLE_RATE_48000 },
		{ 44100, RPC_AUD_DEF_SAMPLE_RATE_44100 },
		{ 32000, RPC_AUD_DEF_SAMPLE_RATE_32000 },
		{ 24000, RPC_AUD_DEF_SAMPLE_RATE_24000 },
		{ 22050, RPC_AUD_DEF_SAMPLE_RATE_22050 },
		{ 16000, RPC_AUD_DEF_SAMPLE_RATE_16000 },
		{ 12000, RPC_AUD_DEF_SAMPLE_RATE_12000 },
		{ 11025, RPC_AUD_DEF_SAMPLE_RATE_11025 },
		{ 8000,  RPC_AUD_DEF_SAMPLE_RATE_8000 },
	};
	unsigned i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (tbl[i].hz == hz)
			return tbl[i].rpc_rate;

	return RPC_AUD_DEF_SAMPLE_RATE_44100;
}
/* Playback capabilities: mmap'able, two fixed 4800-byte periods
 * matching the double-buffered DSP transfer scheme. */
static struct snd_pcm_hardware msm_pcm_playback_hardware = {
	.info =                 SNDRV_PCM_INFO_MMAP |
				SNDRV_PCM_INFO_MMAP_VALID |
				SNDRV_PCM_INFO_INTERLEAVED,
	.formats =              USE_FORMATS,
	.rates =                USE_RATE,
	.rate_min =             USE_RATE_MIN,
	.rate_max =             USE_RATE_MAX,
	.channels_min =         USE_CHANNELS_MIN,
	.channels_max =         USE_CHANNELS_MAX,
	.buffer_bytes_max =     4800 * 2,
	.period_bytes_min =     4800,
	.period_bytes_max =     4800,
	.periods_min =          2,
	.periods_max =          2,
	.fifo_size =            0,
};

/* Capture capabilities: interleaved read()-style transfers only. */
static struct snd_pcm_hardware msm_pcm_capture_hardware = {
	.info =                 SNDRV_PCM_INFO_INTERLEAVED,
	.formats =              USE_FORMATS,
	.rates =                USE_RATE,
	.rate_min =             USE_RATE_MIN,
	.rate_max =             USE_RATE_MAX,
	.channels_min =         USE_CHANNELS_MIN,
	.channels_max =         USE_CHANNELS_MAX,
	.buffer_bytes_max =     MAX_BUFFER_CAPTURE_SIZE,
	.period_bytes_min =     CAPTURE_SIZE,
	.period_bytes_max =     CAPTURE_SIZE,
	.periods_min =          USE_PERIODS_MIN,
	.periods_max =          USE_PERIODS_MAX,
	.fifo_size =            0,
};

/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};

/* constrains runtime->rate to the discrete list above */
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
	.count = ARRAY_SIZE(supported_sample_rates),
	.list = supported_sample_rates,
	.mask = 0,
};
/*
 * Hand the current half-buffer (selected by out_tail) to the DSP,
 * flip to the other half and advance the period counter (wrapping at
 * runtime->periods).
 */
static void msm_pcm_enqueue_data(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	unsigned int bytes_per_period = snd_pcm_lib_period_bytes(substream);

	pr_debug("prtd->out_tail =%d mmap_flag=%d\n",
			prtd->out_tail, prtd->mmap_flag);
	alsa_dsp_send_buffer(prtd, prtd->out_tail, bytes_per_period);
	prtd->out_tail ^= 1;
	copy_count++;
	if (++prtd->period >= runtime->periods)
		prtd->period = 0;
}
/*
 * DSP write-completion callback: report period completion to ALSA and,
 * for mmap'ed playback streams that are running, queue the next half-
 * buffer; while stopped, just remember how many buffers the DSP wants.
 */
static void playback_event_handler(void *data)
{
	struct msm_audio *prtd = data;

	snd_pcm_period_elapsed(prtd->playback_substream);

	if (!prtd->mmap_flag)
		return;
	if (prtd->dir == SNDRV_PCM_STREAM_CAPTURE)
		return;

	if (prtd->stopped)
		prtd->out_needed++;
	else
		msm_pcm_enqueue_data(prtd->playback_substream);
}
/* DSP read-completion callback: advance the capture stream's period. */
static void capture_event_handler(void *data)
{
	struct msm_audio *audio = data;

	snd_pcm_period_elapsed(audio->capture_substream);
}
/*
 * msm_pcm_playback_prepare - capture period geometry and stream params
 * from the runtime; on the first prepare of an mmap'ed stream, describe
 * the two DMA half-buffers and configure the audio DSP path.
 *
 * Fix: the enable check used bitwise '|' where a logical condition was
 * meant; with 0/1 operands the result is the same, but '||' states the
 * intent and short-circuits correctly.
 */
static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_irq_pos = 0;
	prtd->pcm_buf_pos = 0;
	/* rate and channels are sent to audio driver */
	prtd->out_sample_rate = runtime->rate;
	prtd->out_channel_mode = runtime->channels;

	/* nothing more to do when already enabled or not mmap'ed */
	if (prtd->enabled || !prtd->mmap_flag)
		return 0;

	/* split the DMA area into two BUFSZ half-buffers for the DSP */
	prtd->data = substream->dma_buffer.area;
	prtd->phys = substream->dma_buffer.addr;
	prtd->out[0].data = prtd->data + 0;
	prtd->out[0].addr = prtd->phys + 0;
	prtd->out[0].size = BUFSZ;
	prtd->out[1].data = prtd->data + BUFSZ;
	prtd->out[1].addr = prtd->phys + BUFSZ;
	prtd->out[1].size = BUFSZ;
	prtd->out[0].used = prtd->pcm_count;
	prtd->out[1].used = prtd->pcm_count;

	mutex_lock(&the_locks.lock);
	alsa_audio_configure(prtd);
	mutex_unlock(&the_locks.lock);
	return 0;
}
/*
 * msm_pcm_capture_prepare - derive DSP sample-rate/channel settings from
 * the runtime and, on first prepare, bring up the capture path: audio
 * manager session, then the AUDPRE and AUDREC ADSP tasks (each enable
 * rolls back the previous ones on failure), then start recording.
 */
static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	struct audmgr_config cfg;
	int rc;

	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
	prtd->pcm_irq_pos = 0;
	prtd->pcm_buf_pos = 0;

	/* rate and channels are sent to audio driver */
	prtd->samp_rate = convert_samp_rate(runtime->rate);
	prtd->samp_rate_index = convert_dsp_samp_index(runtime->rate);
	prtd->channel_mode = (runtime->channels - 1);	/* 0 = mono, 1 = stereo */
	prtd->buffer_size = prtd->channel_mode ? STEREO_DATA_SIZE : \
							MONO_DATA_SIZE;

	if (prtd->enabled == 1)
		return 0;

	prtd->type = AUDREC_CMD_TYPE_0_INDEX_WAV;

	cfg.tx_rate = convert_samp_rate(runtime->rate);
	cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
	cfg.def_method = RPC_AUD_DEF_METHOD_RECORD;
	cfg.codec = RPC_AUD_DEF_CODEC_PCM;
	cfg.snd_method = RPC_SND_METHOD_MIDI;

	rc = audmgr_enable(&prtd->audmgr, &cfg);
	if (rc < 0)
		return rc;

	if (msm_adsp_enable(prtd->audpre)) {
		audmgr_disable(&prtd->audmgr);
		return -ENODEV;
	}
	if (msm_adsp_enable(prtd->audrec)) {
		msm_adsp_disable(prtd->audpre);
		audmgr_disable(&prtd->audmgr);
		return -ENODEV;
	}
	prtd->enabled = 1;
	alsa_rec_dsp_enable(prtd, 1);

	return 0;
}
/*
 * msm_pcm_trigger - start/stop handling for mmap'ed playback streams.
 *
 * Capture and non-mmap streams are untouched here.  On start/resume,
 * if the DSP had asked for buffers while we were stopped (out_needed),
 * re-queue them: out_tail is re-seeded from the irq position so the
 * DSP resumes from the correct half-buffer.  On stop/suspend the
 * stream is only flagged stopped; the event handler then accumulates
 * out_needed instead of queueing.  All re-queueing is done under
 * write_dsp_lock to serialize with playback_event_handler().
 */
static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	unsigned long flag = 0;
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			|| !prtd->mmap_flag)
			break;
		if (!prtd->out_needed) {
			prtd->stopped = 0;
			break;
		}
		spin_lock_irqsave(&the_locks.write_dsp_lock, flag);
		if (prtd->running == 1) {
			if (prtd->stopped == 1) {
				prtd->stopped = 0;
				prtd->period = 0;
				/* pick the half-buffer matching where the
				 * DSP interrupt position left off */
				if (prtd->pcm_irq_pos == 0) {
					prtd->out_tail = 0;
					msm_pcm_enqueue_data(
						prtd->playback_substream);
					prtd->out_needed--;
				} else {
					prtd->out_tail = 1;
					msm_pcm_enqueue_data(
						prtd->playback_substream);
					prtd->out_needed--;
				}
				/* second pending buffer, if any */
				if (prtd->out_needed) {
					prtd->out_tail ^= 1;
					msm_pcm_enqueue_data(
						prtd->playback_substream);
					prtd->out_needed--;
				}
			}
		}
		spin_unlock_irqrestore(&the_locks.write_dsp_lock, flag);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			|| !prtd->mmap_flag)
			break;
		prtd->stopped = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Report the playback hardware pointer as the byte position the DSP
 * interrupt path last advanced to, wrapped back to the buffer start
 * when a full ring has been consumed.
 */
static snd_pcm_uframes_t
msm_pcm_playback_pointer(struct snd_pcm_substream *substream)
{
	struct msm_audio *prtd = substream->runtime->private_data;

	if (prtd->pcm_irq_pos == prtd->pcm_size)
		prtd->pcm_irq_pos = 0;

	return bytes_to_frames(substream->runtime, prtd->pcm_irq_pos);
}
/*
 * Copy captured PCM data to userspace. Stereo reads the full request
 * with one alsa_buffer_read(); mono reads two half-sized chunks back
 * to back. The 'channel' and 'hwoff' arguments are unused here.
 */
static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
int channel, snd_pcm_uframes_t hwoff, void __user *buf,
snd_pcm_uframes_t frames)
{
int rc = 0, rc1 = 0, rc2 = 0;
int fbytes = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = substream->runtime->private_data;
int monofbytes = 0;
char *bufferp = NULL;
fbytes = frames_to_bytes(runtime, frames);
monofbytes = fbytes / 2;
if (runtime->channels == 2) {
rc = alsa_buffer_read(prtd, buf, fbytes, NULL);
} else {
bufferp = buf;
rc1 = alsa_buffer_read(prtd, bufferp, monofbytes, NULL);
bufferp = buf + monofbytes ;
rc2 = alsa_buffer_read(prtd, bufferp, monofbytes, NULL);
/* combined result of both half reads */
rc = rc1 + rc2;
}
prtd->pcm_buf_pos += fbytes;
return rc;
}
/* Report capture progress as the IRQ byte position, in frames. */
static snd_pcm_uframes_t
msm_pcm_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	struct msm_audio *audio = rt->private_data;

	return bytes_to_frames(rt, audio->pcm_irq_pos);
}
/*
 * Close the capture substream: stop AUDREC, release the audio manager
 * and both ADSP module references, then free the per-stream state.
 */
static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = runtime->private_data;
alsa_audrec_disable(prtd);
audmgr_close(&prtd->audmgr);
msm_adsp_put(prtd->audrec);
msm_adsp_put(prtd->audpre);
kfree(prtd);
return 0;
}
/* DSP event callbacks shared by the playback and capture paths. */
struct msm_audio_event_callbacks snd_msm_audio_ops = {
.playback = playback_event_handler,
.capture = capture_event_handler,
};
/*
 * Open callback: allocate per-stream state, select the direction's
 * hardware capabilities, apply rate and integer-period constraints,
 * and register the stream with the ADSP layer. All failure paths free
 * the allocated state before returning.
 */
static int msm_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd;
int ret = 0;
prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
if (prtd == NULL) {
ret = -ENOMEM;
return ret;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
runtime->hw = msm_pcm_playback_hardware;
prtd->dir = SNDRV_PCM_STREAM_PLAYBACK;
prtd->playback_substream = substream;
prtd->eos_ack = 0;
/* restore last volume/pan; result deliberately not checked */
ret = msm_audio_volume_update(PCMPLAYBACK_DECODERID,
msm_vol_ctl.volume, msm_vol_ctl.pan);
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw = msm_pcm_capture_hardware;
prtd->dir = SNDRV_PCM_STREAM_CAPTURE;
prtd->capture_substream = substream;
}
ret = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_sample_rates);
if (ret < 0)
goto out;
/* Ensure that buffer size is a multiple of period size */
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto out;
prtd->ops = &snd_msm_audio_ops;
prtd->out[0].used = BUF_INVALID_LEN;
prtd->out_head = 1; /* point to second buffer on startup */
runtime->private_data = prtd;
ret = alsa_adsp_configure(prtd);
if (ret)
goto out;
copy_count = 0;
return 0;
out:
kfree(prtd);
return ret;
}
/*
 * Copy playback data from userspace to the DSP. On the very first
 * copy after open() the decoder is configured under the global lock.
 * Returns the result of alsa_send_buffer().
 *
 * Fix: drop the dead initializers ('rc = 1', 'fbytes = 0') that were
 * unconditionally overwritten before first use.
 */
static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;
	int fbytes;
	int rc;

	fbytes = frames_to_bytes(runtime, frames);
	rc = alsa_send_buffer(prtd, buf, fbytes, NULL);

	/* Configure the decoder exactly once, on the first copy. */
	++copy_count;
	if (copy_count == 1) {
		mutex_lock(&the_locks.lock);
		alsa_audio_configure(prtd);
		mutex_unlock(&the_locks.lock);
	}
	return rc;
}
/*
 * Close the playback substream: wait for the DSP's end-of-stream
 * acknowledgement (if the stream was ever enabled), then disable the
 * audio path, close the audio manager session and free the state.
 *
 * Fix: the original stored wait_event_interruptible()'s result in a
 * local 'rc' that was never read (dead store / set-but-unused
 * warning); acknowledge an interrupted wait explicitly instead.
 */
static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	pr_debug("%s()\n", __func__);
	/* pcm dmamiss message is sent continously
	 * when decoder is starved so no race
	 * condition concern
	 */
	if (prtd->enabled &&
	    wait_event_interruptible(the_locks.eos_wait, prtd->eos_ack))
		pr_debug("%s: EOS wait interrupted by signal\n", __func__);
	alsa_audio_disable(prtd);
	audmgr_close(&prtd->audmgr);
	kfree(prtd);
	return 0;
}
/* Route the copy request to the direction-specific handler. */
static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
{
	switch (substream->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		return msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
	case SNDRV_PCM_STREAM_CAPTURE:
		return msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
	default:
		return 0;
	}
}
/* Dispatch close() to the playback or capture implementation. */
static int msm_pcm_close(struct snd_pcm_substream *substream)
{
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		return msm_pcm_playback_close(substream);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return msm_pcm_capture_close(substream);
	return 0;
}
/* Dispatch prepare() to the playback or capture implementation. */
static int msm_pcm_prepare(struct snd_pcm_substream *substream)
{
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		return msm_pcm_playback_prepare(substream);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return msm_pcm_capture_prepare(substream);
	return 0;
}
/* Dispatch the pointer query to the direction-specific handler. */
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		return msm_pcm_playback_pointer(substream);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return msm_pcm_capture_pointer(substream);
	return 0;
}
/*
 * hw_params callback: odd-numbered PCM devices are advertised as
 * non-interleaved; the preallocated coherent DMA buffer is attached
 * as the runtime buffer.
 */
int msm_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (substream->pcm->device & 1) {
runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED;
runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED;
}
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
return 0;
}
/*
 * Map the two-period DMA buffer into the caller's address space and
 * switch the stream into mmap mode.
 *
 * Fix: the original ignored dma_mmap_coherent()'s return value and
 * always reported success; propagate the mapping result instead.
 */
int msm_pcm_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct msm_audio *prtd = runtime->private_data;

	prtd->out_head = 0; /* point to first buffer on startup */
	prtd->mmap_flag = 1;
	runtime->dma_bytes = snd_pcm_lib_period_bytes(substream) * 2;
	return dma_mmap_coherent(substream->pcm->card->dev, vma,
				 runtime->dma_area,
				 runtime->dma_addr,
				 runtime->dma_bytes);
}
/* ALSA PCM operations table routing callbacks to the handlers above. */
static struct snd_pcm_ops msm_pcm_ops = {
.open = msm_pcm_open,
.copy = msm_pcm_copy,
.hw_params = msm_pcm_hw_params,
.close = msm_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.prepare = msm_pcm_prepare,
.trigger = msm_pcm_trigger,
.pointer = msm_pcm_pointer,
.mmap = msm_pcm_mmap,
};
/*
 * Allocate one coherent DMA buffer for a stream (playback or capture
 * sized) and record it in the substream's dma_buffer descriptor.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size;
/* stream 0 is SNDRV_PCM_STREAM_PLAYBACK */
if (!stream)
size = PLAYBACK_DMASZ;
else
size = CAPTURE_DMASZ;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
buf->area = dma_alloc_coherent(pcm->card->dev, size,
&buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->bytes = size;
return 0;
}
/* Release the coherent DMA buffer of each stream, if one was set up. */
static void msm_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
	int dir;

	for (dir = 0; dir < 2; dir++) {
		struct snd_pcm_substream *ss = pcm->streams[dir].substream;
		struct snd_dma_buffer *dmab;

		if (!ss)
			continue;
		dmab = &ss->dma_buffer;
		if (!dmab->area)
			continue;
		dma_free_coherent(pcm->card->dev, dmab->bytes,
				  dmab->area, dmab->addr);
		dmab->area = NULL;
	}
}
/*
 * pcm_new callback: create the playback and capture streams, attach
 * the shared ops table and preallocate their DMA buffers. If the
 * capture buffer allocation fails, the buffers allocated so far are
 * released before returning the error.
 */
static int msm_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
int ret;
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
/* DMA mask must be set before dma_alloc_coherent() is called */
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
ret = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_PLAYBACK, 1);
if (ret)
return ret;
ret = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_CAPTURE, 1);
if (ret)
return ret;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &msm_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &msm_pcm_ops);
ret = pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
return ret;
ret = pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE);
if (ret)
msm_pcm_free_dma_buffers(pcm);
return ret;
}
/* ASoC platform driver glue, exported for the machine driver. */
struct snd_soc_platform_driver msm_soc_platform = {
.ops = &msm_pcm_ops,
.pcm_new = msm_pcm_new,
.pcm_free = msm_pcm_free_dma_buffers,
};
EXPORT_SYMBOL(msm_soc_platform);
/* Bind: register this device as an ASoC platform. */
static __devinit int msm_pcm_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &msm_soc_platform);
}
/* Unbind: tear down the ASoC platform registration done in probe. */
static int msm_pcm_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
/* Platform driver binding against the "msm-dsp-audio" device name. */
static struct platform_driver msm_pcm_driver = {
.driver = {
.name = "msm-dsp-audio",
.owner = THIS_MODULE,
},
.probe = msm_pcm_probe,
.remove = __devexit_p(msm_pcm_remove),
};
/* Module init: register the platform driver with the driver core. */
static int __init msm_soc_platform_init(void)
{
return platform_driver_register(&msm_pcm_driver);
}
module_init(msm_soc_platform_init);
/* Module exit: unregister the platform driver. */
static void __exit msm_soc_platform_exit(void)
{
platform_driver_unregister(&msm_pcm_driver);
}
module_exit(msm_soc_platform_exit);
MODULE_DESCRIPTION("PCM module platform driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
shankarathi07/linux_lg_lollipop | arch/arm/mach-omap2/board-omap3logic.c | 4727 | 6442 | /*
* linux/arch/arm/mach-omap2/board-omap3logic.c
*
* Copyright (C) 2010 Li-Pro.Net
* Stephan Linz <linz@li-pro.net>
*
* Copyright (C) 2010 Logic Product Development, Inc.
* Peter Barada <peter.barada@logicpd.com>
*
* Modified from Beagle, EVM, and RX51
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mux.h"
#include "hsmmc.h"
#include "control.h"
#include "common-board-devices.h"
#include <plat/mux.h>
#include <plat/board.h>
#include "common.h"
#include <plat/gpmc-smsc911x.h>
#include <plat/gpmc.h>
#include <plat/sdrc.h>
#define OMAP3LOGIC_SMSC911X_CS 1
#define OMAP3530_LV_SOM_MMC_GPIO_CD 110
#define OMAP3530_LV_SOM_MMC_GPIO_WP 126
#define OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ 152
#define OMAP3_TORPEDO_MMC_GPIO_CD 127
#define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ 129
/* MMC1 consumer mapping for the TWL4030 VMMC1 regulator. */
static struct regulator_consumer_supply omap3logic_vmmc1_supply[] = {
REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data omap3logic_vmmc1 = {
.constraints = {
.name = "VMMC1",
.min_uV = 1850000,
.max_uV = 3150000,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(omap3logic_vmmc1_supply),
.consumer_supplies = omap3logic_vmmc1_supply,
};
/* TWL4030 GPIO expander setup: board-specific pull-up/pull-down map. */
static struct twl4030_gpio_platform_data omap3logic_gpio_data = {
.gpio_base = OMAP_MAX_GPIO_LINES,
.irq_base = TWL4030_GPIO_IRQ_BASE,
.irq_end = TWL4030_GPIO_IRQ_END,
.use_leds = true,
.pullups = BIT(1),
.pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8)
| BIT(13) | BIT(15) | BIT(16) | BIT(17),
};
/* Top-level TWL4030 PMIC platform data (GPIO + VMMC1 children). */
static struct twl4030_platform_data omap3logic_twldata = {
.irq_base = TWL4030_IRQ_BASE,
.irq_end = TWL4030_IRQ_END,
/* platform_data for children goes here */
.gpio = &omap3logic_gpio_data,
.vmmc1 = &omap3logic_vmmc1,
};
/* Register the TWL4030 PMIC on the standard OMAP3 PMIC I2C bus. */
static int __init omap3logic_i2c_init(void)
{
omap3_pmic_init("twl4030", &omap3logic_twldata);
return 0;
}
/*
 * HSMMC slot description; the card-detect / write-protect GPIOs are
 * filled in per-board by board_mmc_init() below.
 */
static struct omap2_hsmmc_info __initdata board_mmc_info[] = {
{
.name = "external",
.mmc = 1,
.caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
},
{} /* Terminator */
};
/*
 * Fill in the board-specific MMC card-detect / write-protect GPIOs,
 * mux those pads, and register the HSMMC controller. Unknown boards
 * are rejected with an error message.
 */
static void __init board_mmc_init(void)
{
if (machine_is_omap3530_lv_som()) {
/* OMAP3530 LV SOM board */
board_mmc_info[0].gpio_cd = OMAP3530_LV_SOM_MMC_GPIO_CD;
board_mmc_info[0].gpio_wp = OMAP3530_LV_SOM_MMC_GPIO_WP;
omap_mux_init_signal("gpio_110", OMAP_PIN_OUTPUT);
omap_mux_init_signal("gpio_126", OMAP_PIN_OUTPUT);
} else if (machine_is_omap3_torpedo()) {
/* OMAP3 Torpedo board */
board_mmc_info[0].gpio_cd = OMAP3_TORPEDO_MMC_GPIO_CD;
omap_mux_init_signal("gpio_127", OMAP_PIN_OUTPUT);
} else {
/* unsupported board */
printk(KERN_ERR "%s(): unknown machine type\n", __func__);
return;
}
omap_hsmmc_init(board_mmc_info);
}
/*
 * SMSC911x Ethernet on GPMC chip-select 1; the IRQ GPIO is assigned
 * per-board in board_smsc911x_init().
 */
static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
.cs = OMAP3LOGIC_SMSC911X_CS,
.gpio_irq = -EINVAL,
.gpio_reset = -EINVAL,
};
/* TODO/FIXME (comment by Peter Barada, LogicPD):
* Fix the PBIAS voltage for Torpedo MMC1 pins that
* are used for other needs (IRQs, etc). */
/*
 * On the Torpedo, MMC1 pads double as IRQ lines, so the PBIAS cell is
 * power-cycled and re-enabled at the higher voltage mode. The 100ms
 * settle delay between power-down and re-enable is required by the
 * PBIAS configuration sequence.
 */
static void omap3torpedo_fix_pbias_voltage(void)
{
u16 control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
u32 reg;
if (machine_is_omap3_torpedo())
{
/* Set the bias for the pin */
reg = omap_ctrl_readl(control_pbias_offset);
reg &= ~OMAP343X_PBIASLITEPWRDNZ1;
omap_ctrl_writel(reg, control_pbias_offset);
/* 100ms delay required for PBIAS configuration */
msleep(100);
reg |= OMAP343X_PBIASLITEVMODE1;
reg |= OMAP343X_PBIASLITEPWRDNZ1;
/* NOTE(review): extra 0x300 bits OR'ed in here are undocumented
 * in this file — presumably PBIASLITE bank-0 bits; confirm
 * against the OMAP343x control-module TRM. */
omap_ctrl_writel(reg | 0x300, control_pbias_offset);
}
}
/*
 * Pick the board-specific SMSC911x IRQ GPIO, mux it as an input and
 * register the chip on the GPMC. Unknown boards are rejected.
 */
static inline void __init board_smsc911x_init(void)
{
if (machine_is_omap3530_lv_som()) {
/* OMAP3530 LV SOM board */
board_smsc911x_data.gpio_irq =
OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ;
omap_mux_init_signal("gpio_152", OMAP_PIN_INPUT);
} else if (machine_is_omap3_torpedo()) {
/* OMAP3 Torpedo board */
board_smsc911x_data.gpio_irq = OMAP3_TORPEDO_SMSC911X_GPIO_IRQ;
omap_mux_init_signal("gpio_129", OMAP_PIN_INPUT);
} else {
/* unsupported board */
printk(KERN_ERR "%s(): unknown machine type\n", __func__);
return;
}
gpmc_smsc911x_init(&board_smsc911x_data);
}
/* Empty per-board pad mux table (terminator only). */
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif
/* Dummy regulators so the smsc911x probe finds its supplies. */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
};
/*
 * Machine init: regulators, pad mux, PBIAS fixup, PMIC, serial, SDRC,
 * MMC and Ethernet — in dependency order.
 */
static void __init omap3logic_init(void)
{
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3torpedo_fix_pbias_voltage();
omap3logic_i2c_init();
omap_serial_init();
omap_sdrc_init(NULL, NULL);
board_mmc_init();
board_smsc911x_init();
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
/* Machine descriptor for the Logic OMAP3 Torpedo board. */
MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
.atag_offset = 0x100,
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
/* Machine descriptor for the Logic OMAP3530 LV SOM board. */
MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
.atag_offset = 0x100,
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
| gpl-2.0 |
iJo09/Hybridmax_Kernel_I9505_Lollipop-1 | drivers/input/mouse/trackpoint.c | 4727 | 9796 | /*
* Stephen Evanchik <evanchsa@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* Trademarks are the property of their respective owners.
*/
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/serio.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/libps2.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include "psmouse.h"
#include "trackpoint.h"
/*
* Device IO: read, write and toggle bit
*/
/*
 * Read one byte from TrackPoint controller location 'loc' into
 * 'results'. Returns 0 on success, -1 on any PS/2 command failure.
 */
static int trackpoint_read(struct ps2dev *ps2dev, unsigned char loc, unsigned char *results)
{
	if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)))
		return -1;
	if (ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc)))
		return -1;

	return 0;
}
/*
 * Write 'val' to TrackPoint controller RAM location 'loc' using the
 * TP_COMMAND / TP_WRITE_MEM / address / value handshake. Returns 0 on
 * success, -1 on any PS/2 command failure.
 */
static int trackpoint_write(struct ps2dev *ps2dev, unsigned char loc, unsigned char val)
{
	if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)))
		return -1;
	if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)))
		return -1;
	if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, loc)))
		return -1;
	if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, val)))
		return -1;

	return 0;
}
/*
 * Flip the bits selected by 'mask' at controller RAM location 'loc'
 * via the TP_TOGGLE command. Returns 0 on success, -1 on failure or
 * out-of-range location.
 */
static int trackpoint_toggle_bit(struct ps2dev *ps2dev, unsigned char loc, unsigned char mask)
{
/* Bad things will happen if the loc param isn't in this range */
if (loc < 0x20 || loc >= 0x2F)
return -1;
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_TOGGLE)) ||
ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, loc)) ||
ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, mask))) {
return -1;
}
return 0;
}
/*
* Trackpoint-specific attributes
*/
/*
 * Binds a sysfs attribute to the cached trackpoint_data field it
 * shadows and the controller command (plus optional mask/inversion)
 * used to update the hardware.
 */
struct trackpoint_attr_data {
size_t field_offset; /* offset of the cached value in trackpoint_data */
unsigned char command; /* controller register / toggle command */
unsigned char mask; /* bit mask for toggle-style attributes */
unsigned char inverted; /* 1 if sysfs value is the inverse of the bit */
};
/*
 * sysfs show handler: print the cached value of the field described
 * by 'data', applying inversion for inverted-sense attributes.
 */
static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
if (attr->inverted)
value = !value;
return sprintf(buf, "%u\n", value);
}
/*
 * sysfs store handler for integer attributes: parse the new value,
 * cache it in trackpoint_data and push it to the controller register.
 */
static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
unsigned char value;
int err;
err = kstrtou8(buf, 10, &value);
if (err)
return err;
*field = value;
/* hardware write result intentionally not checked */
trackpoint_write(&psmouse->ps2dev, attr->command, value);
return count;
}
/*
 * Declare an integer sysfs attribute: one trackpoint_attr_data
 * descriptor plus the psmouse attribute wired to the show/set
 * handlers above.
 */
#define TRACKPOINT_INT_ATTR(_name, _command) \
static struct trackpoint_attr_data trackpoint_attr_##_name = { \
.field_offset = offsetof(struct trackpoint_data, _name), \
.command = _command, \
}; \
PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
&trackpoint_attr_##_name, \
trackpoint_show_int_attr, trackpoint_set_int_attr)
/*
 * sysfs store handler for single-bit attributes: parse a 0/1 value,
 * normalize for inverted-sense bits, and toggle the hardware bit only
 * when the cached state actually changes.
 */
static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
unsigned int value;
int err;
err = kstrtouint(buf, 10, &value);
if (err)
return err;
if (value > 1)
return -EINVAL;
if (attr->inverted)
value = !value;
if (*field != value) {
*field = value;
trackpoint_toggle_bit(&psmouse->ps2dev, attr->command, attr->mask);
}
return count;
}
/*
 * Declare a single-bit sysfs attribute (toggle command + mask, with
 * optional inverted sense).
 */
#define TRACKPOINT_BIT_ATTR(_name, _command, _mask, _inv) \
static struct trackpoint_attr_data trackpoint_attr_##_name = { \
.field_offset = offsetof(struct trackpoint_data, _name), \
.command = _command, \
.mask = _mask, \
.inverted = _inv, \
}; \
PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
&trackpoint_attr_##_name, \
trackpoint_show_int_attr, trackpoint_set_bit_attr)
/* Integer tunables exposed under the serio device in sysfs. */
TRACKPOINT_INT_ATTR(sensitivity, TP_SENS);
TRACKPOINT_INT_ATTR(speed, TP_SPEED);
TRACKPOINT_INT_ATTR(inertia, TP_INERTIA);
TRACKPOINT_INT_ATTR(reach, TP_REACH);
TRACKPOINT_INT_ATTR(draghys, TP_DRAGHYS);
TRACKPOINT_INT_ATTR(mindrag, TP_MINDRAG);
TRACKPOINT_INT_ATTR(thresh, TP_THRESH);
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV);
/* Bit tunables (ext_dev is inverted: sysfs 1 means bit cleared). */
TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0);
TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0);
TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1);
static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_sensitivity.dattr.attr,
&psmouse_attr_speed.dattr.attr,
&psmouse_attr_inertia.dattr.attr,
&psmouse_attr_reach.dattr.attr,
&psmouse_attr_draghys.dattr.attr,
&psmouse_attr_mindrag.dattr.attr,
&psmouse_attr_thresh.dattr.attr,
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,
NULL
};
static struct attribute_group trackpoint_attr_group = {
.attrs = trackpoint_attrs,
};
/*
 * Identify the device via TP_READ_ID. Returns 0 and optionally stores
 * the firmware revision if the magic identity byte matches, -1
 * otherwise.
 */
static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
{
unsigned char param[2] = { 0 };
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1;
if (param[0] != TP_MAGIC_IDENT)
return -1;
if (firmware_id)
*firmware_id = param[1];
return 0;
}
/*
 * Push the cached driver configuration to the device: first clear
 * firmware features this driver cannot handle, then write all integer
 * tunables, then reconcile each toggle bit with the cached state.
 * Called on detect and on reconnect after suspend/reset.
 */
static int trackpoint_sync(struct psmouse *psmouse)
{
struct trackpoint_data *tp = psmouse->private;
unsigned char toggle;
/* Disable features that may make device unusable with this driver */
trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_TWOHAND, &toggle);
if (toggle & TP_MASK_TWOHAND)
trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_TWOHAND, TP_MASK_TWOHAND);
trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_SOURCE_TAG, &toggle);
if (toggle & TP_MASK_SOURCE_TAG)
trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_SOURCE_TAG, TP_MASK_SOURCE_TAG);
trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_MB, &toggle);
if (toggle & TP_MASK_MB)
trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_MB, TP_MASK_MB);
/* Push the config to the device */
trackpoint_write(&psmouse->ps2dev, TP_SENS, tp->sensitivity);
trackpoint_write(&psmouse->ps2dev, TP_INERTIA, tp->inertia);
trackpoint_write(&psmouse->ps2dev, TP_SPEED, tp->speed);
trackpoint_write(&psmouse->ps2dev, TP_REACH, tp->reach);
trackpoint_write(&psmouse->ps2dev, TP_DRAGHYS, tp->draghys);
trackpoint_write(&psmouse->ps2dev, TP_MINDRAG, tp->mindrag);
trackpoint_write(&psmouse->ps2dev, TP_THRESH, tp->thresh);
trackpoint_write(&psmouse->ps2dev, TP_UP_THRESH, tp->upthresh);
trackpoint_write(&psmouse->ps2dev, TP_Z_TIME, tp->ztime);
trackpoint_write(&psmouse->ps2dev, TP_JENKS_CURV, tp->jenks);
/* toggle each bit only when device state differs from the cache */
trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_PTSON, &toggle);
if (((toggle & TP_MASK_PTSON) == TP_MASK_PTSON) != tp->press_to_select)
trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_PTSON, TP_MASK_PTSON);
trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_SKIPBACK, &toggle);
if (((toggle & TP_MASK_SKIPBACK) == TP_MASK_SKIPBACK) != tp->skipback)
trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK);
trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_EXT_DEV, &toggle);
if (((toggle & TP_MASK_EXT_DEV) == TP_MASK_EXT_DEV) != tp->ext_dev)
trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV);
return 0;
}
/* Populate every cached tunable with its TP_DEF_* power-on value. */
static void trackpoint_defaults(struct trackpoint_data *tp)
{
	tp->sensitivity		= TP_DEF_SENS;
	tp->speed		= TP_DEF_SPEED;
	tp->inertia		= TP_DEF_INERTIA;
	tp->reach		= TP_DEF_REACH;
	tp->draghys		= TP_DEF_DRAGHYS;
	tp->mindrag		= TP_DEF_MINDRAG;
	tp->thresh		= TP_DEF_THRESH;
	tp->upthresh		= TP_DEF_UP_THRESH;
	tp->ztime		= TP_DEF_Z_TIME;
	tp->jenks		= TP_DEF_JENKS_CURV;
	tp->press_to_select	= TP_DEF_PTSON;
	tp->skipback		= TP_DEF_SKIPBACK;
	tp->ext_dev		= TP_DEF_EXT_DEV;
}
static void trackpoint_disconnect(struct psmouse *psmouse)
{
sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
kfree(psmouse->private);
psmouse->private = NULL;
}
/* Re-identify the device and push the cached settings back to it. */
static int trackpoint_reconnect(struct psmouse *psmouse)
{
	if (trackpoint_start_protocol(psmouse, NULL) ||
	    trackpoint_sync(psmouse))
		return -1;

	return 0;
}
/*
 * Detect a TrackPoint behind the PS/2 port. With set_properties, also
 * allocate driver state, register the middle button when the device
 * reports three or more, sync defaults to the hardware and create the
 * sysfs attribute group. Returns 0 on success, -1/-ENOMEM on failure.
 */
int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char firmware_id;
unsigned char button_info;
int error;
if (trackpoint_start_protocol(psmouse, &firmware_id))
return -1;
if (!set_properties)
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
psmouse_warn(psmouse, "failed to get extended button data\n");
button_info = 0;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
if (!psmouse->private)
return -ENOMEM;
psmouse->vendor = "IBM";
psmouse->name = "TrackPoint";
psmouse->reconnect = trackpoint_reconnect;
psmouse->disconnect = trackpoint_disconnect;
/* low nibble of TP_EXT_BTN is the button count */
if ((button_info & 0x0f) >= 3)
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
trackpoint_defaults(psmouse->private);
trackpoint_sync(psmouse);
error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
if (error) {
psmouse_err(psmouse,
"failed to create sysfs attributes, error: %d\n",
error);
kfree(psmouse->private);
psmouse->private = NULL;
return -1;
}
psmouse_info(psmouse,
"IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
firmware_id,
(button_info & 0xf0) >> 4, button_info & 0x0f);
return 0;
}
| gpl-2.0 |
j03lpr86/android_kernel_samsung_afyonltetmo | arch/powerpc/mm/slice.c | 4727 | 20490 | /*
* address space "slices" (meta-segments) support
*
* Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
*
* Based on hugetlb implementation
*
* Copyright (C) 2003 David Gibson, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>
static DEFINE_SPINLOCK(slice_convert_lock);
#ifdef DEBUG
int _slice_debug = 1;
/*
 * Debug helper: render a slice_mask as two '0'/'1' bit strings (low
 * slices, then high slices) and print them with the given label.
 */
static void slice_print_mask(const char *label, struct slice_mask mask)
{
char *p, buf[16 + 3 + 16 + 1];
int i;
if (!_slice_debug)
return;
p = buf;
for (i = 0; i < SLICE_NUM_LOW; i++)
*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
*(p++) = ' ';
*(p++) = '-';
*(p++) = ' ';
for (i = 0; i < SLICE_NUM_HIGH; i++)
*(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
*(p++) = 0;
printk(KERN_DEBUG "%s:%s\n", label, buf);
}
#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)
#else
static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)
#endif
/*
 * Build the slice_mask covering the address range [start, start+len):
 * low slices for the part below SLICE_LOW_TOP, high slices for the
 * part above it.
 */
static struct slice_mask slice_range_to_mask(unsigned long start,
unsigned long len)
{
unsigned long end = start + len - 1;
struct slice_mask ret = { 0, 0 };
if (start < SLICE_LOW_TOP) {
unsigned long mend = min(end, SLICE_LOW_TOP);
unsigned long mstart = min(start, SLICE_LOW_TOP);
/* contiguous run of bits from mstart's slice to mend's slice */
ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
- (1u << GET_LOW_SLICE_INDEX(mstart));
}
if ((start + len) > SLICE_LOW_TOP)
ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
- (1u << GET_HIGH_SLICE_INDEX(start));
return ret;
}
/*
 * Return non-zero if [addr, addr+len) lies within the task's address
 * space and overlaps no existing VMA. Caller must hold mmap_sem.
 */
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
struct vm_area_struct *vma;
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
return (!vma || (addr + len) <= vma->vm_start);
}
/* Return non-zero if any VMA intersects the given low slice. */
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
1ul << SLICE_LOW_SHIFT);
}
/* Return non-zero if any VMA intersects the given high slice. */
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
unsigned long start = slice << SLICE_HIGH_SHIFT;
unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
/* Hack, so that each addresses is controlled by exactly one
 * of the high or low area bitmaps, the first high area starts
 * at 4GB, not 0 */
if (start == 0)
start = SLICE_LOW_TOP;
return !slice_area_is_free(mm, start, end - start);
}
/*
 * Compute the mask of slices that contain no VMA at all. High slices
 * are only scanned for tasks whose address space extends past
 * SLICE_LOW_TOP.
 */
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
struct slice_mask ret = { 0, 0 };
unsigned long i;
for (i = 0; i < SLICE_NUM_LOW; i++)
if (!slice_low_has_vma(mm, i))
ret.low_slices |= 1u << i;
if (mm->task_size <= SLICE_LOW_TOP)
return ret;
for (i = 0; i < SLICE_NUM_HIGH; i++)
if (!slice_high_has_vma(mm, i))
ret.high_slices |= 1u << i;
return ret;
}
/*
 * Compute the mask of slices currently set to page-size index 'psize'.
 * Slice psizes are packed four bits per slice in the context words.
 */
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
struct slice_mask ret = { 0, 0 };
unsigned long i;
u64 psizes;
psizes = mm->context.low_slices_psize;
for (i = 0; i < SLICE_NUM_LOW; i++)
if (((psizes >> (i * 4)) & 0xf) == psize)
ret.low_slices |= 1u << i;
psizes = mm->context.high_slices_psize;
for (i = 0; i < SLICE_NUM_HIGH; i++)
if (((psizes >> (i * 4)) & 0xf) == psize)
ret.high_slices |= 1u << i;
return ret;
}
/* Non-zero when every slice requested by 'mask' is in 'available'. */
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	if (mask.low_slices & ~available.low_slices)
		return 0;
	if (mask.high_slices & ~available.high_slices)
		return 0;
	return 1;
}
/*
 * IPI callback: if this CPU is running the mm whose slices changed,
 * refresh the paca's context copy and rebolt the SLB so the new slice
 * page sizes take effect.
 */
static void slice_flush_segments(void *parm)
{
struct mm_struct *mm = parm;
unsigned long flags;
if (mm != current->active_mm)
return;
/* update the paca copy of the context struct */
get_paca()->context = current->active_mm->context;
local_irq_save(flags);
slb_flush_and_rebolt();
local_irq_restore(flags);
}
/*
 * Set the page-size index of every slice in 'mask' to 'psize',
 * rewriting the packed 4-bit-per-slice context words under
 * slice_convert_lock, then notify SPUs of the change.
 */
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
/* Write the new slice psize bits */
u64 lpsizes, hpsizes;
unsigned long i, flags;
slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
slice_print_mask(" mask", mask);
/* We need to use a spinlock here to protect against
 * concurrent 64k -> 4k demotion ...
 */
spin_lock_irqsave(&slice_convert_lock, flags);
lpsizes = mm->context.low_slices_psize;
for (i = 0; i < SLICE_NUM_LOW; i++)
if (mask.low_slices & (1u << i))
lpsizes = (lpsizes & ~(0xful << (i * 4))) |
(((unsigned long)psize) << (i * 4));
hpsizes = mm->context.high_slices_psize;
for (i = 0; i < SLICE_NUM_HIGH; i++)
if (mask.high_slices & (1u << i))
hpsizes = (hpsizes & ~(0xful << (i * 4))) |
(((unsigned long)psize) << (i * 4));
mm->context.low_slices_psize = lpsizes;
mm->context.high_slices_psize = hpsizes;
slice_dbg(" lsps=%lx, hsps=%lx\n",
mm->context.low_slices_psize,
mm->context.high_slices_psize);
spin_unlock_irqrestore(&slice_convert_lock, flags);
#ifdef CONFIG_SPU_BASE
spu_flush_all_slbs(mm);
#endif
}
/*
 * Bottom-up unmapped-area search constrained to the slices in
 * 'available'. Alignment follows the larger of the page-size shift
 * and PAGE_SHIFT. When use_cache is set, the mm's free_area_cache and
 * cached_hole_size are consulted and updated, with one full restart
 * from TASK_UNMAPPED_BASE if the cached start missed a hole.
 */
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
int psize, int use_cache)
{
struct vm_area_struct *vma;
unsigned long start_addr, addr;
struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
if (use_cache) {
if (len <= mm->cached_hole_size) {
start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
} else
start_addr = addr = mm->free_area_cache;
} else
start_addr = addr = TASK_UNMAPPED_BASE;
full_search:
for (;;) {
addr = _ALIGN_UP(addr, 1ul << pshift);
if ((TASK_SIZE - len) < addr)
break;
vma = find_vma(mm, addr);
BUG_ON(vma && (addr >= vma->vm_end));
mask = slice_range_to_mask(addr, len);
/* skip whole slices that are not in the available mask */
if (!slice_check_fit(mask, available)) {
if (addr < SLICE_LOW_TOP)
addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
else
addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
continue;
}
if (!vma || addr + len <= vma->vm_start) {
/*
 * Remember the place where we stopped the search:
 */
if (use_cache)
mm->free_area_cache = addr + len;
return addr;
}
if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
}
/* Make sure we didn't miss any holes */
if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
}
/*
 * Top-down counterpart of slice_find_area_bottomup(): walk downward
 * from mm->mmap_base looking for a free, @psize-aligned range of @len
 * bytes inside slices permitted by @available.  Falls back to the
 * bottom-up search if nothing fits.  Returns the address or -ENOMEM.
 */
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	/* check if free_area_cache is useful for us */
	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/* either no address requested or can't fit in requested
		 * address hole
		 */
		addr = mm->free_area_cache;

		/* make sure it can fit in the remaining address space */
		if (addr > len) {
			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
			mask = slice_range_to_mask(addr, len);
			if (slice_check_fit(mask, available) &&
			    slice_area_is_free(mm, addr, len))
				/* remember the address as a hint for
				 * next time
				 */
				return (mm->free_area_cache = addr);
		}
	}

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			/* slide down to the previous slice boundary,
			 * crossing from high slices into the low area
			 * as needed */
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || (addr + len) <= vma->vm_start) {
			/* remember the address as a hint for next time */
			if (use_cache)
				mm->free_area_cache = addr;
			return addr;
		}

		/* remember the largest hole we saw so far */
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	addr = slice_find_area_bottomup(mm, len, available, psize, 0);

	/*
	 * Restore the topdown base:
	 */
	if (use_cache) {
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;
	}

	return addr;
}
/*
 * Dispatch the area search to the top-down or bottom-up walker,
 * depending on the caller's mmap layout preference.
 */
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, int use_cache)
{
	return topdown ?
		slice_find_area_topdown(mm, len, mask, psize, use_cache) :
		slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}
/* Set union: accumulate the slices of @src into @dst. */
#define or_mask(dst, src) do { \
	(dst).low_slices |= (src).low_slices; \
	(dst).high_slices |= (src).high_slices; \
} while (0)

/* Set difference: remove the slices of @src from @dst. */
#define andnot_mask(dst, src) do { \
	(dst).low_slices &= ~(src).low_slices; \
	(dst).high_slices &= ~(src).high_slices; \
} while (0)

/*
 * Base page size of this kernel build; conversions to anything larger
 * trigger a segment flush on all CPUs (see slice_get_unmapped_area()).
 */
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
/*
 * Core slice-aware unmapped-area allocator.
 *
 * Find (and, when necessary, convert) an address range of @len bytes
 * suitable for page size @psize, honouring MAP_FIXED or an address
 * hint in @addr.  The search order over "good"/"compat"/"free" slices
 * is documented in the policy comment below.  Returns the chosen
 * address or a negative errno.
 */
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown, int use_cache)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
		  addr, len, flags, topdown, use_cache);

	/* Reject lengths/addresses that can't possibly work. */
	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -EINVAL;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
					  use_cache);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown,
				       use_cache);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown, use_cache);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	/* Only the slices not already good/compat need conversion. */
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		/* growing beyond the base page size needs a segment
		 * flush on every CPU running this mm */
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
/*
 * Generic bottom-up mmap hook: defer to the slice-aware allocator with
 * the mm's current user page-size preference and caching enabled.
 */
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	unsigned int psize = current->mm->context.user_psize;

	return slice_get_unmapped_area(addr, len, flags, psize, 0, 1);
}
/*
 * Generic top-down mmap hook: same as arch_get_unmapped_area() but
 * searching downward from the mmap base.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	unsigned int psize = current->mm->context.user_psize;

	return slice_get_unmapped_area(addr0, len, flags, psize, 1, 1);
}
/*
 * Return the page-size index recorded for the slice containing @addr:
 * pick the low or high 4-bits-per-slice map and extract the field.
 */
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	int low = addr < SLICE_LOW_TOP;
	u64 bits = low ? mm->context.low_slices_psize :
		       mm->context.high_slices_psize;
	int slot = low ? GET_LOW_SLICE_INDEX(addr) :
		       GET_HIGH_SLICE_INDEX(addr);

	return (bits >> (slot * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	unsigned long flags, lpsizes, hpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	/* make the new user_psize visible before rewriting the maps */
	wmb();

	/* retag every slice still carrying the old default psize */
	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}
/*
 * Set the page-size index for the single slice containing @address.
 * The low or high map is selected by SLICE_LOW_TOP; the 4-bit field
 * for that slice is replaced under slice_convert_lock.
 */
void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned long i, flags;
	u64 *p;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		p = &mm->context.low_slices_psize;
	} else {
		i = GET_HIGH_SLICE_INDEX(address);
		p = &mm->context.high_slices_psize;
	}
	/* swap in the new 4-bit psize field for slice i */
	*p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
/*
 * Convert every slice overlapped by [start, start + len) to page size
 * @psize via slice_convert().
 */
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	slice_convert(mm, slice_range_to_mask(start, len), psize);
}
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	/* huge-only iff the range doesn't fit the normal-psize slices */
	return !slice_check_fit(mask, available);
}
| gpl-2.0 |
SnowDroid/kernel_lge_hammerhead | arch/arm/mach-omap2/board-omap3logic.c | 4727 | 6442 | /*
* linux/arch/arm/mach-omap2/board-omap3logic.c
*
* Copyright (C) 2010 Li-Pro.Net
* Stephan Linz <linz@li-pro.net>
*
* Copyright (C) 2010 Logic Product Development, Inc.
* Peter Barada <peter.barada@logicpd.com>
*
* Modified from Beagle, EVM, and RX51
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mux.h"
#include "hsmmc.h"
#include "control.h"
#include "common-board-devices.h"
#include <plat/mux.h>
#include <plat/board.h>
#include "common.h"
#include <plat/gpmc-smsc911x.h>
#include <plat/gpmc.h>
#include <plat/sdrc.h>
#define OMAP3LOGIC_SMSC911X_CS 1
#define OMAP3530_LV_SOM_MMC_GPIO_CD 110
#define OMAP3530_LV_SOM_MMC_GPIO_WP 126
#define OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ 152
#define OMAP3_TORPEDO_MMC_GPIO_CD 127
#define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ 129
/* Consumer powered by VMMC1: the first OMAP HSMMC controller's card supply. */
static struct regulator_consumer_supply omap3logic_vmmc1_supply[] = {
	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};

/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data omap3logic_vmmc1 = {
	.constraints = {
		.name			= "VMMC1",
		.min_uV			= 1850000,
		.max_uV			= 3150000,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL
					| REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
					| REGULATOR_CHANGE_MODE
					| REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(omap3logic_vmmc1_supply),
	.consumer_supplies	= omap3logic_vmmc1_supply,
};
/* TWL4030 GPIO block: internal pull-up/pull-down selection per board wiring. */
static struct twl4030_gpio_platform_data omap3logic_gpio_data = {
	.gpio_base	= OMAP_MAX_GPIO_LINES,
	.irq_base	= TWL4030_GPIO_IRQ_BASE,
	.irq_end	= TWL4030_GPIO_IRQ_END,
	.use_leds	= true,
	.pullups	= BIT(1),
	.pulldowns	= BIT(2) | BIT(6) | BIT(7) | BIT(8)
			| BIT(13) | BIT(15) | BIT(16) | BIT(17),
};

/* Top-level TWL4030 PMIC description passed to omap3_pmic_init(). */
static struct twl4030_platform_data omap3logic_twldata = {
	.irq_base	= TWL4030_IRQ_BASE,
	.irq_end	= TWL4030_IRQ_END,

	/* platform_data for children goes here */
	.gpio		= &omap3logic_gpio_data,
	.vmmc1		= &omap3logic_vmmc1,
};
/* Register the TWL4030 PMIC via the common OMAP3 helper; always returns 0. */
static int __init omap3logic_i2c_init(void)
{
	omap3_pmic_init("twl4030", &omap3logic_twldata);
	return 0;
}
/*
 * MMC1 (external slot).  Card-detect / write-protect GPIOs are left
 * invalid here and filled in by board_mmc_init() for the exact board.
 */
static struct omap2_hsmmc_info __initdata board_mmc_info[] = {
	{
		.name		= "external",
		.mmc		= 1,
		.caps		= MMC_CAP_4_BIT_DATA,
		.gpio_cd	= -EINVAL,
		.gpio_wp	= -EINVAL,
	},
	{}      /* Terminator */
};

/*
 * Fill in the board-specific CD/WP GPIOs, mux their pads, and register
 * the HSMMC controllers.  Logs and bails out on an unknown machine.
 */
static void __init board_mmc_init(void)
{
	if (machine_is_omap3530_lv_som()) {
		/* OMAP3530 LV SOM board */
		board_mmc_info[0].gpio_cd = OMAP3530_LV_SOM_MMC_GPIO_CD;
		board_mmc_info[0].gpio_wp = OMAP3530_LV_SOM_MMC_GPIO_WP;
		omap_mux_init_signal("gpio_110", OMAP_PIN_OUTPUT);
		omap_mux_init_signal("gpio_126", OMAP_PIN_OUTPUT);
	} else if (machine_is_omap3_torpedo()) {
		/* OMAP3 Torpedo board */
		board_mmc_info[0].gpio_cd = OMAP3_TORPEDO_MMC_GPIO_CD;
		omap_mux_init_signal("gpio_127", OMAP_PIN_OUTPUT);
	} else {
		/* unsupported board */
		printk(KERN_ERR "%s(): unknown machine type\n", __func__);
		return;
	}

	omap_hsmmc_init(board_mmc_info);
}
/* SMSC911x Ethernet on GPMC chip-select 1; IRQ GPIO chosen per board later. */
static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
	.cs		= OMAP3LOGIC_SMSC911X_CS,
	.gpio_irq	= -EINVAL,
	.gpio_reset	= -EINVAL,
};

/* TODO/FIXME (comment by Peter Barada, LogicPD):
 * Fix the PBIAS voltage for Torpedo MMC1 pins that
 * are used for other needs (IRQs, etc). */
static void omap3torpedo_fix_pbias_voltage(void)
{
	u16 control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
	u32 reg;

	if (machine_is_omap3_torpedo())
	{
		/* Set the bias for the pin */
		reg = omap_ctrl_readl(control_pbias_offset);
		/* power down the PBIAS cell before reprogramming */
		reg &= ~OMAP343X_PBIASLITEPWRDNZ1;
		omap_ctrl_writel(reg, control_pbias_offset);

		/* 100ms delay required for PBIAS configuration */
		msleep(100);

		reg |= OMAP343X_PBIASLITEVMODE1;
		reg |= OMAP343X_PBIASLITEPWRDNZ1;
		/* NOTE(review): the extra 0x300 ORed in here is not
		 * explained — verify against the OMAP3530 control-module
		 * register layout */
		omap_ctrl_writel(reg | 0x300, control_pbias_offset);
	}
}
/*
 * Pick the board-specific SMSC911x IRQ GPIO, mux it as an input, and
 * register the controller on the GPMC.  Logs and bails out on an
 * unknown machine.
 */
static inline void __init board_smsc911x_init(void)
{
	if (machine_is_omap3530_lv_som()) {
		/* OMAP3530 LV SOM board */
		board_smsc911x_data.gpio_irq =
			OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ;
		omap_mux_init_signal("gpio_152", OMAP_PIN_INPUT);
	} else if (machine_is_omap3_torpedo()) {
		/* OMAP3 Torpedo board */
		board_smsc911x_data.gpio_irq = OMAP3_TORPEDO_SMSC911X_GPIO_IRQ;
		omap_mux_init_signal("gpio_129", OMAP_PIN_INPUT);
	} else {
		/* unsupported board */
		printk(KERN_ERR "%s(): unknown machine type\n", __func__);
		return;
	}

	gpmc_smsc911x_init(&board_smsc911x_data);
}
#ifdef CONFIG_OMAP_MUX
/* No board-specific pad overrides: terminator-only mux table. */
static struct omap_board_mux board_mux[] __initdata = {
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif

/* Fixed dummy supplies consumed by the smsc911x driver. */
static struct regulator_consumer_supply dummy_supplies[] = {
	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
};

/* Common machine init shared by the Torpedo and LV SOM boards. */
static void __init omap3logic_init(void)
{
	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	omap3torpedo_fix_pbias_voltage();
	omap3logic_i2c_init();
	omap_serial_init();
	omap_sdrc_init(NULL, NULL);
	board_mmc_init();
	board_smsc911x_init();

	/* Ensure SDRC pins are mux'd for self-refresh */
	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
/* Machine descriptor for the LogicPD OMAP3 Torpedo (ATAG boot). */
MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap35xx_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= omap3logic_init,
	.timer		= &omap3_timer,
	.restart	= omap_prcm_restart,
MACHINE_END

/* Machine descriptor for the LogicPD OMAP3530 LV SOM — same callbacks. */
MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap35xx_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= omap3logic_init,
	.timer		= &omap3_timer,
	.restart	= omap_prcm_restart,
MACHINE_END
| gpl-2.0 |
Ander-Alvarez/UltraPluscondor | arch/arm/mach-spear6xx/spear6xx.c | 4727 | 3057 | /*
* arch/arm/mach-spear6xx/spear6xx.c
*
* SPEAr6XX machines common source file
*
* Copyright (C) 2009 ST Microelectronics
* Rajeev Kumar<rajeev-dlh.kumar@st.com>
*
* Copyright 2012 Stefan Roese <sr@denx.de>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <mach/generic.h>
#include <mach/hardware.h>
/* Following will create static virtual/physical mappings */
static struct map_desc spear6xx_io_desc[] __initdata = {
	{
		/* UART0 */
		.virtual	= VA_SPEAR6XX_ICM1_UART0_BASE,
		.pfn		= __phys_to_pfn(SPEAR6XX_ICM1_UART0_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	}, {
		/* primary VIC */
		.virtual	= VA_SPEAR6XX_CPU_VIC_PRI_BASE,
		.pfn		= __phys_to_pfn(SPEAR6XX_CPU_VIC_PRI_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	}, {
		/* secondary VIC */
		.virtual	= VA_SPEAR6XX_CPU_VIC_SEC_BASE,
		.pfn		= __phys_to_pfn(SPEAR6XX_CPU_VIC_SEC_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	}, {
		/* system control registers */
		.virtual	= VA_SPEAR6XX_ICM3_SYS_CTRL_BASE,
		.pfn		= __phys_to_pfn(SPEAR6XX_ICM3_SYS_CTRL_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	}, {
		/* miscellaneous registers */
		.virtual	= VA_SPEAR6XX_ICM3_MISC_REG_BASE,
		.pfn		= __phys_to_pfn(SPEAR6XX_ICM3_MISC_REG_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
};

/* This will create static memory mapping for selected devices */
void __init spear6xx_map_io(void)
{
	iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));

	/* This will initialize clock framework */
	spear6xx_clk_init();
}
/*
 * Reparent general-purpose timer 0 onto pll3_48m_clk and start the
 * SPEAr system timer.  BUG()s if either clock is missing, since the
 * system cannot run without its timebase.
 */
static void __init spear6xx_timer_init(void)
{
	char pclk_name[] = "pll3_48m_clk";
	struct clk *gpt_clk, *pclk;

	/* get the system timer clock */
	gpt_clk = clk_get_sys("gpt0", NULL);
	if (IS_ERR(gpt_clk)) {
		pr_err("%s:couldn't get clk for gpt\n", __func__);
		BUG();
	}

	/* get the suitable parent clock for timer*/
	pclk = clk_get(NULL, pclk_name);
	if (IS_ERR(pclk)) {
		pr_err("%s:couldn't get %s as parent for gpt\n",
		       __func__, pclk_name);
		BUG();
	}

	clk_set_parent(gpt_clk, pclk);
	/* drop the references taken above; the timer keeps its own */
	clk_put(gpt_clk);
	clk_put(pclk);

	spear_setup_timer();
}

/* System timer descriptor handed to the machine definition below. */
struct sys_timer spear6xx_timer = {
	.init = spear6xx_timer_init,
};
/* Populate platform devices from the flattened device tree. */
static void __init spear600_dt_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}

/* Board compatibles this machine descriptor matches against. */
static const char *spear600_dt_board_compat[] = {
	"st,spear600",
	NULL
};

/* Interrupt controllers instantiated from the DT: PL190 VIC. */
static const struct of_device_id vic_of_match[] __initconst = {
	{ .compatible = "arm,pl190-vic", .data = vic_of_init, },
	{ /* Sentinel */ }
};

/* Initialise all VICs described in the device tree. */
static void __init spear6xx_dt_init_irq(void)
{
	of_irq_init(vic_of_match);
}

DT_MACHINE_START(SPEAR600_DT, "ST SPEAr600 (Flattened Device Tree)")
	.map_io		= spear6xx_map_io,
	.init_irq	= spear6xx_dt_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &spear6xx_timer,
	.init_machine	= spear600_dt_init,
	.restart	= spear_restart,
	.dt_compat	= spear600_dt_board_compat,
MACHINE_END
| gpl-2.0 |
civato/CivZ-KatEngine-SM9005 | drivers/scsi/ips.c | 4983 | 241346 | /*****************************************************************************/
/* ips.c -- driver for the Adaptec / IBM ServeRAID controller */
/* */
/* Written By: Keith Mitchell, IBM Corporation */
/* Jack Hammer, Adaptec, Inc. */
/* David Jeffery, Adaptec, Inc. */
/* */
/* Copyright (C) 2000 IBM Corporation */
/* Copyright (C) 2002,2003 Adaptec, Inc. */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* NO WARRANTY */
/* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR */
/* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT */
/* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, */
/* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is */
/* solely responsible for determining the appropriateness of using and */
/* distributing the Program and assumes all risks associated with its */
/* exercise of rights under this Agreement, including but not limited to */
/* the risks and costs of program errors, damage to or loss of data, */
/* programs or equipment, and unavailability or interruption of operations. */
/* */
/* DISCLAIMER OF LIABILITY */
/* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY */
/* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL */
/* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND */
/* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR */
/* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE */
/* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED */
/* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* */
/* Bugs/Comments/Suggestions about this driver should be mailed to: */
/* ipslinux@adaptec.com */
/* */
/* For system support issues, contact your local IBM Customer support. */
/* Directions to find IBM Customer Support for each country can be found at: */
/* http://www.ibm.com/planetwide/ */
/* */
/*****************************************************************************/
/*****************************************************************************/
/* Change Log */
/* */
/* 0.99.02 - Breakup commands that are bigger than 8 * the stripe size */
/* 0.99.03 - Make interrupt routine handle all completed request on the */
/* adapter not just the first one */
/* - Make sure passthru commands get woken up if we run out of */
/* SCBs */
/* - Send all of the commands on the queue at once rather than */
/* one at a time since the card will support it. */
/* 0.99.04 - Fix race condition in the passthru mechanism -- this required */
/* the interface to the utilities to change */
/* - Fix error recovery code */
/* 0.99.05 - Fix an oops when we get certain passthru commands */
/* 1.00.00 - Initial Public Release */
/* Functionally equivalent to 0.99.05 */
/* 3.60.00 - Bump max commands to 128 for use with firmware 3.60 */
/* - Change version to 3.60 to coincide with release numbering. */
/* 3.60.01 - Remove bogus error check in passthru routine */
/* 3.60.02 - Make DCDB direction based on lookup table */
/* - Only allow one DCDB command to a SCSI ID at a time */
/* 4.00.00 - Add support for ServeRAID 4 */
/* 4.00.01 - Add support for First Failure Data Capture */
/* 4.00.02 - Fix problem with PT DCDB with no buffer */
/* 4.00.03 - Add alternative passthru interface */
/* - Add ability to flash BIOS */
/* 4.00.04 - Rename structures/constants to be prefixed with IPS_ */
/* 4.00.05 - Remove wish_block from init routine */
/* - Use linux/spinlock.h instead of asm/spinlock.h for kernels */
/* 2.3.18 and later */
/* - Sync with other changes from the 2.3 kernels */
/* 4.00.06 - Fix timeout with initial FFDC command */
/* 4.00.06a - Port to 2.4 (trivial) -- Christoph Hellwig <hch@infradead.org> */
/* 4.10.00 - Add support for ServeRAID 4M/4L */
/* 4.10.13 - Fix for dynamic unload and proc file system */
/* 4.20.03 - Rename version to coincide with new release schedules */
/* Performance fixes */
/* Fix truncation of /proc files with cat */
/* Merge in changes through kernel 2.4.0test1ac21 */
/* 4.20.13 - Fix some failure cases / reset code */
/* - Hook into the reboot_notifier to flush the controller cache */
/* 4.50.01 - Fix problem when there is a hole in logical drive numbering */
/* 4.70.09 - Use a Common ( Large Buffer ) for Flashing from the JCRM CD */
/* - Add IPSSEND Flash Support */
/* - Set Sense Data for Unknown SCSI Command */
/* - Use Slot Number from NVRAM Page 5 */
/* - Restore caller's DCDB Structure */
/* 4.70.12 - Corrective actions for bad controller ( during initialization )*/
/* 4.70.13 - Don't Send CDB's if we already know the device is not present */
/* - Don't release HA Lock in ips_next() until SC taken off queue */
/* - Unregister SCSI device in ips_release() */
/* 4.70.15 - Fix Breakup for very large ( non-SG ) requests in ips_done() */
/* 4.71.00 - Change all memory allocations to not use GFP_DMA flag */
/* Code Clean-Up for 2.4.x kernel */
/* 4.72.00 - Allow for a Scatter-Gather Element to exceed MAX_XFER Size */
/* 4.72.01 - I/O Mapped Memory release ( so "insmod ips" does not Fail ) */
/* - Don't Issue Internal FFDC Command if there are Active Commands */
/* - Close Window for getting too many IOCTL's active */
/* 4.80.00 - Make ia64 Safe */
/* 4.80.04 - Eliminate calls to strtok() if 2.4.x or greater */
/* - Adjustments to Device Queue Depth */
/* 4.80.14 - Take all semaphores off stack */
/* - Clean Up New_IOCTL path */
/* 4.80.20 - Set max_sectors in Scsi_Host structure ( if >= 2.4.7 kernel ) */
/* - 5 second delay needed after resetting an i960 adapter */
/* 4.80.26 - Clean up potential code problems ( Arjan's recommendations ) */
/* 4.90.01 - Version Matching for FirmWare, BIOS, and Driver */
/* 4.90.05 - Use New PCI Architecture to facilitate Hot Plug Development */
/* 4.90.08 - Increase Delays in Flashing ( Trombone Only - 4H ) */
/* 4.90.08 - Data Corruption if First Scatter Gather Element is > 64K */
/* 4.90.11 - Don't actually RESET unless it's physically required */
/* - Remove unused compile options */
/* 5.00.01 - Sarasota ( 5i ) adapters must always be scanned first */
/* - Get rid on IOCTL_NEW_COMMAND code */
/* - Add Extended DCDB Commands for Tape Support in 5I */
/* 5.10.12 - use pci_dma interfaces, update for 2.5 kernel changes */
/* 5.10.15 - remove unused code (sem, macros, etc.) */
/* 5.30.00 - use __devexit_p() */
/* 6.00.00 - Add 6x Adapters and Battery Flash */
/* 6.10.00 - Remove 1G Addressing Limitations */
/* 6.11.xx - Get VersionInfo buffer off the stack ! DDTS 60401 */
/* 6.11.xx - Make Logical Drive Info structure safe for DMA DDTS 60639 */
/* 7.10.18 - Add highmem_io flag in SCSI Template for 2.4 kernels */
/* - Fix path/name for scsi_hosts.h include for 2.6 kernels */
/* - Fix sort order of 7k */
/* - Remove 3 unused "inline" functions */
/* 7.12.xx - Use STATIC functions wherever possible */
/* - Clean up deprecated MODULE_PARM calls */
/* 7.12.05 - Remove Version Matching per IBM request */
/*****************************************************************************/
/*
* Conditional Compilation directives for this driver:
*
* IPS_DEBUG - Turn on debugging info
*
* Parameters:
*
* debug:<number> - Set debug level to <number>
* NOTE: only works when IPS_DEBUG compile directive is used.
* 1 - Normal debug messages
* 2 - Verbose debug messages
* 11 - Method trace (non interrupt)
* 12 - Method trace (includes interrupt)
*
* noi2o - Don't use I2O Queues (ServeRAID 4 only)
* nommap - Don't use memory mapped I/O
* ioctlsize - Initial size of the IOCTL buffer
*/
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <scsi/sg.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "ips.h"
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#ifdef MODULE
static char *ips = NULL;
module_param(ips, charp, 0);
#endif
/*
* DRIVER_VER
*/
#define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING
#define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " "
#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
#endif
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
PCI_DMA_BIDIRECTIONAL : \
scb->scsi_cmd->sc_data_direction)
#ifdef IPS_DEBUG
#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
#define DEBUG(i, s) if (ips_debug >= i) printk(KERN_NOTICE s "\n");
#define DEBUG_VAR(i, s, v...) if (ips_debug >= i) printk(KERN_NOTICE s "\n", v);
#else
#define METHOD_TRACE(s, i)
#define DEBUG(i, s)
#define DEBUG_VAR(i, s, v...)
#endif
/*
* Function prototypes
*/
static int ips_detect(struct scsi_host_template *);
static int ips_release(struct Scsi_Host *);
static int ips_eh_abort(struct scsi_cmnd *);
static int ips_eh_reset(struct scsi_cmnd *);
static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *);
static const char *ips_info(struct Scsi_Host *);
static irqreturn_t do_ipsintr(int, void *);
static int ips_hainit(ips_ha_t *);
static int ips_map_status(ips_ha_t *, ips_scb_t *, ips_stat_t *);
static int ips_send_wait(ips_ha_t *, ips_scb_t *, int, int);
static int ips_send_cmd(ips_ha_t *, ips_scb_t *);
static int ips_online(ips_ha_t *, ips_scb_t *);
static int ips_inquiry(ips_ha_t *, ips_scb_t *);
static int ips_rdcap(ips_ha_t *, ips_scb_t *);
static int ips_msense(ips_ha_t *, ips_scb_t *);
static int ips_reqsen(ips_ha_t *, ips_scb_t *);
static int ips_deallocatescbs(ips_ha_t *, int);
static int ips_allocatescbs(ips_ha_t *);
static int ips_reset_copperhead(ips_ha_t *);
static int ips_reset_copperhead_memio(ips_ha_t *);
static int ips_reset_morpheus(ips_ha_t *);
static int ips_issue_copperhead(ips_ha_t *, ips_scb_t *);
static int ips_issue_copperhead_memio(ips_ha_t *, ips_scb_t *);
static int ips_issue_i2o(ips_ha_t *, ips_scb_t *);
static int ips_issue_i2o_memio(ips_ha_t *, ips_scb_t *);
static int ips_isintr_copperhead(ips_ha_t *);
static int ips_isintr_copperhead_memio(ips_ha_t *);
static int ips_isintr_morpheus(ips_ha_t *);
static int ips_wait(ips_ha_t *, int, int);
static int ips_write_driver_status(ips_ha_t *, int);
static int ips_read_adapter_status(ips_ha_t *, int);
static int ips_read_subsystem_parameters(ips_ha_t *, int);
static int ips_read_config(ips_ha_t *, int);
static int ips_clear_adapter(ips_ha_t *, int);
static int ips_readwrite_page5(ips_ha_t *, int, int);
static int ips_init_copperhead(ips_ha_t *);
static int ips_init_copperhead_memio(ips_ha_t *);
static int ips_init_morpheus(ips_ha_t *);
static int ips_isinit_copperhead(ips_ha_t *);
static int ips_isinit_copperhead_memio(ips_ha_t *);
static int ips_isinit_morpheus(ips_ha_t *);
static int ips_erase_bios(ips_ha_t *);
static int ips_program_bios(ips_ha_t *, char *, uint32_t, uint32_t);
static int ips_verify_bios(ips_ha_t *, char *, uint32_t, uint32_t);
static int ips_erase_bios_memio(ips_ha_t *);
static int ips_program_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
static int ips_verify_bios_memio(ips_ha_t *, char *, uint32_t, uint32_t);
static int ips_flash_copperhead(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
static int ips_flash_bios(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
static int ips_flash_firmware(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
static void ips_free_flash_copperhead(ips_ha_t * ha);
static void ips_get_bios_version(ips_ha_t *, int);
static void ips_identify_controller(ips_ha_t *);
static void ips_chkstatus(ips_ha_t *, IPS_STATUS *);
static void ips_enable_int_copperhead(ips_ha_t *);
static void ips_enable_int_copperhead_memio(ips_ha_t *);
static void ips_enable_int_morpheus(ips_ha_t *);
static int ips_intr_copperhead(ips_ha_t *);
static int ips_intr_morpheus(ips_ha_t *);
static void ips_next(ips_ha_t *, int);
static void ipsintr_blocking(ips_ha_t *, struct ips_scb *);
static void ipsintr_done(ips_ha_t *, struct ips_scb *);
static void ips_done(ips_ha_t *, ips_scb_t *);
static void ips_free(ips_ha_t *);
static void ips_init_scb(ips_ha_t *, ips_scb_t *);
static void ips_freescb(ips_ha_t *, ips_scb_t *);
static void ips_setup_funclist(ips_ha_t *);
static void ips_statinit(ips_ha_t *);
static void ips_statinit_memio(ips_ha_t *);
static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t);
static void ips_ffdc_reset(ips_ha_t *, int);
static void ips_ffdc_time(ips_ha_t *);
static uint32_t ips_statupd_copperhead(ips_ha_t *);
static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
static uint32_t ips_statupd_morpheus(ips_ha_t *);
static ips_scb_t *ips_getscb(ips_ha_t *);
static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
static void ips_putq_copp_tail(ips_copp_queue_t *,
ips_copp_wait_item_t *);
static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
struct scsi_cmnd *);
static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
ips_copp_wait_item_t *);
static ips_copp_wait_item_t *ips_removeq_copp_head(ips_copp_queue_t *);
static int ips_is_passthru(struct scsi_cmnd *);
static int ips_make_passthru(ips_ha_t *, struct scsi_cmnd *, ips_scb_t *, int);
static int ips_usrcmd(ips_ha_t *, ips_passthru_t *, ips_scb_t *);
static void ips_cleanup_passthru(ips_ha_t *, ips_scb_t *);
static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data,
unsigned int count);
static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data,
unsigned int count);
static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
static int ips_host_info(ips_ha_t *, char *, off_t, int);
static void copy_mem_info(IPS_INFOSTR *, char *, int);
static int copy_info(IPS_INFOSTR *, char *, ...);
static int ips_abort_init(ips_ha_t * ha, int index);
static int ips_init_phase2(int index);
static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
static int ips_register_scsi(int index);
static int ips_poll_for_flush_complete(ips_ha_t * ha);
static void ips_flush_and_reset(ips_ha_t *ha);
/*
* global variables
*/
static const char ips_name[] = "ips";
static struct Scsi_Host *ips_sh[IPS_MAX_ADAPTERS]; /* Array of host controller structures */
static ips_ha_t *ips_ha[IPS_MAX_ADAPTERS]; /* Array of HA structures */
static unsigned int ips_next_controller;
static unsigned int ips_num_controllers;
static unsigned int ips_released_controllers;
static int ips_hotplug;
static int ips_cmd_timeout = 60;
static int ips_reset_timeout = 60 * 5;
static int ips_force_memio = 1; /* Always use Memory Mapped I/O */
static int ips_force_i2o = 1; /* Always use I2O command delivery */
static int ips_ioctlsize = IPS_IOCTL_SIZE; /* Size of the ioctl buffer */
static int ips_cd_boot; /* Booting from Manager CD */
static char *ips_FlashData = NULL; /* CD Boot - Flash Data Buffer */
static dma_addr_t ips_flashbusaddr;
static long ips_FlashDataInUse; /* CD Boot - Flash Data In Use Flag */
static uint32_t MaxLiteCmds = 32; /* Max Active Cmds for a Lite Adapter */
/* SCSI mid-layer host template: wires the driver's entry points into the
 * SCSI core (detect/release lifecycle, command queueing, error handling,
 * /proc interface, and BIOS geometry reporting). */
static struct scsi_host_template ips_driver_template = {
	.detect = ips_detect,
	.release = ips_release,
	.info = ips_info,
	.queuecommand = ips_queue,
	.eh_abort_handler = ips_eh_abort,
	.eh_host_reset_handler = ips_eh_reset,
	.proc_name = "ips",
	.proc_info = ips_proc_info,
	.slave_configure = ips_slave_configure,
	.bios_param = ips_biosparam,
	.this_id = -1,			/* no initiator ID restriction */
	.sg_tablesize = IPS_MAX_SG,	/* max scatter/gather entries per cmd */
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
};
/* This table describes all ServeRAID Adapters */
static struct pci_device_id ips_pci_table[] = {
{ 0x1014, 0x002E, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
{ 0x1014, 0x01BD, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
{ 0x9005, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
{ 0, }
};
MODULE_DEVICE_TABLE( pci, ips_pci_table );
static char ips_hot_plug_name[] = "ips";
static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
static void __devexit ips_remove_device(struct pci_dev *pci_dev);
static struct pci_driver ips_pci_driver = {
.name = ips_hot_plug_name,
.id_table = ips_pci_table,
.probe = ips_insert_device,
.remove = __devexit_p(ips_remove_device),
};
/*
 * Necessary forward function prototypes
*/
static int ips_halt(struct notifier_block *nb, ulong event, void *buf);
#define MAX_ADAPTER_NAME 15
static char ips_adapter_name[][30] = {
"ServeRAID",
"ServeRAID II",
"ServeRAID on motherboard",
"ServeRAID on motherboard",
"ServeRAID 3H",
"ServeRAID 3L",
"ServeRAID 4H",
"ServeRAID 4M",
"ServeRAID 4L",
"ServeRAID 4Mx",
"ServeRAID 4Lx",
"ServeRAID 5i",
"ServeRAID 5i",
"ServeRAID 6M",
"ServeRAID 6i",
"ServeRAID 7t",
"ServeRAID 7k",
"ServeRAID 7M"
};
static struct notifier_block ips_notifier = {
ips_halt, NULL, 0
};
/*
* Direction table
*/
static char ips_command_direction[] = {
IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT,
IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK,
IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_OUT,
IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_OUT,
IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_IN,
IPS_DATA_UNK, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_UNK,
IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
IPS_DATA_OUT, IPS_DATA_NONE, IPS_DATA_IN, IPS_DATA_NONE, IPS_DATA_NONE,
IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT,
IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_OUT,
IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_IN, IPS_DATA_NONE,
IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK,
IPS_DATA_NONE, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_IN, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_NONE, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_NONE,
IPS_DATA_OUT, IPS_DATA_UNK, IPS_DATA_NONE, IPS_DATA_UNK, IPS_DATA_OUT,
IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_OUT, IPS_DATA_NONE,
IPS_DATA_UNK, IPS_DATA_IN, IPS_DATA_OUT, IPS_DATA_IN, IPS_DATA_IN,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_OUT,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK,
IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK, IPS_DATA_UNK
};
/****************************************************************************/
/* */
/* Routine Name: ips_setup */
/* */
/* Routine Description: */
/* */
/* setup parameters to the driver */
/* */
/****************************************************************************/
/* Parse the "ips=" boot / module option string.  Tokens are separated by
 * ',' or '.' and have the form "key" or "key:value"; recognized keys update
 * the corresponding driver-global flag.  Always returns 1. */
static int
ips_setup(char *ips_str)
{
	int idx;
	char *token;
	char *val;
	IPS_OPTION options[] = {
		{"noi2o", &ips_force_i2o, 0},
		{"nommap", &ips_force_memio, 0},
		{"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
		{"cdboot", &ips_cd_boot, 0},
		{"maxcmds", &MaxLiteCmds, 32},
	};

	/* Walk the option string one "key[:value]" token at a time. */
	while ((token = strsep(&ips_str, ",."))) {
		if (!*token)
			continue;

		/* Split off an optional ":value" suffix. */
		val = strchr(token, ':');
		if (val)
			*val++ = '\0';

		/* Match the key against the known options; first hit wins. */
		for (idx = 0; idx < ARRAY_SIZE(options); idx++) {
			if (strnicmp(token, options[idx].option_name,
				     strlen(options[idx].option_name)) != 0)
				continue;

			if (val)
				*options[idx].option_flag =
				    simple_strtoul(val, NULL, 0);
			else
				*options[idx].option_flag =
				    options[idx].option_value;
			break;
		}
	}

	return (1);
}
__setup("ips=", ips_setup);
/****************************************************************************/
/* */
/* Routine Name: ips_detect */
/* */
/* Routine Description: */
/* */
/* Detect and initialize the driver */
/* */
/* NOTE: this routine is called under the io_request_lock spinlock */
/* */
/****************************************************************************/
/* SCSI-core "detect" entry point: apply any module option string, register
 * every controller found during PCI probing with the SCSI layer, and report
 * how many controllers exist.  Called under the io_request_lock. */
static int
ips_detect(struct scsi_host_template * SHT)
{
	int index;

	METHOD_TRACE("ips_detect", 1);

#ifdef MODULE
	if (ips)
		ips_setup(ips);
#endif

	for (index = 0; index < ips_num_controllers; index++) {
		/* A failed registration releases the adapter's resources. */
		if (ips_register_scsi(index))
			ips_free(ips_ha[index]);
		ips_released_controllers++;
	}

	ips_hotplug = 1;

	return (ips_num_controllers);
}
/****************************************************************************/
/* configure the function pointers to use the functions that will work */
/* with the found version of the adapter */
/****************************************************************************/
/* Populate ha->func with the hardware-access routines matching the detected
 * adapter family: Morpheus/Marco (always memory-mapped I2O), Copperhead with
 * memory-mapped I/O, or plain Copperhead (port I/O). */
static void
ips_setup_funclist(ips_ha_t * ha)
{

	/*
	 * Setup Functions
	 */
	if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
		/* morpheus / marco / sebring */
		ha->func.isintr = ips_isintr_morpheus;
		ha->func.isinit = ips_isinit_morpheus;
		ha->func.issue = ips_issue_i2o_memio;
		ha->func.init = ips_init_morpheus;
		ha->func.statupd = ips_statupd_morpheus;
		ha->func.reset = ips_reset_morpheus;
		ha->func.intr = ips_intr_morpheus;
		ha->func.enableint = ips_enable_int_morpheus;
	} else if (IPS_USE_MEMIO(ha)) {
		/* copperhead w/MEMIO */
		ha->func.isintr = ips_isintr_copperhead_memio;
		ha->func.isinit = ips_isinit_copperhead_memio;
		ha->func.init = ips_init_copperhead_memio;
		ha->func.statupd = ips_statupd_copperhead_memio;
		ha->func.statinit = ips_statinit_memio;
		ha->func.reset = ips_reset_copperhead_memio;
		ha->func.intr = ips_intr_copperhead;
		ha->func.erasebios = ips_erase_bios_memio;
		ha->func.programbios = ips_program_bios_memio;
		ha->func.verifybios = ips_verify_bios_memio;
		ha->func.enableint = ips_enable_int_copperhead_memio;
		/* Command delivery may still use either I2O or classic queues. */
		if (IPS_USE_I2O_DELIVER(ha))
			ha->func.issue = ips_issue_i2o_memio;
		else
			ha->func.issue = ips_issue_copperhead_memio;
	} else {
		/* copperhead */
		ha->func.isintr = ips_isintr_copperhead;
		ha->func.isinit = ips_isinit_copperhead;
		ha->func.init = ips_init_copperhead;
		ha->func.statupd = ips_statupd_copperhead;
		ha->func.statinit = ips_statinit;
		ha->func.reset = ips_reset_copperhead;
		ha->func.intr = ips_intr_copperhead;
		ha->func.erasebios = ips_erase_bios;
		ha->func.programbios = ips_program_bios;
		ha->func.verifybios = ips_verify_bios;
		ha->func.enableint = ips_enable_int_copperhead;
		if (IPS_USE_I2O_DELIVER(ha))
			ha->func.issue = ips_issue_i2o;
		else
			ha->func.issue = ips_issue_copperhead;
	}
}
/****************************************************************************/
/* */
/* Routine Name: ips_release */
/* */
/* Routine Description: */
/* */
/* Remove a driver */
/* */
/****************************************************************************/
/* Tear down one host adapter on driver removal: detach it from the SCSI
 * core, flush the controller's write cache, and release its SCB memory,
 * IRQ, and Scsi_Host reference.  Returns FALSE in all cases (the historic
 * return convention for this entry point). */
static int
ips_release(struct Scsi_Host *sh)
{
	ips_scb_t *scb;
	ips_ha_t *ha;
	int i;

	METHOD_TRACE("ips_release", 1);

	scsi_remove_host(sh);

	/* Locate this host in the global host table. */
	for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ;

	if (i == IPS_MAX_ADAPTERS) {
		printk(KERN_WARNING
		       "(%s) release, invalid Scsi_Host pointer.\n", ips_name);
		BUG();
		return (FALSE);
	}

	ha = IPS_HA(sh);

	if (!ha)
		return (FALSE);

	/* flush the cache on the controller (uses the reserved last SCB) */
	scb = &ha->scbs[ha->max_cmds - 1];

	ips_init_scb(ha, scb);

	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_FLUSH;

	scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
	scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.flush_cache.state = IPS_NORM_STATE;
	scb->cmd.flush_cache.reserved = 0;
	scb->cmd.flush_cache.reserved2 = 0;
	scb->cmd.flush_cache.reserved3 = 0;
	scb->cmd.flush_cache.reserved4 = 0;

	IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");

	/* send command; report success or failure, not both (this previously
	 * printed "Flushing Complete." even after "Incomplete Flush.",
	 * unlike the equivalent code in ips_halt()) */
	if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) == IPS_FAILURE)
		IPS_PRINTK(KERN_WARNING, ha->pcidev, "Incomplete Flush.\n");
	else
		IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Complete.\n");

	ips_sh[i] = NULL;
	ips_ha[i] = NULL;

	/* free extra memory */
	ips_free(ha);

	/* free IRQ */
	free_irq(ha->pcidev->irq, ha);

	scsi_host_put(sh);

	ips_released_controllers++;

	return (FALSE);
}
/****************************************************************************/
/* */
/* Routine Name: ips_halt */
/* */
/* Routine Description: */
/* */
/* Perform cleanup when the system reboots */
/* */
/****************************************************************************/
/* Reboot-notifier callback: on restart/halt/power-off, flush the write
 * cache of every active controller so no dirty data is lost.  Returns
 * NOTIFY_DONE for events we ignore, NOTIFY_OK otherwise. */
static int
ips_halt(struct notifier_block *nb, ulong event, void *buf)
{
	ips_scb_t *scb;
	ips_ha_t *ha;
	int index;

	/* Only act on events that actually take the system down. */
	if (event != SYS_RESTART && event != SYS_HALT &&
	    event != SYS_POWER_OFF)
		return (NOTIFY_DONE);

	for (index = 0; index < ips_next_controller; index++) {
		ha = (ips_ha_t *) ips_ha[index];

		if (!ha || !ha->active)
			continue;

		/* Build a cache-flush command in the reserved last SCB slot. */
		scb = &ha->scbs[ha->max_cmds - 1];
		ips_init_scb(ha, scb);

		scb->timeout = ips_cmd_timeout;
		scb->cdb[0] = IPS_CMD_FLUSH;

		scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
		scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.flush_cache.state = IPS_NORM_STATE;
		scb->cmd.flush_cache.reserved = 0;
		scb->cmd.flush_cache.reserved2 = 0;
		scb->cmd.flush_cache.reserved3 = 0;
		scb->cmd.flush_cache.reserved4 = 0;

		IPS_PRINTK(KERN_WARNING, ha->pcidev, "Flushing Cache.\n");

		/* send command */
		if (ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_ON) ==
		    IPS_FAILURE)
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "Incomplete Flush.\n");
		else
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "Flushing Complete.\n");
	}

	return (NOTIFY_OK);
}
/****************************************************************************/
/* */
/* Routine Name: ips_eh_abort */
/* */
/* Routine Description: */
/* */
/* Abort a command (using the new error code stuff) */
/* Note: this routine is called under the io_request_lock */
/****************************************************************************/
/* SCSI error-handler abort entry point.  A command can only be aborted if
 * it is still queued inside the driver (on the passthru "copp" queue or the
 * normal wait queue); once it has been sent to the adapter we must report
 * FAILED.  Made static to match its forward declaration above.
 * Note: called under the io_request_lock. */
static int ips_eh_abort(struct scsi_cmnd *SC)
{
	ips_ha_t *ha;
	ips_copp_wait_item_t *item;
	int ret;
	struct Scsi_Host *host;

	METHOD_TRACE("ips_eh_abort", 1);

	if (!SC)
		return (FAILED);

	host = SC->device->host;
	ha = (ips_ha_t *) SC->device->host->hostdata;

	if (!ha)
		return (FAILED);

	if (!ha->active)
		return (FAILED);

	spin_lock(host->host_lock);

	/* See if the command is on the copp (passthru/ioctl) queue */
	item = ha->copp_waitlist.head;
	while ((item) && (item->scsi_cmd != SC))
		item = item->next;

	if (item) {
		/* Found it */
		ips_removeq_copp(&ha->copp_waitlist, item);
		ret = (SUCCESS);

		/* See if the command is on the wait queue */
	} else if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
		/* command not sent yet */
		ret = (SUCCESS);
	} else {
		/* command must have already been sent to the adapter */
		ret = (FAILED);
	}

	spin_unlock(host->host_lock);
	return ret;
}
/****************************************************************************/
/* */
/* Routine Name: ips_eh_reset */
/* */
/* Routine Description: */
/* */
/* Reset the controller (with new eh error code) */
/* */
/* NOTE: this routine is called under the io_request_lock spinlock */
/* */
/****************************************************************************/
/* Core of the host-reset error handler (caller holds host_lock).
 * Strategy: if the command is still queued inside the driver, just dequeue
 * it.  Otherwise try a cache flush first — if the adapter still responds,
 * a physical reset is pointless.  Only when communication has broken down
 * (or an IOCTL explicitly requested it) do we reset the hardware, then
 * fail/complete outstanding commands accordingly. */
static int __ips_eh_reset(struct scsi_cmnd *SC)
{
	int ret;
	int i;
	ips_ha_t *ha;
	ips_scb_t *scb;
	ips_copp_wait_item_t *item;

	METHOD_TRACE("ips_eh_reset", 1);

#ifdef NO_IPS_RESET
	return (FAILED);
#else

	if (!SC) {
		DEBUG(1, "Reset called with NULL scsi command");

		return (FAILED);
	}

	ha = (ips_ha_t *) SC->device->host->hostdata;

	if (!ha) {
		DEBUG(1, "Reset called with NULL ha struct");

		return (FAILED);
	}

	if (!ha->active)
		return (FAILED);

	/* See if the command is on the copp queue */
	item = ha->copp_waitlist.head;
	while ((item) && (item->scsi_cmd != SC))
		item = item->next;

	if (item) {
		/* Found it */
		ips_removeq_copp(&ha->copp_waitlist, item);
		return (SUCCESS);
	}

	/* See if the command is on the wait queue */
	if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
		/* command not sent yet */
		return (SUCCESS);
	}

	/* An explanation for the casual observer:                              */
	/* Part of the function of a RAID controller is automatic error        */
	/* detection and recovery.  As such, the only problem that physically  */
	/* resetting an adapter will ever fix is when, for some reason,        */
	/* the driver is not successfully communicating with the adapter.      */
	/* Therefore, we will attempt to flush this adapter.  If that succeeds, */
	/* then there's no real purpose in a physical reset. This will complete */
	/* much faster and avoids any problems that might be caused by a       */
	/* physical reset ( such as having to fail all the outstanding I/O's ). */

	if (ha->ioctl_reset == 0) {	/* IF Not an IOCTL Requested Reset */
		scb = &ha->scbs[ha->max_cmds - 1];

		ips_init_scb(ha, scb);

		scb->timeout = ips_cmd_timeout;
		scb->cdb[0] = IPS_CMD_FLUSH;

		scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
		scb->cmd.flush_cache.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.flush_cache.state = IPS_NORM_STATE;
		scb->cmd.flush_cache.reserved = 0;
		scb->cmd.flush_cache.reserved2 = 0;
		scb->cmd.flush_cache.reserved3 = 0;
		scb->cmd.flush_cache.reserved4 = 0;

		/* Attempt the flush command */
		ret = ips_send_wait(ha, scb, ips_cmd_timeout, IPS_INTR_IORL);
		if (ret == IPS_SUCCESS) {
			IPS_PRINTK(KERN_NOTICE, ha->pcidev,
				   "Reset Request - Flushed Cache\n");
			return (SUCCESS);
		}
	}

	/* Either we can't communicate with the adapter or it's an IOCTL request */
	/* from a utility.  A physical reset is needed at this point.            */

	ha->ioctl_reset = 0;	/* Reset the IOCTL Requested Reset Flag */

	/*
	 * command must have already been sent
	 * reset the controller
	 */
	IPS_PRINTK(KERN_NOTICE, ha->pcidev, "Resetting controller.\n");
	ret = (*ha->func.reset) (ha);

	if (!ret) {
		struct scsi_cmnd *scsi_cmd;

		IPS_PRINTK(KERN_NOTICE, ha->pcidev,
			   "Controller reset failed - controller now offline.\n");

		/* Now fail all of the active commands */
		DEBUG_VAR(1, "(%s%d) Failing active commands",
			  ips_name, ha->host_num);

		while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
			scb->scsi_cmd->result = DID_ERROR << 16;
			scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			ips_freescb(ha, scb);
		}

		/* Now fail all of the pending commands */
		DEBUG_VAR(1, "(%s%d) Failing pending commands",
			  ips_name, ha->host_num);

		while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
			/* BUGFIX: the host byte must occupy bits 16-23 of
			 * result; the original stored DID_ERROR unshifted,
			 * which the mid-layer would read as host byte 0
			 * (DID_OK).  Matches every other site in this file. */
			scsi_cmd->result = DID_ERROR << 16;
			scsi_cmd->scsi_done(scsi_cmd);
		}

		ha->active = FALSE;
		return (FAILED);
	}

	if (!ips_clear_adapter(ha, IPS_INTR_IORL)) {
		struct scsi_cmnd *scsi_cmd;

		IPS_PRINTK(KERN_NOTICE, ha->pcidev,
			   "Controller reset failed - controller now offline.\n");

		/* Now fail all of the active commands */
		DEBUG_VAR(1, "(%s%d) Failing active commands",
			  ips_name, ha->host_num);

		while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
			scb->scsi_cmd->result = DID_ERROR << 16;
			scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			ips_freescb(ha, scb);
		}

		/* Now fail all of the pending commands */
		DEBUG_VAR(1, "(%s%d) Failing pending commands",
			  ips_name, ha->host_num);

		while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
			scsi_cmd->result = DID_ERROR << 16;
			scsi_cmd->scsi_done(scsi_cmd);
		}

		ha->active = FALSE;
		return (FAILED);
	}

	/* FFDC: record the reset time if first-failure data capture is on */
	if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
		struct timeval tv;

		do_gettimeofday(&tv);
		ha->last_ffdc = tv.tv_sec;
		ha->reset_count++;
		ips_ffdc_reset(ha, IPS_INTR_IORL);
	}

	/* Now fail all of the active commands */
	DEBUG_VAR(1, "(%s%d) Failing active commands", ips_name, ha->host_num);

	while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
		scb->scsi_cmd->result = DID_RESET << 16;
		scb->scsi_cmd->scsi_done(scb->scsi_cmd);
		ips_freescb(ha, scb);
	}

	/* Reset DCDB active command bits */
	for (i = 1; i < ha->nbus; i++)
		ha->dcdb_active[i - 1] = 0;

	/* Reset the number of active IOCTLs */
	ha->num_ioctl = 0;

	ips_next(ha, IPS_INTR_IORL);

	return (SUCCESS);
#endif				/* NO_IPS_RESET */

}
static int ips_eh_reset(struct scsi_cmnd *SC)
{
int rc;
spin_lock_irq(SC->device->host->host_lock);
rc = __ips_eh_reset(SC);
spin_unlock_irq(SC->device->host->host_lock);
return rc;
}
/****************************************************************************/
/* */
/* Routine Name: ips_queue */
/* */
/* Routine Description: */
/* */
/* Send a command to the controller */
/* */
/* NOTE: */
/* Linux obtains io_request_lock before calling this function */
/* */
/****************************************************************************/
/* queuecommand implementation (wrapped by DEF_SCSI_QCMD below, which takes
 * the host lock).  Routes passthru/ioctl commands to the "copp" queue and
 * normal commands to the SCB wait queue, rejecting either with
 * DID_BUS_BUSY when its queue is full, then kicks the dispatcher. */
static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
{
	ips_ha_t *ha;
	ips_passthru_t *pt;

	METHOD_TRACE("ips_queue", 1);

	ha = (ips_ha_t *) SC->device->host->hostdata;

	if (!ha)
		return (1);

	if (!ha->active)
		return (DID_ERROR);

	/* Bounce the command when the relevant internal queue is full. */
	if (ips_is_passthru(SC)) {
		if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
			SC->result = DID_BUS_BUSY << 16;
			done(SC);

			return (0);
		}
	} else if (ha->scb_waitlist.count == IPS_MAX_QUEUE) {
		SC->result = DID_BUS_BUSY << 16;
		done(SC);

		return (0);
	}

	SC->scsi_done = done;

	DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
		  ips_name,
		  ha->host_num,
		  SC->cmnd[0],
		  SC->device->channel, SC->device->id, SC->device->lun);

	/* Check for command to initiator IDs: the adapter's own ID on a
	 * physical channel is not a reachable target. */
	if ((scmd_channel(SC) > 0)
	    && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
		SC->result = DID_NO_CONNECT << 16;
		done(SC);

		return (0);
	}

	if (ips_is_passthru(SC)) {

		ips_copp_wait_item_t *scratch;

		/* A Reset IOCTL is only sent by the boot CD in extreme cases.           */
		/* There can never be any system activity ( network or disk ), but check */
		/* anyway just as a good practice.                                       */
		pt = (ips_passthru_t *) scsi_sglist(SC);
		if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
		    (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
			if (ha->scb_activelist.count != 0) {
				/* Refuse to reset while I/O is in flight. */
				SC->result = DID_BUS_BUSY << 16;
				done(SC);
				return (0);
			}
			ha->ioctl_reset = 1;	/* This reset request is from an IOCTL */
			__ips_eh_reset(SC);
			SC->result = DID_OK << 16;
			SC->scsi_done(SC);
			return (0);
		}

		/* allocate space for the scribble (per-command wait node) */
		scratch = kmalloc(sizeof (ips_copp_wait_item_t), GFP_ATOMIC);

		if (!scratch) {
			SC->result = DID_ERROR << 16;
			done(SC);

			return (0);
		}

		scratch->scsi_cmd = SC;
		scratch->next = NULL;

		ips_putq_copp_tail(&ha->copp_waitlist, scratch);
	} else {
		ips_putq_wait_tail(&ha->scb_waitlist, SC);
	}

	/* Try to start the next command immediately. */
	ips_next(ha, IPS_INTR_IORL);

	return (0);
}

static DEF_SCSI_QCMD(ips_queue)
/****************************************************************************/
/* */
/* Routine Name: ips_biosparam */
/* */
/* Routine Description: */
/* */
/* Set bios geometry for the controller */
/* */
/****************************************************************************/
/* Report BIOS disk geometry for a logical drive.  Large drives (> 2 GB,
 * i.e. > 0x400000 sectors) without the "compatibility" misc flag get the
 * extended head/sector counts; everything else gets the compatible ones.
 * Cylinders are derived from capacity.  Always returns 0. */
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
			 sector_t capacity, int geom[])
{
	ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
	int nheads;
	int nsectors;
	int ncylinders;

	METHOD_TRACE("ips_biosparam", 1);

	/* host adapter info invalid or adapter inactive */
	if (!ha || !ha->active)
		return (0);

	if (!ips_read_adapter_status(ha, IPS_INTR_ON))
		/* Enquiry command failed */
		return (0);

	if ((capacity > 0x400000) && ((ha->enq->ucMiscFlag & 0x8) == 0)) {
		nheads = IPS_NORM_HEADS;
		nsectors = IPS_NORM_SECTORS;
	} else {
		nheads = IPS_COMP_HEADS;
		nsectors = IPS_COMP_SECTORS;
	}

	ncylinders = (unsigned long) capacity / (nheads * nsectors);

	DEBUG_VAR(2, "Geometry: heads: %d, sectors: %d, cylinders: %d",
		  nheads, nsectors, ncylinders);

	geom[0] = nheads;
	geom[1] = nsectors;
	geom[2] = ncylinders;

	return (0);
}
/****************************************************************************/
/* */
/* Routine Name: ips_slave_configure */
/* */
/* Routine Description: */
/* */
/* Set queue depths on devices once scan is complete */
/* */
/****************************************************************************/
/* Per-device setup after scan: give tagged disks a queue depth scaled to
 * the adapter's command pool, and skip mode pages 0x08/0x3f the firmware
 * does not handle. */
static int
ips_slave_configure(struct scsi_device * SDptr)
{
	ips_ha_t *ha = IPS_HA(SDptr->host);

	if (SDptr->tagged_supported && SDptr->type == TYPE_DISK) {
		/* With few logical drives one device may use nearly the
		 * whole pool; otherwise split it in half. */
		int depth = (ha->enq->ucLogDriveCount <= 2)
		    ? ha->max_cmds - 1 : ha->max_cmds / 2;

		scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, depth);
	}

	SDptr->skip_ms_page_8 = 1;
	SDptr->skip_ms_page_3f = 1;
	return 0;
}
/****************************************************************************/
/* */
/* Routine Name: do_ipsintr */
/* */
/* Routine Description: */
/* */
/* Wrapper for the interrupt handler */
/* */
/****************************************************************************/
/* Top-level IRQ handler: validates the adapter, serializes against the
 * host lock, delegates to the family-specific ha->func.intr routine, and
 * then (outside the lock) tries to start the next queued command. */
static irqreturn_t
do_ipsintr(int irq, void *dev_id)
{
	ips_ha_t *ha;
	struct Scsi_Host *host;
	int irqstatus;

	METHOD_TRACE("do_ipsintr", 2);

	ha = (ips_ha_t *) dev_id;
	if (!ha)
		return IRQ_NONE;
	host = ips_sh[ha->host_num];
	/* interrupt during initialization: no Scsi_Host registered yet,
	 * so service the hardware without taking the (nonexistent) lock */
	if (!host) {
		(*ha->func.intr) (ha);
		return IRQ_HANDLED;
	}
	spin_lock(host->host_lock);

	if (!ha->active) {
		spin_unlock(host->host_lock);
		return IRQ_HANDLED;
	}

	irqstatus = (*ha->func.intr) (ha);

	spin_unlock(host->host_lock);

	/* start the next command (must be done after dropping the lock) */
	ips_next(ha, IPS_INTR_ON);

	return IRQ_RETVAL(irqstatus);
}
/****************************************************************************/
/* */
/* Routine Name: ips_intr_copperhead */
/* */
/* Routine Description: */
/* */
/* Polling interrupt handler */
/* */
/* ASSUMES interrupts are disabled */
/* */
/****************************************************************************/
/* Copperhead-family interrupt service: drain completed command statuses
 * from the adapter and invoke each SCB's completion callback.  Returns 1
 * if the interrupt was ours, 0 if spurious/shared.  ASSUMES interrupts are
 * disabled.  Made static to match its forward declaration above. */
static int
ips_intr_copperhead(ips_ha_t * ha)
{
	ips_stat_t *sp;
	ips_scb_t *scb;
	IPS_STATUS cstatus;
	int intrstatus;

	METHOD_TRACE("ips_intr", 2);

	if (!ha)
		return 0;

	if (!ha->active)
		return 0;

	intrstatus = (*ha->func.isintr) (ha);

	if (!intrstatus) {
		/*
		 * Unexpected/Shared interrupt
		 */

		return 0;
	}

	while (TRUE) {
		sp = &ha->sp;

		intrstatus = (*ha->func.isintr) (ha);

		if (!intrstatus)
			break;
		else
			cstatus.value = (*ha->func.statupd) (ha);

		if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
			/* Spurious Interrupt ? */
			continue;
		}

		ips_chkstatus(ha, &cstatus);
		scb = (ips_scb_t *) sp->scb_addr;

		/*
		 * use the callback function to finish things up
		 * NOTE: interrupts are OFF for this
		 */
		(*scb->callback) (ha, scb);
	}			/* end while */

	return 1;
}
/****************************************************************************/
/* */
/* Routine Name: ips_intr_morpheus */
/* */
/* Routine Description: */
/* */
/* Polling interrupt handler */
/* */
/* ASSUMES interrupts are disabled */
/* */
/****************************************************************************/
/* Morpheus-family interrupt service: like the copperhead variant, but the
 * status register returns 0xffffffff when the queue is drained, and a
 * spurious completion is logged.  Returns 1 if the interrupt was ours,
 * 0 if spurious/shared.  ASSUMES interrupts are disabled.  Made static to
 * match its forward declaration above. */
static int
ips_intr_morpheus(ips_ha_t * ha)
{
	ips_stat_t *sp;
	ips_scb_t *scb;
	IPS_STATUS cstatus;
	int intrstatus;

	METHOD_TRACE("ips_intr_morpheus", 2);

	if (!ha)
		return 0;

	if (!ha->active)
		return 0;

	intrstatus = (*ha->func.isintr) (ha);

	if (!intrstatus) {
		/*
		 * Unexpected/Shared interrupt
		 */

		return 0;
	}

	while (TRUE) {
		sp = &ha->sp;

		intrstatus = (*ha->func.isintr) (ha);

		if (!intrstatus)
			break;
		else
			cstatus.value = (*ha->func.statupd) (ha);

		if (cstatus.value == 0xffffffff)
			/* No more to process */
			break;

		if (cstatus.fields.command_id > (IPS_MAX_CMDS - 1)) {
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "Spurious interrupt; no ccb.\n");

			continue;
		}

		ips_chkstatus(ha, &cstatus);
		scb = (ips_scb_t *) sp->scb_addr;

		/*
		 * use the callback function to finish things up
		 * NOTE: interrupts are OFF for this
		 */
		(*scb->callback) (ha, scb);
	}			/* end while */

	return 1;
}
/****************************************************************************/
/* */
/* Routine Name: ips_info */
/* */
/* Routine Description: */
/* */
/* Return info about the driver */
/* */
/****************************************************************************/
/*
 * Build the driver identification string for this host.
 *
 * Returns a pointer to a static buffer (NOT reentrant) or NULL when
 * no ips_ha_t is attached to the Scsi_Host.
 *
 * Fix: the original used sprintf()/strcat() with no bound on the
 * 256-byte static buffer; use snprintf() so the write can never
 * overrun even if the version/name strings grow.
 */
static const char *
ips_info(struct Scsi_Host *SH)
{
	static char buffer[256];
	char *bp;
	ips_ha_t *ha;
	int len;

	METHOD_TRACE("ips_info", 1);

	ha = IPS_HA(SH);
	if (!ha)
		return (NULL);

	bp = &buffer[0];
	memset(bp, 0, sizeof (buffer));

	len = snprintf(bp, sizeof (buffer), "%s%s%s Build %d",
		       "IBM PCI ServeRAID ",
		       IPS_VERSION_HIGH, IPS_VERSION_LOW, IPS_BUILD_IDENT);

	/* Append the adapter name, bounded to the space that remains. */
	if (ha->ad_type > 0 && ha->ad_type <= MAX_ADAPTER_NAME &&
	    len > 0 && len < (int) sizeof (buffer))
		snprintf(bp + len, sizeof (buffer) - len, " <%s>",
			 ips_adapter_name[ha->ad_type - 1]);

	return (bp);
}
/****************************************************************************/
/* */
/* Routine Name: ips_proc_info */
/* */
/* Routine Description: */
/* */
/* The passthru interface for the driver */
/* */
/****************************************************************************/
/*
 * /proc interface entry point: locate the adapter bound to 'host' and
 * either accept a write (no-op) or emit the adapter info page.
 */
static int
ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
	      int length, int func)
{
	ips_ha_t *ha = NULL;
	int idx;

	METHOD_TRACE("ips_proc_info", 1);

	/* Walk the registered controllers looking for this Scsi_Host. */
	for (idx = 0; idx < ips_next_controller; idx++) {
		if (ips_sh[idx] && ips_sh[idx] == host) {
			ha = (ips_ha_t *) ips_sh[idx]->hostdata;
			break;
		}
	}

	if (!ha)
		return (-EINVAL);

	if (func) {
		/* write: nothing is accepted, report success */
		return (0);
	}

	/* read: emit the host information page */
	if (start)
		*start = buffer;
	return (ips_host_info(ha, buffer, offset, length));
}
/*--------------------------------------------------------------------------*/
/* Helper Functions */
/*--------------------------------------------------------------------------*/
/****************************************************************************/
/* */
/* Routine Name: ips_is_passthru */
/* */
/* Routine Description: */
/* */
/* Determine if the specified SCSI command is really a passthru command */
/* */
/****************************************************************************/
static int ips_is_passthru(struct scsi_cmnd *SC)
{
	unsigned long flags;

	METHOD_TRACE("ips_is_passthru", 1);

	if (!SC)
		return (0);

	/* A passthru is our private ioctl opcode addressed at the adapter's
	   own SCSI id on channel 0 / LUN 0, and must carry a data buffer. */
	if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
	    (SC->device->channel == 0) &&
	    (SC->device->id == IPS_ADAPTER_ID) &&
	    (SC->device->lun == 0) && scsi_sglist(SC)) {
		struct scatterlist *sg = scsi_sglist(SC);
		char *buffer;

		/* kmap_atomic() ensures addressability of the user buffer.*/
		/* local_irq_save() protects the KM_IRQ0 address slot. */
		local_irq_save(flags);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;
		/* The payload must start with the "COPP" signature. */
		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
		    buffer[2] == 'P' && buffer[3] == 'P') {
			/* kunmap_atomic() wants the page-aligned mapping back,
			   hence the "- sg->offset". */
			kunmap_atomic(buffer - sg->offset);
			local_irq_restore(flags);
			return 1;
		}
		kunmap_atomic(buffer - sg->offset);
		local_irq_restore(flags);
	}
	return 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_alloc_passthru_buffer */
/* */
/* Routine Description: */
/* allocate a buffer large enough for the ioctl data if the ioctl buffer */
/* is too small or doesn't exist */
/****************************************************************************/
/*
 * Make sure ha->ioctl_data can hold 'length' bytes of ioctl payload,
 * growing the DMA-coherent buffer when necessary.
 *
 * Returns 0 on success, -1 when a larger buffer could not be allocated
 * (the old buffer, if any, is left intact in that case).
 */
static int
ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
{
	void *new_buf;
	dma_addr_t new_busaddr;

	/* Existing buffer already big enough — nothing to do. */
	if (ha->ioctl_data && length <= ha->ioctl_len)
		return 0;

	/* Acquire the replacement first so failure leaves state untouched. */
	new_buf = pci_alloc_consistent(ha->pcidev, length, &new_busaddr);
	if (!new_buf)
		return -1;

	/* Release the old buffer and switch over to the new one. */
	pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
			    ha->ioctl_busaddr);
	ha->ioctl_data = (char *) new_buf;
	ha->ioctl_len = length;
	ha->ioctl_busaddr = new_busaddr;
	return 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_make_passthru */
/* */
/* Routine Description: */
/* */
/* Make a passthru command out of the info in the Scsi block */
/* */
/****************************************************************************/
static int
ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
{
	ips_passthru_t *pt;
	int length = 0;
	int i, ret;
	struct scatterlist *sg = scsi_sglist(SC);

	METHOD_TRACE("ips_make_passthru", 1);

	/* Total payload size across the whole scatter/gather list. */
	scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
	    length += sg->length;

	if (length < sizeof (ips_passthru_t)) {
		/* wrong size */
		DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
			  ips_name, ha->host_num);
		return (IPS_FAILURE);
	}
	if (ips_alloc_passthru_buffer(ha, length)) {
		/* allocation failure!  If ha->ioctl_data exists, use it to return
		   some error codes.  Return a failed command to the scsi layer. */
		if (ha->ioctl_data) {
			pt = (ips_passthru_t *) ha->ioctl_data;
			ips_scmd_buf_read(SC, pt, sizeof (ips_passthru_t));
			pt->BasicStatus = 0x0B;
			pt->ExtendedStatus = 0x00;
			ips_scmd_buf_write(SC, pt, sizeof (ips_passthru_t));
		}
		return IPS_FAILURE;
	}
	/* Pull the whole ioctl payload into the driver-owned DMA buffer. */
	ha->ioctl_datasize = length;
	ips_scmd_buf_read(SC, ha->ioctl_data, ha->ioctl_datasize);
	pt = (ips_passthru_t *) ha->ioctl_data;

	/*
	 * Some notes about the passthru interface used
	 *
	 * IF the scsi op_code == 0x0d then we assume
	 * that the data came along with/goes with the
	 * packet we received from the sg driver. In this
	 * case the CmdBSize field of the pt structure is
	 * used for the size of the buffer.
	 */
	switch (pt->CoppCmd) {
	case IPS_NUMCTRLS:
		/* Query: return the controller count right after the header. */
		memcpy(ha->ioctl_data + sizeof (ips_passthru_t),
		       &ips_num_controllers, sizeof (int));
		ips_scmd_buf_write(SC, ha->ioctl_data,
				   sizeof (ips_passthru_t) + sizeof (int));
		SC->result = DID_OK << 16;
		return (IPS_SUCCESS_IMM);

	case IPS_COPPUSRCMD:
	case IPS_COPPIOCCMD:
		if (SC->cmnd[0] == IPS_IOCTL_COMMAND) {
			/* The header must be followed by CmdBSize data bytes. */
			if (length < (sizeof (ips_passthru_t) + pt->CmdBSize)) {
				/* wrong size */
				DEBUG_VAR(1,
					  "(%s%d) Passthru structure wrong size",
					  ips_name, ha->host_num);
				return (IPS_FAILURE);
			}
			/* Copperhead BIOS/firmware flashing takes a special path. */
			if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
			    pt->CoppCP.cmd.flashfw.op_code ==
			    IPS_CMD_RW_BIOSFW) {
				ret = ips_flash_copperhead(ha, pt, scb);
				ips_scmd_buf_write(SC, ha->ioctl_data,
						   sizeof (ips_passthru_t));
				return ret;
			}
			if (ips_usrcmd(ha, pt, scb))
				return (IPS_SUCCESS);
			else
				return (IPS_FAILURE);
		}
		break;
	}			/* end switch */

	return (IPS_FAILURE);
}
/****************************************************************************/
/* Routine Name: ips_flash_copperhead */
/* Routine Description: */
/* Flash the BIOS/FW on a Copperhead style controller */
/****************************************************************************/
static int
ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	int datasize;

	/* Trombone is the only copperhead that can do packet flash, but only
	 * for firmware. No one said it had to make sense. */
	if (IPS_IS_TROMBONE(ha) && pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE) {
		if (ips_usrcmd(ha, pt, scb))
			return IPS_SUCCESS;
		else
			return IPS_FAILURE;
	}
	/* Pre-set a failure status; it is cleared once the packet is accepted. */
	pt->BasicStatus = 0x0B;
	pt->ExtendedStatus = 0;
	scb->scsi_cmd->result = DID_OK << 16;
	/* IF it's OK to Use the "CD BOOT" Flash Buffer, then you can */
	/* avoid allocating a huge buffer per adapter ( which can fail ). */
	if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
	    pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
		/* Erase needs no staging buffer at all. */
		pt->BasicStatus = 0;
		return ips_flash_bios(ha, pt, scb);
	} else if (pt->CoppCP.cmd.flashfw.packet_num == 0) {
		/* First packet: set up a staging buffer for the whole image. */
		if (ips_FlashData && !test_and_set_bit(0, &ips_FlashDataInUse)){
			/* Grab the shared pre-allocated flash buffer. */
			ha->flash_data = ips_FlashData;
			ha->flash_busaddr = ips_flashbusaddr;
			ha->flash_len = PAGE_SIZE << 7;
			ha->flash_datasize = 0;
		} else if (!ha->flash_data) {
			/* Shared buffer busy: allocate a private one sized
			   for total_packets worth of data. */
			datasize = pt->CoppCP.cmd.flashfw.total_packets *
			    pt->CoppCP.cmd.flashfw.count;
			ha->flash_data = pci_alloc_consistent(ha->pcidev,
							      datasize,
							      &ha->flash_busaddr);
			if (!ha->flash_data){
				printk(KERN_WARNING "Unable to allocate a flash buffer\n");
				return IPS_FAILURE;
			}
			ha->flash_datasize = 0;
			ha->flash_len = datasize;
		} else
			return IPS_FAILURE;
	} else {
		/* Follow-on packet: make sure it still fits the buffer. */
		if (pt->CoppCP.cmd.flashfw.count + ha->flash_datasize >
		    ha->flash_len) {
			ips_free_flash_copperhead(ha);
			IPS_PRINTK(KERN_WARNING, ha->pcidev,
				   "failed size sanity check\n");
			return IPS_FAILURE;
		}
	}
	if (!ha->flash_data)
		return IPS_FAILURE;
	pt->BasicStatus = 0;
	/* Append this packet's payload (which follows the pt header). */
	memcpy(&ha->flash_data[ha->flash_datasize], pt + 1,
	       pt->CoppCP.cmd.flashfw.count);
	ha->flash_datasize += pt->CoppCP.cmd.flashfw.count;
	/* Last packet received: hand the assembled image to the flasher. */
	if (pt->CoppCP.cmd.flashfw.packet_num ==
	    pt->CoppCP.cmd.flashfw.total_packets - 1) {
		if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE)
			return ips_flash_bios(ha, pt, scb);
		else if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE)
			return ips_flash_firmware(ha, pt, scb);
	}
	return IPS_SUCCESS_IMM;
}
/****************************************************************************/
/* Routine Name: ips_flash_bios */
/* Routine Description: */
/* flashes the bios of a copperhead adapter */
/****************************************************************************/
static int
ips_flash_bios(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	/* Write path: erase, program, then verify — each step must succeed. */
	if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
	    pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_BIOS) {
		if ((!ha->func.programbios) || (!ha->func.erasebios) ||
		    (!ha->func.verifybios))
			goto error;
		if ((*ha->func.erasebios) (ha)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to erase flash",
				  ips_name, ha->host_num);
			goto error;
		} else
		    /* The image carries an IPS_BIOS_HEADER-byte header that
		       is not written to the flash part itself. */
		    if ((*ha->func.programbios) (ha,
						 ha->flash_data +
						 IPS_BIOS_HEADER,
						 ha->flash_datasize -
						 IPS_BIOS_HEADER, 0)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to flash",
				  ips_name, ha->host_num);
			goto error;
		} else
		    if ((*ha->func.verifybios) (ha,
						ha->flash_data +
						IPS_BIOS_HEADER,
						ha->flash_datasize -
						IPS_BIOS_HEADER, 0)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to verify flash",
				  ips_name, ha->host_num);
			goto error;
		}
		ips_free_flash_copperhead(ha);
		return IPS_SUCCESS_IMM;
	} else if (pt->CoppCP.cmd.flashfw.type == IPS_BIOS_IMAGE &&
		   pt->CoppCP.cmd.flashfw.direction == IPS_ERASE_BIOS) {
		/* Erase-only path. */
		if (!ha->func.erasebios)
			goto error;
		if ((*ha->func.erasebios) (ha)) {
			DEBUG_VAR(1,
				  "(%s%d) flash bios failed - unable to erase flash",
				  ips_name, ha->host_num);
			goto error;
		}
		return IPS_SUCCESS_IMM;
	}
      error:
	/* Report failure to the caller and release the staging buffer. */
	pt->BasicStatus = 0x0B;
	pt->ExtendedStatus = 0x00;
	ips_free_flash_copperhead(ha);
	return IPS_FAILURE;
}
/****************************************************************************/
/* */
/* Routine Name: ips_fill_scb_sg_single */
/* */
/* Routine Description: */
/* Fill in a single scb sg_list element from an address */
/* return a -1 if a breakup occurred */
/****************************************************************************/
/*
 * Append one scatter/gather element (busaddr, e_len) to the scb's list
 * at slot 'indx'.  If the element would push the transfer past the
 * adapter's max_xfer limit, it is truncated to fit, the breakup point
 * is recorded on the scb, and -1 is returned; otherwise 0.
 */
static int
ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
		       ips_scb_t * scb, int indx, unsigned int e_len)
{
	int rc = 0;

	if ((scb->data_len + e_len) > ha->max_xfer) {
		/* Truncate to the remaining budget and note the breakup. */
		e_len = ha->max_xfer - scb->data_len;
		scb->breakup = indx;
		++scb->sg_break;
		rc = -1;
	} else {
		scb->breakup = 0;
		scb->sg_break = 0;
	}

	/* Fill either the enhanced (64-bit) or standard list entry. */
	if (!IPS_USE_ENH_SGLIST(ha)) {
		scb->sg_list.std_list[indx].address =
		    cpu_to_le32(pci_dma_lo32(busaddr));
		scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
	} else {
		scb->sg_list.enh_list[indx].address_lo =
		    cpu_to_le32(pci_dma_lo32(busaddr));
		scb->sg_list.enh_list[indx].address_hi =
		    cpu_to_le32(pci_dma_hi32(busaddr));
		scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
	}

	scb->sg_len++;
	scb->data_len += e_len;
	return rc;
}
/****************************************************************************/
/* Routine Name: ips_flash_firmware */
/* Routine Description: */
/* flashes the firmware of a copperhead adapter */
/****************************************************************************/
static int
ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	IPS_SG_LIST sg_list;
	uint32_t cmd_busaddr;

	if (pt->CoppCP.cmd.flashfw.type == IPS_FW_IMAGE &&
	    pt->CoppCP.cmd.flashfw.direction == IPS_WRITE_FW) {
		/* Rebuild the CP as a DOWNLOAD of the assembled image. */
		memset(&pt->CoppCP.cmd, 0, sizeof (IPS_HOST_COMMAND));
		pt->CoppCP.cmd.flashfw.op_code = IPS_CMD_DOWNLOAD;
		pt->CoppCP.cmd.flashfw.count = cpu_to_le32(ha->flash_datasize);
	} else {
		/* Anything else is invalid for this path. */
		pt->BasicStatus = 0x0B;
		pt->ExtendedStatus = 0x00;
		ips_free_flash_copperhead(ha);
		return IPS_FAILURE;
	}
	/* Save the S/G list pointer so it doesn't get clobbered */
	sg_list.list = scb->sg_list.list;
	cmd_busaddr = scb->scb_busaddr;
	/* copy in the CP */
	memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
	/* FIX stuff that might be wrong */
	scb->sg_list.list = sg_list.list;
	scb->scb_busaddr = cmd_busaddr;
	scb->bus = scb->scsi_cmd->device->channel;
	scb->target_id = scb->scsi_cmd->device->id;
	scb->lun = scb->scsi_cmd->device->lun;
	scb->sg_len = 0;
	scb->data_len = 0;
	scb->flags = 0;
	scb->op_code = 0;
	scb->callback = ipsintr_done;
	scb->timeout = ips_cmd_timeout;

	/* Map the staged firmware image for DMA and point the CP at it. */
	scb->data_len = ha->flash_datasize;
	scb->data_busaddr =
	    pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
			   IPS_DMA_DIR(scb));
	scb->flags |= IPS_SCB_MAP_SINGLE;
	scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.flashfw.buffer_addr = cpu_to_le32(scb->data_busaddr);
	/* Honor a caller-supplied timeout, if any. */
	if (pt->TimeOut)
		scb->timeout = pt->TimeOut;
	scb->scsi_cmd->result = DID_OK << 16;
	return IPS_SUCCESS;
}
/****************************************************************************/
/* Routine Name: ips_free_flash_copperhead */
/* Routine Description: */
/* release the memory resources used to hold the flash image */
/****************************************************************************/
/*
 * Release whatever flash staging buffer this adapter is holding:
 * either give back the shared static buffer or free a private
 * DMA-coherent allocation.  Safe to call when no buffer is held.
 */
static void
ips_free_flash_copperhead(ips_ha_t * ha)
{
	if (ha->flash_data == ips_FlashData) {
		/* Shared buffer: just drop the in-use bit. */
		test_and_clear_bit(0, &ips_FlashDataInUse);
	} else if (ha->flash_data) {
		pci_free_consistent(ha->pcidev, ha->flash_len,
				    ha->flash_data, ha->flash_busaddr);
	}
	ha->flash_data = NULL;
}
/****************************************************************************/
/* */
/* Routine Name: ips_usrcmd */
/* */
/* Routine Description: */
/* */
/* Process a user command and make it ready to send */
/* */
/****************************************************************************/
static int
ips_usrcmd(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
{
	IPS_SG_LIST sg_list;
	uint32_t cmd_busaddr;

	METHOD_TRACE("ips_usrcmd", 1);

	/* Returns 1 when the scb is ready to send, 0 on any rejection. */
	if ((!scb) || (!pt) || (!ha))
		return (0);

	/* Save the S/G list pointer so it doesn't get clobbered */
	sg_list.list = scb->sg_list.list;
	cmd_busaddr = scb->scb_busaddr;
	/* copy in the CP */
	memcpy(&scb->cmd, &pt->CoppCP.cmd, sizeof (IPS_IOCTL_CMD));
	memcpy(&scb->dcdb, &pt->CoppCP.dcdb, sizeof (IPS_DCDB_TABLE));

	/* FIX stuff that might be wrong */
	scb->sg_list.list = sg_list.list;
	scb->scb_busaddr = cmd_busaddr;
	scb->bus = scb->scsi_cmd->device->channel;
	scb->target_id = scb->scsi_cmd->device->id;
	scb->lun = scb->scsi_cmd->device->lun;
	scb->sg_len = 0;
	scb->data_len = 0;
	scb->flags = 0;
	scb->op_code = 0;
	scb->callback = ipsintr_done;
	scb->timeout = ips_cmd_timeout;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);

	/* we don't support DCDB/READ/WRITE Scatter Gather */
	if ((scb->cmd.basic_io.op_code == IPS_CMD_READ_SG) ||
	    (scb->cmd.basic_io.op_code == IPS_CMD_WRITE_SG) ||
	    (scb->cmd.basic_io.op_code == IPS_CMD_DCDB_SG))
		return (0);

	/* The data payload lives right behind the pt header in ioctl_data. */
	if (pt->CmdBSize) {
		scb->data_len = pt->CmdBSize;
		scb->data_busaddr = ha->ioctl_busaddr + sizeof (ips_passthru_t);
	} else {
		scb->data_busaddr = 0L;
	}

	/* For DCDB commands the CP must carry the bus address of the
	   DCDB table embedded in this scb. */
	if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
		scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
							 (unsigned long) &scb->
							 dcdb -
							 (unsigned long) scb);

	if (pt->CmdBSize) {
		if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)
			scb->dcdb.buffer_pointer =
			    cpu_to_le32(scb->data_busaddr);
		else
			scb->cmd.basic_io.sg_addr =
			    cpu_to_le32(scb->data_busaddr);
	}

	/* set timeouts */
	if (pt->TimeOut) {
		scb->timeout = pt->TimeOut;
		if (pt->TimeOut <= 10)
			scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;
		else if (pt->TimeOut <= 60)
			scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;
		else
			scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;
	}

	/* assume success */
	scb->scsi_cmd->result = DID_OK << 16;

	/* success */
	return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_cleanup_passthru */
/* */
/* Routine Description: */
/* */
/* Cleanup after a passthru command */
/* */
/****************************************************************************/
static void
ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
{
	ips_passthru_t *pt;

	METHOD_TRACE("ips_cleanup_passthru", 1);

	if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
		DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
			  ips_name, ha->host_num);
		return;
	}
	pt = (ips_passthru_t *) ha->ioctl_data;

	/* Copy data back to the user */
	if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB)	/* Copy DCDB Back to Caller's Area */
		memcpy(&pt->CoppCP.dcdb, &scb->dcdb, sizeof (IPS_DCDB_TABLE));

	/* Propagate completion status into the passthru header. */
	pt->BasicStatus = scb->basic_status;
	pt->ExtendedStatus = scb->extended_status;
	pt->AdapterType = ha->ad_type;

	/* A completed copperhead flash command releases the staging buffer. */
	if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
	    (scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
	     scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
		ips_free_flash_copperhead(ha);

	/* Hand the header (and any data) back to the requester's buffer. */
	ips_scmd_buf_write(scb->scsi_cmd, ha->ioctl_data, ha->ioctl_datasize);
}
/****************************************************************************/
/* */
/* Routine Name: ips_host_info */
/* */
/* Routine Description: */
/* */
/* The passthru interface for the driver */
/* */
/****************************************************************************/
static int
ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len)
{
	IPS_INFOSTR info;

	METHOD_TRACE("ips_host_info", 1);

	/* info tracks both the absolute stream position (pos/offset) and
	   the position within the caller's buffer (localpos/length). */
	info.buffer = ptr;
	info.length = len;
	info.offset = offset;
	info.pos = 0;
	info.localpos = 0;

	copy_info(&info, "\nIBM ServeRAID General Information:\n\n");

	if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) &&
	    (le16_to_cpu(ha->nvram->adapter_type) != 0))
		copy_info(&info, "\tController Type                   : %s\n",
			  ips_adapter_name[ha->ad_type - 1]);
	else
		copy_info(&info,
			  "\tController Type                   : Unknown\n");

	if (ha->io_addr)
		copy_info(&info,
			  "\tIO region                         : 0x%lx (%d bytes)\n",
			  ha->io_addr, ha->io_len);

	if (ha->mem_addr) {
		copy_info(&info,
			  "\tMemory region                     : 0x%lx (%d bytes)\n",
			  ha->mem_addr, ha->mem_len);
		/* NOTE(review): mem_ptr is printed with %lx; %p would be the
		   type-correct specifier for a pointer — confirm before changing
		   since the proc output format is user-visible. */
		copy_info(&info,
			  "\tShared memory address             : 0x%lx\n",
			  ha->mem_ptr);
	}

	copy_info(&info, "\tIRQ number                        : %d\n", ha->pcidev->irq);

	/* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */
	/* That keeps everything happy for "text" operations on the proc file.                    */
	if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
		if (ha->nvram->bios_low[3] == 0) {
			copy_info(&info,
				  "\tBIOS Version                      : %c%c%c%c%c%c%c\n",
				  ha->nvram->bios_high[0], ha->nvram->bios_high[1],
				  ha->nvram->bios_high[2], ha->nvram->bios_high[3],
				  ha->nvram->bios_low[0], ha->nvram->bios_low[1],
				  ha->nvram->bios_low[2]);
		} else {
			copy_info(&info,
				  "\tBIOS Version                      : %c%c%c%c%c%c%c%c\n",
				  ha->nvram->bios_high[0], ha->nvram->bios_high[1],
				  ha->nvram->bios_high[2], ha->nvram->bios_high[3],
				  ha->nvram->bios_low[0], ha->nvram->bios_low[1],
				  ha->nvram->bios_low[2], ha->nvram->bios_low[3]);
		}
	}

	if (ha->enq->CodeBlkVersion[7] == 0) {
		copy_info(&info,
			  "\tFirmware Version                  : %c%c%c%c%c%c%c\n",
			  ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
			  ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
			  ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
			  ha->enq->CodeBlkVersion[6]);
	} else {
		copy_info(&info,
			  "\tFirmware Version                  : %c%c%c%c%c%c%c%c\n",
			  ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1],
			  ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3],
			  ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5],
			  ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]);
	}

	if (ha->enq->BootBlkVersion[7] == 0) {
		copy_info(&info,
			  "\tBoot Block Version                : %c%c%c%c%c%c%c\n",
			  ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
			  ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
			  ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
			  ha->enq->BootBlkVersion[6]);
	} else {
		copy_info(&info,
			  "\tBoot Block Version                : %c%c%c%c%c%c%c%c\n",
			  ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1],
			  ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3],
			  ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5],
			  ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]);
	}

	copy_info(&info, "\tDriver Version                    : %s%s\n",
		  IPS_VERSION_HIGH, IPS_VERSION_LOW);

	copy_info(&info, "\tDriver Build                      : %d\n",
		  IPS_BUILD_IDENT);

	copy_info(&info, "\tMax Physical Devices              : %d\n",
		  ha->enq->ucMaxPhysicalDevices);
	copy_info(&info, "\tMax Active Commands               : %d\n",
		  ha->max_cmds);
	copy_info(&info, "\tCurrent Queued Commands           : %d\n",
		  ha->scb_waitlist.count);
	copy_info(&info, "\tCurrent Active Commands           : %d\n",
		  ha->scb_activelist.count - ha->num_ioctl);
	copy_info(&info, "\tCurrent Queued PT Commands        : %d\n",
		  ha->copp_waitlist.count);
	copy_info(&info, "\tCurrent Active PT Commands        : %d\n",
		  ha->num_ioctl);

	copy_info(&info, "\n");

	/* Number of bytes actually placed in the caller's buffer. */
	return (info.localpos);
}
/****************************************************************************/
/* */
/* Routine Name: copy_mem_info */
/* */
/* Routine Description: */
/* */
/* Copy data into an IPS_INFOSTR structure */
/* */
/****************************************************************************/
/*
 * Append 'len' bytes of 'data' to the proc-info stream, honoring the
 * caller-requested offset and the size of the output buffer.
 */
static void
copy_mem_info(IPS_INFOSTR * info, char *data, int len)
{
	int skip;

	METHOD_TRACE("copy_mem_info", 1);

	/* Entire chunk lies before the requested offset — count and skip. */
	if (info->pos + len < info->offset) {
		info->pos += len;
		return;
	}

	/* Chunk straddles the offset — drop the leading part. */
	if (info->pos < info->offset) {
		skip = info->offset - info->pos;
		data += skip;
		len -= skip;
		info->pos += skip;
	}

	/* Clip to the space left in the output buffer. */
	if (info->localpos + len > info->length)
		len = info->length - info->localpos;

	if (len > 0) {
		memcpy(info->buffer + info->localpos, data, len);
		info->pos += len;
		info->localpos += len;
	}
}
/****************************************************************************/
/* */
/* Routine Name: copy_info */
/* */
/* Routine Description: */
/* */
/* printf style wrapper for an info structure */
/* */
/****************************************************************************/
/*
 * printf-style helper that formats into a small stack buffer and feeds
 * the result to copy_mem_info().  Returns the formatted length.
 *
 * Fix: the original used vsprintf(), which can overrun the 128-byte
 * stack buffer if a format expansion ever exceeds it.  vsnprintf()
 * bounds the write; the returned length is clamped to what actually
 * landed in buf so copy_mem_info() never reads past the buffer.
 */
static int
copy_info(IPS_INFOSTR * info, char *fmt, ...)
{
	va_list args;
	char buf[128];
	int len;

	METHOD_TRACE("copy_info", 1);

	va_start(args, fmt);
	len = vsnprintf(buf, sizeof (buf), fmt, args);
	va_end(args);

	/* On truncation vsnprintf reports the would-be length; clamp to
	   the NUL-terminated content actually present in buf. */
	if (len >= (int) sizeof (buf))
		len = sizeof (buf) - 1;

	copy_mem_info(info, buf, len);

	return (len);
}
/****************************************************************************/
/* */
/* Routine Name: ips_identify_controller */
/* */
/* Routine Description: */
/* */
/* Identify this controller */
/* */
/****************************************************************************/
static void
ips_identify_controller(ips_ha_t * ha)
{
	METHOD_TRACE("ips_identify_controller", 1);

	/* Map PCI device/revision/subsystem ids onto ha->ad_type. */
	switch (ha->pcidev->device) {
	case IPS_DEVICEID_COPPERHEAD:
		if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
			ha->ad_type = IPS_ADTYPE_SERVERAID;
		} else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
			ha->ad_type = IPS_ADTYPE_SERVERAID2;
		} else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
			ha->ad_type = IPS_ADTYPE_NAVAJO;
		} else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
			   && (ha->slot_num == 0)) {
			/* NOTE(review): this branch is unreachable — the plain
			   IPS_REVID_SERVERAID2 test above always matches first,
			   so IPS_ADTYPE_KIOWA is never assigned.  The intended
			   ordering (KIOWA check before SERVERAID2?) should be
			   confirmed against the hardware docs before reordering. */
			ha->ad_type = IPS_ADTYPE_KIOWA;
		} else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
			   (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
			if (ha->enq->ucMaxPhysicalDevices == 15)
				ha->ad_type = IPS_ADTYPE_SERVERAID3L;
			else
				ha->ad_type = IPS_ADTYPE_SERVERAID3;
		} else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
			   (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
			ha->ad_type = IPS_ADTYPE_SERVERAID4H;
		}
		break;

	case IPS_DEVICEID_MORPHEUS:
		/* Morpheus variants are distinguished by subsystem id. */
		switch (ha->pcidev->subsystem_device) {
		case IPS_SUBDEVICEID_4L:
			ha->ad_type = IPS_ADTYPE_SERVERAID4L;
			break;

		case IPS_SUBDEVICEID_4M:
			ha->ad_type = IPS_ADTYPE_SERVERAID4M;
			break;

		case IPS_SUBDEVICEID_4MX:
			ha->ad_type = IPS_ADTYPE_SERVERAID4MX;
			break;

		case IPS_SUBDEVICEID_4LX:
			ha->ad_type = IPS_ADTYPE_SERVERAID4LX;
			break;

		case IPS_SUBDEVICEID_5I2:
			ha->ad_type = IPS_ADTYPE_SERVERAID5I2;
			break;

		case IPS_SUBDEVICEID_5I1:
			ha->ad_type = IPS_ADTYPE_SERVERAID5I1;
			break;
		}

		break;

	case IPS_DEVICEID_MARCO:
		/* Marco variants likewise keyed by subsystem id. */
		switch (ha->pcidev->subsystem_device) {
		case IPS_SUBDEVICEID_6M:
			ha->ad_type = IPS_ADTYPE_SERVERAID6M;
			break;
		case IPS_SUBDEVICEID_6I:
			ha->ad_type = IPS_ADTYPE_SERVERAID6I;
			break;
		case IPS_SUBDEVICEID_7k:
			ha->ad_type = IPS_ADTYPE_SERVERAID7k;
			break;
		case IPS_SUBDEVICEID_7M:
			ha->ad_type = IPS_ADTYPE_SERVERAID7M;
			break;
		}
		break;
	}
}
/****************************************************************************/
/* */
/* Routine Name: ips_get_bios_version */
/* */
/* Routine Description: */
/* */
/* Get the BIOS revision number */
/* */
/****************************************************************************/
static void
ips_get_bios_version(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;
	uint8_t major;
	uint8_t minor;
	uint8_t subminor;
	uint8_t *buffer;
	char hexDigits[] =
	    { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
		'D', 'E', 'F' };

	METHOD_TRACE("ips_get_bios_version", 1);

	major = 0;
	minor = 0;

	/* Default string if the version cannot be read. */
	strncpy(ha->bios_version, "       ?", 8);

	if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
		/* Copperhead: read the flash directly through FLAP/FLDP;
		   Trombone64 needs a 25us settle after each address write. */
		if (IPS_USE_MEMIO(ha)) {
			/* Memory Mapped I/O */

			/* test 1st byte */
			writel(0, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			/* A valid BIOS image starts with the 0x55 0xAA signature. */
			if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
				return;

			writel(1, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
				return;

			/* Get Major version */
			writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			major = readb(ha->mem_ptr + IPS_REG_FLDP);

			/* Get Minor version */
			writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			minor = readb(ha->mem_ptr + IPS_REG_FLDP);

			/* Get SubMinor version */
			writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			subminor = readb(ha->mem_ptr + IPS_REG_FLDP);

		} else {
			/* Programmed I/O */

			/* test 1st byte */
			outl(0, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
				return;

			outl(1, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
				return;

			/* Get Major version */
			outl(0x1FF, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			major = inb(ha->io_addr + IPS_REG_FLDP);

			/* Get Minor version */
			outl(0x1FE, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			minor = inb(ha->io_addr + IPS_REG_FLDP);

			/* Get SubMinor version */
			outl(0x1FD, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */

			subminor = inb(ha->io_addr + IPS_REG_FLDP);

		}
	} else {
		/* Morpheus Family - Send Command to the card */

		buffer = ha->ioctl_data;

		memset(buffer, 0, 0x1000);

		/* Borrow the last scb slot for this internal command. */
		scb = &ha->scbs[ha->max_cmds - 1];

		ips_init_scb(ha, scb);

		scb->timeout = ips_cmd_timeout;
		scb->cdb[0] = IPS_CMD_RW_BIOSFW;

		scb->cmd.flashfw.op_code = IPS_CMD_RW_BIOSFW;
		scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.flashfw.type = 1;
		scb->cmd.flashfw.direction = 0;
		scb->cmd.flashfw.count = cpu_to_le32(0x800);
		scb->cmd.flashfw.total_packets = 1;
		scb->cmd.flashfw.packet_num = 0;
		scb->data_len = 0x1000;
		scb->cmd.flashfw.buffer_addr = ha->ioctl_busaddr;

		/* issue the command */
		if (((ret =
		      ips_send_wait(ha, scb, ips_cmd_timeout,
				    intr)) == IPS_FAILURE)
		    || (ret == IPS_SUCCESS_IMM)
		    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
			/* Error occurred */

			return;
		}

		/* The BIOS header sits at 0xC0; verify its 0x55 0xAA signature. */
		if ((buffer[0xC0] == 0x55) && (buffer[0xC1] == 0xAA)) {
			major = buffer[0x1ff + 0xC0];	/* Offset 0x1ff after the header (0xc0) */
			minor = buffer[0x1fe + 0xC0];	/* Offset 0x1fe after the header (0xc0) */
			subminor = buffer[0x1fd + 0xC0];	/* Offset 0x1fd after the header (0xc0) */
		} else {
			return;
		}
	}

	/* Format as "M.msS.mm" using hex digits (e.g. "7.0A.12"-style). */
	ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
	ha->bios_version[1] = '.';
	ha->bios_version[2] = hexDigits[major & 0x0F];
	ha->bios_version[3] = hexDigits[subminor];
	ha->bios_version[4] = '.';
	ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
	ha->bios_version[6] = hexDigits[minor & 0x0F];
	ha->bios_version[7] = 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_hainit */
/* */
/* Routine Description: */
/* */
/* Initialize the controller */
/* */
/* NOTE: Assumes to be called from with a lock */
/* */
/****************************************************************************/
static int
ips_hainit(ips_ha_t * ha)
{
	int i;
	struct timeval tv;

	METHOD_TRACE("ips_hainit", 1);

	/* Returns 1 on success, 0 on any initialization failure. */
	if (!ha)
		return (0);

	if (ha->func.statinit)
		(*ha->func.statinit) (ha);

	if (ha->func.enableint)
		(*ha->func.enableint) (ha);

	/* Send FFDC */
	ha->reset_count = 1;
	do_gettimeofday(&tv);
	ha->last_ffdc = tv.tv_sec;
	ips_ffdc_reset(ha, IPS_INTR_IORL);

	if (!ips_read_config(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read config from controller.\n");

		return (0);
	}
	/* end if */
	if (!ips_read_adapter_status(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read controller status.\n");

		return (0);
	}

	/* Identify this controller */
	ips_identify_controller(ha);

	if (!ips_read_subsystem_parameters(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read subsystem parameters.\n");

		return (0);
	}

	/* write nvram user page 5 */
	if (!ips_write_driver_status(ha, IPS_INTR_IORL)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to write driver info to controller.\n");

		return (0);
	}

	/* If there are Logical Drives and a Reset Occurred, then an EraseStripeLock is Needed */
	if ((ha->conf->ucLogDriveCount > 0) && (ha->requires_esl == 1))
		ips_clear_adapter(ha, IPS_INTR_IORL);

	/* set limits on SID, LUN, BUS */
	ha->ntargets = IPS_MAX_TARGETS + 1;
	ha->nlun = 1;
	ha->nbus = (ha->enq->ucMaxPhysicalDevices / IPS_MAX_TARGETS) + 1;

	/* Max transfer size scales with the stripe-size code of drive 0. */
	switch (ha->conf->logical_drive[0].ucStripeSize) {
	case 4:
		ha->max_xfer = 0x10000;
		break;

	case 5:
		ha->max_xfer = 0x20000;
		break;

	case 6:
		ha->max_xfer = 0x40000;
		break;

	case 7:
	default:
		ha->max_xfer = 0x80000;
		break;
	}

	/* setup max concurrent commands */
	if (le32_to_cpu(ha->subsys->param[4]) & 0x1) {
		/* Use the new method */
		ha->max_cmds = ha->enq->ucConcurrentCmdCount;
	} else {
		/* use the old method */
		switch (ha->conf->logical_drive[0].ucStripeSize) {
		case 4:
			ha->max_cmds = 32;
			break;

		case 5:
			ha->max_cmds = 16;
			break;

		case 6:
			ha->max_cmds = 8;
			break;

		case 7:
		default:
			ha->max_cmds = 4;
			break;
		}
	}

	/* Limit the Active Commands on a Lite Adapter */
	if ((ha->ad_type == IPS_ADTYPE_SERVERAID3L) ||
	    (ha->ad_type == IPS_ADTYPE_SERVERAID4L) ||
	    (ha->ad_type == IPS_ADTYPE_SERVERAID4LX)) {
		if ((ha->max_cmds > MaxLiteCmds) && (MaxLiteCmds))
			ha->max_cmds = MaxLiteCmds;
	}

	/* set controller IDs */
	ha->ha_id[0] = IPS_ADAPTER_ID;
	for (i = 1; i < ha->nbus; i++) {
		/* Initiator id per bus comes from the adapter config (5 bits). */
		ha->ha_id[i] = ha->conf->init_id[i - 1] & 0x1f;
		ha->dcdb_active[i - 1] = 0;
	}

	return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_next */
/* */
/* Routine Description: */
/* */
/* Take the next command off the queue and send it to the controller */
/* */
/****************************************************************************/
/*
 * Pull queued work off the adapter's wait lists and issue it to the
 * controller: passthru (ioctl) commands first, then normal SCSI I/O.
 *
 * @ha:   per-adapter state; NULL is tolerated and ignored
 * @intr: IPS_INTR_ON when called without host->host_lock held -- this
 *        routine then takes and drops the lock itself around queue
 *        manipulation, releasing it across the potentially slow
 *        passthru setup and command build steps
 */
static void
ips_next(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	struct scsi_cmnd *SC;
	struct scsi_cmnd *p;
	struct scsi_cmnd *q;
	ips_copp_wait_item_t *item;
	int ret;
	struct Scsi_Host *host;
	METHOD_TRACE("ips_next", 1);
	if (!ha)
		return;
	host = ips_sh[ha->host_num];
	/*
	 * Block access to the queue function so
	 * this command won't time out
	 */
	if (intr == IPS_INTR_ON)
		spin_lock(host->host_lock);
	/* Refresh the controller's FFDC time stamp every 8 hours, but only
	 * while the adapter is idle (no active SCBs). */
	if ((ha->subsys->param[3] & 0x300000)
	    && (ha->scb_activelist.count == 0)) {
		struct timeval tv;
		do_gettimeofday(&tv);
		if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
			ha->last_ffdc = tv.tv_sec;
			ips_ffdc_time(ha);
		}
	}
	/*
	 * Send passthru commands
	 * These have priority over normal I/O
	 * but shouldn't affect performance too much
	 * since we limit the number that can be active
	 * on the card at any one time
	 */
	while ((ha->num_ioctl < IPS_MAX_IOCTL) &&
	       (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) {
		item = ips_removeq_copp_head(&ha->copp_waitlist);
		ha->num_ioctl++;
		/* Drop the host lock across the passthru build -- it may copy
		 * user data and must not be done under the spinlock. */
		if (intr == IPS_INTR_ON)
			spin_unlock(host->host_lock);
		scb->scsi_cmd = item->scsi_cmd;
		kfree(item);
		ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
		if (intr == IPS_INTR_ON)
			spin_lock(host->host_lock);
		switch (ret) {
		case IPS_FAILURE:
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_ERROR << 16;
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			}
			ips_freescb(ha, scb);
			break;
		case IPS_SUCCESS_IMM:
			/* completed without going to the controller */
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_OK << 16;
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			}
			ips_freescb(ha, scb);
			break;
		default:
			break;
		}		/* end case */
		if (ret != IPS_SUCCESS) {
			ha->num_ioctl--;
			continue;
		}
		ret = ips_send_cmd(ha, scb);
		/* Only a command actually accepted by the controller stays on
		 * the active list and counts against the ioctl limit. */
		if (ret == IPS_SUCCESS)
			ips_putq_scb_head(&ha->scb_activelist, scb);
		else
			ha->num_ioctl--;
		switch (ret) {
		case IPS_FAILURE:
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_ERROR << 16;
			}
			ips_freescb(ha, scb);
			break;
		case IPS_SUCCESS_IMM:
			ips_freescb(ha, scb);
			break;
		default:
			break;
		}		/* end case */
	}
	/*
	 * Send "Normal" I/O commands
	 */
	p = ha->scb_waitlist.head;
	while ((p) && (scb = ips_getscb(ha))) {
		/* Skip commands to a physical target that already has a DCDB
		 * outstanding (one command per physical device at a time). */
		if ((scmd_channel(p) > 0)
		    && (ha->
			dcdb_active[scmd_channel(p) -
				    1] & (1 << scmd_id(p)))) {
			ips_freescb(ha, scb);
			p = (struct scsi_cmnd *) p->host_scribble;
			continue;
		}
		q = p;
		SC = ips_removeq_wait(&ha->scb_waitlist, q);
		if (intr == IPS_INTR_ON)
			spin_unlock(host->host_lock);	/* Unlock HA after command is taken off queue */
		SC->result = DID_OK;
		SC->host_scribble = NULL;
		scb->target_id = SC->device->id;
		scb->lun = SC->device->lun;
		scb->bus = SC->device->channel;
		scb->scsi_cmd = SC;
		scb->breakup = 0;
		scb->data_len = 0;
		scb->callback = ipsintr_done;
		scb->timeout = ips_cmd_timeout;
		memset(&scb->cmd, 0, 16);
		/* copy in the CDB */
		memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
		scb->sg_count = scsi_dma_map(SC);
		BUG_ON(scb->sg_count < 0);
		if (scb->sg_count) {
			struct scatterlist *sg;
			int i;
			scb->flags |= IPS_SCB_MAP_SG;
			/* Build the controller SG list; ips_fill_scb_sg_single
			 * returns < 0 when the transfer must be broken up. */
			scsi_for_each_sg(SC, sg, scb->sg_count, i) {
				if (ips_fill_scb_sg_single
				    (ha, sg_dma_address(sg), scb, i,
				     sg_dma_len(sg)) < 0)
					break;
			}
			scb->dcdb.transfer_length = scb->data_len;
		} else {
			/* no data phase */
			scb->data_busaddr = 0L;
			scb->sg_len = 0;
			scb->data_len = 0;
			scb->dcdb.transfer_length = 0;
		}
		scb->dcdb.cmd_attribute =
		    ips_command_direction[scb->scsi_cmd->cmnd[0]];
		/* Allow a WRITE BUFFER Command to Have no Data */
		/* This is Used by Tape Flash Utilites */
		if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
		    (scb->data_len == 0))
			scb->dcdb.cmd_attribute = 0;
		/* no data direction bits set -> no transfer length */
		if (!(scb->dcdb.cmd_attribute & 0x3))
			scb->dcdb.transfer_length = 0;
		if (scb->data_len >= IPS_MAX_XFER) {
			scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
			scb->dcdb.transfer_length = 0;
		}
		if (intr == IPS_INTR_ON)
			spin_lock(host->host_lock);
		ret = ips_send_cmd(ha, scb);
		switch (ret) {
		case IPS_SUCCESS:
			ips_putq_scb_head(&ha->scb_activelist, scb);
			break;
		case IPS_FAILURE:
			if (scb->scsi_cmd) {
				scb->scsi_cmd->result = DID_ERROR << 16;
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			}
			/* release the per-device DCDB slot taken in send_cmd */
			if (scb->bus)
				ha->dcdb_active[scb->bus - 1] &=
				    ~(1 << scb->target_id);
			ips_freescb(ha, scb);
			break;
		case IPS_SUCCESS_IMM:
			if (scb->scsi_cmd)
				scb->scsi_cmd->scsi_done(scb->scsi_cmd);
			if (scb->bus)
				ha->dcdb_active[scb->bus - 1] &=
				    ~(1 << scb->target_id);
			ips_freescb(ha, scb);
			break;
		default:
			break;
		}		/* end case */
		p = (struct scsi_cmnd *) p->host_scribble;
	}			/* end while */
	if (intr == IPS_INTR_ON)
		spin_unlock(host->host_lock);
}
/****************************************************************************/
/* */
/* Routine Name: ips_putq_scb_head */
/* */
/* Routine Description: */
/* */
/* Add an item to the head of the queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Push an SCB onto the front of @queue.
 * ASSUMED to be called from within the HA lock.
 */
static void
ips_putq_scb_head(ips_scb_queue_t *queue, ips_scb_t *item)
{
	METHOD_TRACE("ips_putq_scb_head", 1);

	if (item == NULL)
		return;

	/* Link the new SCB ahead of the current head. */
	item->q_next = queue->head;
	queue->head = item;

	/* First element on an empty queue is also the tail. */
	if (queue->tail == NULL)
		queue->tail = item;

	queue->count++;
}
/****************************************************************************/
/* */
/* Routine Name: ips_removeq_scb_head */
/* */
/* Routine Description: */
/* */
/* Remove the head of the queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Pop and return the SCB at the front of @queue, or NULL if empty.
 * ASSUMED to be called from within the HA lock.
 */
static ips_scb_t *
ips_removeq_scb_head(ips_scb_queue_t *queue)
{
	ips_scb_t *scb;

	METHOD_TRACE("ips_removeq_scb_head", 1);

	scb = queue->head;
	if (scb == NULL)
		return (NULL);

	queue->head = scb->q_next;
	scb->q_next = NULL;

	/* Removed the only element -> queue is now empty. */
	if (queue->tail == scb)
		queue->tail = NULL;

	queue->count--;

	return (scb);
}
/****************************************************************************/
/* */
/* Routine Name: ips_removeq_scb */
/* */
/* Routine Description: */
/* */
/* Remove an item from a queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Unlink @item from anywhere in @queue; returns @item on success or
 * NULL if @item is NULL or not on the queue.
 * ASSUMED to be called from within the HA lock.
 */
static ips_scb_t *
ips_removeq_scb(ips_scb_queue_t *queue, ips_scb_t *item)
{
	ips_scb_t *prev;

	METHOD_TRACE("ips_removeq_scb", 1);

	if (item == NULL)
		return (NULL);

	/* Head removal is the simple case. */
	if (queue->head == item)
		return (ips_removeq_scb_head(queue));

	/* Walk the singly linked list looking for item's predecessor. */
	for (prev = queue->head; prev != NULL; prev = prev->q_next)
		if (prev->q_next == item)
			break;

	if (prev == NULL)
		return (NULL);	/* not on this queue */

	prev->q_next = item->q_next;
	if (item->q_next == NULL)
		queue->tail = prev;
	item->q_next = NULL;
	queue->count--;

	return (item);
}
/****************************************************************************/
/* */
/* Routine Name: ips_putq_wait_tail */
/* */
/* Routine Description: */
/* */
/* Add an item to the tail of the queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Append a scsi_cmnd to the tail of the wait queue.  The queue is
 * threaded through scsi_cmnd->host_scribble.
 * ASSUMED to be called from within the HA lock.
 */
static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
{
	METHOD_TRACE("ips_putq_wait_tail", 1);

	if (item == NULL)
		return;

	/* New tail terminates the list. */
	item->host_scribble = NULL;

	if (queue->tail != NULL)
		queue->tail->host_scribble = (char *) item;
	queue->tail = item;

	/* First element on an empty queue is also the head. */
	if (queue->head == NULL)
		queue->head = item;

	queue->count++;
}
/****************************************************************************/
/* */
/* Routine Name: ips_removeq_wait_head */
/* */
/* Routine Description: */
/* */
/* Remove the head of the queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Pop and return the scsi_cmnd at the front of the wait queue, or NULL
 * if the queue is empty.
 * ASSUMED to be called from within the HA lock.
 */
static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
{
	struct scsi_cmnd *cmd;

	METHOD_TRACE("ips_removeq_wait_head", 1);

	cmd = queue->head;
	if (cmd == NULL)
		return (NULL);

	/* The forward link lives in host_scribble. */
	queue->head = (struct scsi_cmnd *) cmd->host_scribble;
	cmd->host_scribble = NULL;

	if (queue->tail == cmd)
		queue->tail = NULL;

	queue->count--;

	return (cmd);
}
/****************************************************************************/
/* */
/* Routine Name: ips_removeq_wait */
/* */
/* Routine Description: */
/* */
/* Remove an item from a queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Unlink @item from anywhere in the wait queue; returns @item on
 * success or NULL if @item is NULL or not queued.
 * ASSUMED to be called from within the HA lock.
 */
static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
					  struct scsi_cmnd *item)
{
	struct scsi_cmnd *prev;

	METHOD_TRACE("ips_removeq_wait", 1);

	if (item == NULL)
		return (NULL);

	/* Head removal is the simple case. */
	if (queue->head == item)
		return (ips_removeq_wait_head(queue));

	/* Walk the host_scribble chain looking for item's predecessor. */
	for (prev = queue->head; prev != NULL;
	     prev = (struct scsi_cmnd *) prev->host_scribble)
		if ((struct scsi_cmnd *) prev->host_scribble == item)
			break;

	if (prev == NULL)
		return (NULL);	/* not on this queue */

	prev->host_scribble = item->host_scribble;
	if (item->host_scribble == NULL)
		queue->tail = prev;
	item->host_scribble = NULL;
	queue->count--;

	return (item);
}
/****************************************************************************/
/* */
/* Routine Name: ips_putq_copp_tail */
/* */
/* Routine Description: */
/* */
/* Add an item to the tail of the queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Append a passthru wait item to the tail of @queue.
 * ASSUMED to be called from within the HA lock.
 */
static void
ips_putq_copp_tail(ips_copp_queue_t *queue, ips_copp_wait_item_t *item)
{
	METHOD_TRACE("ips_putq_copp_tail", 1);

	if (item == NULL)
		return;

	/* New tail terminates the list. */
	item->next = NULL;

	if (queue->tail != NULL)
		queue->tail->next = item;
	queue->tail = item;

	/* First element on an empty queue is also the head. */
	if (queue->head == NULL)
		queue->head = item;

	queue->count++;
}
/****************************************************************************/
/* */
/* Routine Name: ips_removeq_copp_head */
/* */
/* Routine Description: */
/* */
/* Remove the head of the queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Pop and return the passthru wait item at the front of @queue, or
 * NULL if the queue is empty.
 * ASSUMED to be called from within the HA lock.
 */
static ips_copp_wait_item_t *
ips_removeq_copp_head(ips_copp_queue_t *queue)
{
	ips_copp_wait_item_t *entry;

	METHOD_TRACE("ips_removeq_copp_head", 1);

	entry = queue->head;
	if (entry == NULL)
		return (NULL);

	queue->head = entry->next;
	entry->next = NULL;

	/* Removed the only element -> queue is now empty. */
	if (queue->tail == entry)
		queue->tail = NULL;

	queue->count--;

	return (entry);
}
/****************************************************************************/
/* */
/* Routine Name: ips_removeq_copp */
/* */
/* Routine Description: */
/* */
/* Remove an item from a queue */
/* */
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
/*
 * Unlink @item from anywhere in @queue; returns @item on success or
 * NULL if @item is NULL or not queued.
 * ASSUMED to be called from within the HA lock.
 */
static ips_copp_wait_item_t *
ips_removeq_copp(ips_copp_queue_t *queue, ips_copp_wait_item_t *item)
{
	ips_copp_wait_item_t *prev;

	METHOD_TRACE("ips_removeq_copp", 1);

	if (item == NULL)
		return (NULL);

	/* Head removal is the simple case. */
	if (queue->head == item)
		return (ips_removeq_copp_head(queue));

	/* Walk the list looking for item's predecessor. */
	for (prev = queue->head; prev != NULL; prev = prev->next)
		if (prev->next == item)
			break;

	if (prev == NULL)
		return (NULL);	/* not on this queue */

	prev->next = item->next;
	if (item->next == NULL)
		queue->tail = prev;
	item->next = NULL;
	queue->count--;

	return (item);
}
/****************************************************************************/
/* */
/* Routine Name: ipsintr_blocking */
/* */
/* Routine Description: */
/* */
/* Finalize an interrupt for internal commands */
/* */
/****************************************************************************/
/*
 * Finalize an interrupt for internal (blocking) commands: clear the
 * adapter's wait flag when the completing command is the one being
 * waited on, then recycle the SCB.
 *
 * Fix: the original called ips_freescb() first and only then read
 * scb->cdb[0], i.e. it inspected the command block after handing it
 * back to the free pool -- a read-after-release that could see data
 * from a concurrently reissued SCB.  Test the flag before freeing.
 */
static void
ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
{
	METHOD_TRACE("ipsintr_blocking", 2);

	/* Wake ips_wait() if this is the command it is spinning on. */
	if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0]))
		ha->waitflag = FALSE;

	ips_freescb(ha, scb);
}
/****************************************************************************/
/* */
/* Routine Name: ipsintr_done */
/* */
/* Routine Description: */
/* */
/* Finalize an interrupt for non-internal commands */
/* */
/****************************************************************************/
/*
 * Finalize an interrupt for non-internal commands: validate that the
 * completion maps to a real outstanding scsi_cmnd, then hand it to
 * ips_done() for housekeeping.
 */
static void
ipsintr_done(ips_ha_t * ha, ips_scb_t * scb)
{
	METHOD_TRACE("ipsintr_done", 2);

	/* No SCB at all -- nothing to complete. */
	if (scb == NULL) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Spurious interrupt; scb NULL.\n");
		return;
	}

	/* SCB without an attached midlayer command -- also spurious. */
	if (scb->scsi_cmd == NULL) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Spurious interrupt; scsi_cmd not set.\n");
		return;
	}

	ips_done(ha, scb);
}
/****************************************************************************/
/* */
/* Routine Name: ips_done */
/* */
/* Routine Description: */
/* */
/* Do housekeeping on completed commands */
/* ASSUMED to be called form within the request lock */
/****************************************************************************/
/*
 * Housekeeping for a completed command: finish passthru accounting,
 * resubmit the remainder of a broken-up data transfer, or complete the
 * scsi_cmnd back to the midlayer and recycle the SCB.
 * ASSUMED to be called from within the request lock (see header).
 */
static void
ips_done(ips_ha_t * ha, ips_scb_t * scb)
{
	int ret;
	METHOD_TRACE("ips_done", 1);
	if (!scb)
		return;
	if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd))) {
		ips_cleanup_passthru(ha, scb);
		ha->num_ioctl--;
	} else {
		/*
		 * Check to see if this command had too much
		 * data and had to be broke up.  If so, queue
		 * the rest of the data and continue.
		 */
		if ((scb->breakup) || (scb->sg_break)) {
			struct scatterlist *sg;
			int i, sg_dma_index, ips_sg_index = 0;
			/* we had a data breakup */
			scb->data_len = 0;
			sg = scsi_sglist(scb->scsi_cmd);
			/* Spin forward to last dma chunk */
			sg_dma_index = scb->breakup;
			for (i = 0; i < scb->breakup; i++)
				sg = sg_next(sg);
			/* Take care of possible partial on last chunk */
			ips_fill_scb_sg_single(ha,
					       sg_dma_address(sg),
					       scb, ips_sg_index++,
					       sg_dma_len(sg));
			/* Map the remaining scatterlist entries into the SCB;
			 * ips_fill_scb_sg_single returns < 0 when full, which
			 * triggers a further breakup on the next completion. */
			for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
			     sg_dma_index++, sg = sg_next(sg)) {
				if (ips_fill_scb_sg_single
				    (ha,
				     sg_dma_address(sg),
				     scb, ips_sg_index++,
				     sg_dma_len(sg)) < 0)
					break;
			}
			/* Rebuild DCDB transfer length/direction for the
			 * resubmitted remainder, same rules as ips_next(). */
			scb->dcdb.transfer_length = scb->data_len;
			scb->dcdb.cmd_attribute |=
			    ips_command_direction[scb->scsi_cmd->cmnd[0]];
			if (!(scb->dcdb.cmd_attribute & 0x3))
				scb->dcdb.transfer_length = 0;
			if (scb->data_len >= IPS_MAX_XFER) {
				scb->dcdb.cmd_attribute |= IPS_TRANSFER64K;
				scb->dcdb.transfer_length = 0;
			}
			ret = ips_send_cmd(ha, scb);
			switch (ret) {
			case IPS_FAILURE:
				if (scb->scsi_cmd) {
					scb->scsi_cmd->result = DID_ERROR << 16;
					scb->scsi_cmd->scsi_done(scb->scsi_cmd);
				}
				ips_freescb(ha, scb);
				break;
			case IPS_SUCCESS_IMM:
				/* NOTE(review): immediate completion of a
				 * resubmitted remainder is treated as an
				 * error, same as IPS_FAILURE. */
				if (scb->scsi_cmd) {
					scb->scsi_cmd->result = DID_ERROR << 16;
					scb->scsi_cmd->scsi_done(scb->scsi_cmd);
				}
				ips_freescb(ha, scb);
				break;
			default:
				break;
			}	/* end case */
			/* remainder is in flight; completion happens later */
			return;
		}
	}			/* end if passthru */
	/* release the per-device DCDB slot for physical targets */
	if (scb->bus) {
		ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
	}
	scb->scsi_cmd->scsi_done(scb->scsi_cmd);
	ips_freescb(ha, scb);
}
/****************************************************************************/
/* */
/* Routine Name: ips_map_status */
/* */
/* Routine Description: */
/* */
/* Map Controller Error codes to Linux Error Codes */
/* */
/****************************************************************************/
/*
 * Map controller basic/extended status codes into a Linux SCSI result
 * (host byte + status byte) stored in scb->scsi_cmd->result.
 *
 * @ha:  adapter state (used for debug output only)
 * @scb: completed command carrying basic_status / extended_status
 * @sp:  status block (unused here beyond the signature)
 *
 * Always returns 1.
 */
static int
ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
{
	int errcode;
	int device_error;
	uint32_t transfer_len;
	IPS_DCDB_TABLE_TAPE *tapeDCDB;
	IPS_SCSI_INQ_DATA inquiryData;
	METHOD_TRACE("ips_map_status", 1);
	if (scb->bus) {
		DEBUG_VAR(2,
			  "(%s%d) Physical device error (%d %d %d): %x %x, Sense Key: %x, ASC: %x, ASCQ: %x",
			  ips_name, ha->host_num,
			  scb->scsi_cmd->device->channel,
			  scb->scsi_cmd->device->id, scb->scsi_cmd->device->lun,
			  scb->basic_status, scb->extended_status,
			  scb->extended_status ==
			  IPS_ERR_CKCOND ? scb->dcdb.sense_info[2] & 0xf : 0,
			  scb->extended_status ==
			  IPS_ERR_CKCOND ? scb->dcdb.sense_info[12] : 0,
			  scb->extended_status ==
			  IPS_ERR_CKCOND ? scb->dcdb.sense_info[13] : 0);
	}
	/* default driver error */
	errcode = DID_ERROR;
	device_error = 0;
	switch (scb->basic_status & IPS_GSC_STATUS_MASK) {
	case IPS_CMD_TIMEOUT:
		errcode = DID_TIME_OUT;
		break;
	case IPS_INVAL_OPCO:
	case IPS_INVAL_CMD_BLK:
	case IPS_INVAL_PARM_BLK:
	case IPS_LD_ERROR:
	case IPS_CMD_CMPLT_WERROR:
		/* keep the DID_ERROR default */
		break;
	case IPS_PHYS_DRV_ERROR:
		/* physical-device errors are refined by the extended status */
		switch (scb->extended_status) {
		case IPS_ERR_SEL_TO:
			if (scb->bus)
				errcode = DID_NO_CONNECT;
			break;
		case IPS_ERR_OU_RUN:
			/* the tape (extended) DCDB keeps its transfer length
			 * in a different layout than the classic DCDB */
			if ((scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB) ||
			    (scb->cmd.dcdb.op_code ==
			     IPS_CMD_EXTENDED_DCDB_SG)) {
				tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
				transfer_len = tapeDCDB->transfer_length;
			} else {
				transfer_len =
				    (uint32_t) scb->dcdb.transfer_length;
			}
			if ((scb->bus) && (transfer_len < scb->data_len)) {
				/* Underrun - set default to no error */
				errcode = DID_OK;
				/* Restrict access to physical DASD */
				if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
					ips_scmd_buf_read(scb->scsi_cmd,
							  &inquiryData, sizeof (inquiryData));
					/* fail INQUIRY of a physical disk so
					 * the midlayer won't attach to it */
					if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
						errcode = DID_TIME_OUT;
						break;
					}
				}
			} else
				errcode = DID_ERROR;
			break;
		case IPS_ERR_RECOVERY:
			/* don't fail recovered errors */
			if (scb->bus)
				errcode = DID_OK;
			break;
		case IPS_ERR_HOST_RESET:
		case IPS_ERR_DEV_RESET:
			errcode = DID_RESET;
			break;
		case IPS_ERR_CKCOND:
			if (scb->bus) {
				/* copy sense data out of whichever DCDB
				 * layout was used for this command */
				if ((scb->cmd.dcdb.op_code ==
				     IPS_CMD_EXTENDED_DCDB)
				    || (scb->cmd.dcdb.op_code ==
					IPS_CMD_EXTENDED_DCDB_SG)) {
					tapeDCDB =
					    (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
					memcpy(scb->scsi_cmd->sense_buffer,
					       tapeDCDB->sense_info,
					       SCSI_SENSE_BUFFERSIZE);
				} else {
					memcpy(scb->scsi_cmd->sense_buffer,
					       scb->dcdb.sense_info,
					       SCSI_SENSE_BUFFERSIZE);
				}
				device_error = 2;	/* check condition */
			}
			errcode = DID_OK;
			break;
		default:
			errcode = DID_ERROR;
			break;
		}		/* end switch */
	}			/* end switch */
	/* low byte: SCSI status; high bytes: host error code */
	scb->scsi_cmd->result = device_error | (errcode << 16);
	return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_send_wait */
/* */
/* Routine Description: */
/* */
/* Send a command to the controller and wait for it to return */
/* */
/* The FFDC Time Stamp use this function for the callback, but doesn't */
/* actually need to wait. */
/****************************************************************************/
/*
 * Send a command to the controller and (normally) spin until it
 * completes via the ipsintr_blocking callback.
 *
 * The FFDC time stamp uses this function for the callback but does not
 * actually need to wait, so IPS_FFDC skips both the flag setup and the
 * wait loop.
 *
 * Returns the ips_send_cmd() result, or the ips_wait() result when a
 * wait was performed.
 */
static int
ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
{
	int ret;

	METHOD_TRACE("ips_send_wait", 1);

	if (intr != IPS_FFDC) {	/* Won't be Waiting if this is a Time Stamp */
		ha->waitflag = TRUE;
		ha->cmd_in_progress = scb->cdb[0];
	}
	scb->callback = ipsintr_blocking;

	ret = ips_send_cmd(ha, scb);

	/* Only block when the command neither failed nor completed
	 * immediately, and this is not a fire-and-forget time stamp. */
	if ((ret != IPS_FAILURE) && (ret != IPS_SUCCESS_IMM) &&
	    (intr != IPS_FFDC))
		ret = ips_wait(ha, timeout, intr);

	return (ret);
}
/****************************************************************************/
/* */
/* Routine Name: ips_scmd_buf_write */
/* */
/* Routine Description: */
/* Write data to struct scsi_cmnd request_buffer at proper offsets */
/****************************************************************************/
/*
 * Copy @count bytes from the linear buffer @data into the scatter/gather
 * list of @scmd, with local interrupts disabled for the duration of the
 * copy (presumably to keep the SG mapping stable -- mirrors the
 * original locking; confirm before relaxing).
 */
static void
ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
	unsigned long flags;

	local_irq_save(flags);
	scsi_sg_copy_from_buffer(scmd, data, count);
	local_irq_restore(flags);
}
/****************************************************************************/
/* */
/* Routine Name: ips_scmd_buf_read */
/* */
/* Routine Description: */
/* Copy data from a struct scsi_cmnd to a new, linear buffer */
/****************************************************************************/
/*
 * Copy @count bytes out of the scatter/gather list of @scmd into the
 * linear buffer @data, with local interrupts disabled for the duration
 * of the copy (mirrors ips_scmd_buf_write).
 */
static void
ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
	unsigned long flags;

	local_irq_save(flags);
	scsi_sg_copy_to_buffer(scmd, data, count);
	local_irq_restore(flags);
}
/****************************************************************************/
/* */
/* Routine Name: ips_send_cmd */
/* */
/* Routine Description: */
/* */
/* Map SCSI commands to ServeRAID commands for logical drives */
/* */
/****************************************************************************/
/*
 * Translate a SCSI command into a ServeRAID controller command and
 * issue it via ha->func.issue().
 *
 * Three paths:
 *  - internal commands (no scsi_cmd): issued as-is; rejected for
 *    physical buses,
 *  - logical-bus commands: interpreted/emulated here,
 *  - physical-bus commands: wrapped in a (tape or classic) DCDB.
 *
 * Returns IPS_SUCCESS, IPS_SUCCESS_IMM (completed here, nothing sent),
 * or IPS_FAILURE, per the issue routine and the emulation above.
 *
 * Fix vs. original: the START_STOP case was missing its break and fell
 * through into TEST_UNIT_READY/INQUIRY, issuing a spurious GET_LD_INFO
 * for non-adapter targets (matches the upstream missing-break fix).
 */
static int
ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
{
	int ret;
	char *sp;
	int device_error;
	IPS_DCDB_TABLE_TAPE *tapeDCDB;
	int TimeOut;

	METHOD_TRACE("ips_send_cmd", 1);

	ret = IPS_SUCCESS;

	if (!scb->scsi_cmd) {
		/* internal command */
		if (scb->bus > 0) {
			/* Controller commands can't be issued */
			/* to real devices -- fail them */
			if ((ha->waitflag == TRUE) &&
			    (ha->cmd_in_progress == scb->cdb[0])) {
				ha->waitflag = FALSE;
			}
			return (1);
		}
	} else if ((scb->bus == 0) && (!ips_is_passthru(scb->scsi_cmd))) {
		/* command to logical bus -- interpret */
		ret = IPS_SUCCESS_IMM;

		switch (scb->scsi_cmd->cmnd[0]) {
		case ALLOW_MEDIUM_REMOVAL:
		case REZERO_UNIT:
		case ERASE:
		case WRITE_FILEMARKS:
		case SPACE:
			/* not meaningful for a logical drive */
			scb->scsi_cmd->result = DID_ERROR << 16;
			break;

		case START_STOP:
			scb->scsi_cmd->result = DID_OK << 16;
			break;	/* was missing: fell through to TUR/INQUIRY */

		case TEST_UNIT_READY:
		case INQUIRY:
			if (scb->target_id == IPS_ADAPTER_ID) {
				/*
				 * Either we have a TUR
				 * or we have a SCSI inquiry
				 */
				if (scb->scsi_cmd->cmnd[0] == TEST_UNIT_READY)
					scb->scsi_cmd->result = DID_OK << 16;
				if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
					/* emulate an inquiry response for
					 * the adapter itself (processor
					 * device type) */
					IPS_SCSI_INQ_DATA inquiry;
					memset(&inquiry, 0,
					       sizeof (IPS_SCSI_INQ_DATA));
					inquiry.DeviceType =
					    IPS_SCSI_INQ_TYPE_PROCESSOR;
					inquiry.DeviceTypeQualifier =
					    IPS_SCSI_INQ_LU_CONNECTED;
					inquiry.Version = IPS_SCSI_INQ_REV2;
					inquiry.ResponseDataFormat =
					    IPS_SCSI_INQ_RD_REV2;
					inquiry.AdditionalLength = 31;
					inquiry.Flags[0] =
					    IPS_SCSI_INQ_Address16;
					inquiry.Flags[1] =
					    IPS_SCSI_INQ_WBus16 |
					    IPS_SCSI_INQ_Sync;
					strncpy(inquiry.VendorId, "IBM ",
						8);
					strncpy(inquiry.ProductId,
						"SERVERAID ", 16);
					strncpy(inquiry.ProductRevisionLevel,
						"1.00", 4);
					ips_scmd_buf_write(scb->scsi_cmd,
							   &inquiry,
							   sizeof (inquiry));
					scb->scsi_cmd->result = DID_OK << 16;
				}
			} else {
				/* logical drive: ask the controller for
				 * drive info; result is interpreted later
				 * in ips_chkstatus() */
				scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
				scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
				scb->cmd.logical_info.reserved = 0;
				scb->cmd.logical_info.reserved2 = 0;
				scb->data_len = sizeof (IPS_LD_INFO);
				scb->data_busaddr = ha->logical_drive_info_dma_addr;
				scb->flags = 0;
				scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
				ret = IPS_SUCCESS;
			}
			break;

		case REQUEST_SENSE:
			ips_reqsen(ha, scb);
			scb->scsi_cmd->result = DID_OK << 16;
			break;

		case READ_6:
		case WRITE_6:
			if (!scb->sg_len) {
				scb->cmd.basic_io.op_code =
				    (scb->scsi_cmd->cmnd[0] ==
				     READ_6) ? IPS_CMD_READ : IPS_CMD_WRITE;
				scb->cmd.basic_io.enhanced_sg = 0;
				scb->cmd.basic_io.sg_addr =
				    cpu_to_le32(scb->data_busaddr);
			} else {
				scb->cmd.basic_io.op_code =
				    (scb->scsi_cmd->cmnd[0] ==
				     READ_6) ? IPS_CMD_READ_SG :
				    IPS_CMD_WRITE_SG;
				scb->cmd.basic_io.enhanced_sg =
				    IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
				scb->cmd.basic_io.sg_addr =
				    cpu_to_le32(scb->sg_busaddr);
			}
			scb->cmd.basic_io.segment_4G = 0;
			scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
			scb->cmd.basic_io.log_drv = scb->target_id;
			scb->cmd.basic_io.sg_count = scb->sg_len;
			/* a non-zero lba means this is a breakup
			 * continuation: advance past the part done */
			if (scb->cmd.basic_io.lba)
				le32_add_cpu(&scb->cmd.basic_io.lba,
					     le16_to_cpu(scb->cmd.basic_io.
							 sector_count));
			else
				scb->cmd.basic_io.lba =
				    (((scb->scsi_cmd->
				       cmnd[1] & 0x1f) << 16) | (scb->scsi_cmd->
								 cmnd[2] << 8) |
				     (scb->scsi_cmd->cmnd[3]));
			scb->cmd.basic_io.sector_count =
			    cpu_to_le16(scb->data_len / IPS_BLKSIZE);
			/* READ(6)/WRITE(6): transfer length 0 means 256 */
			if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0)
				scb->cmd.basic_io.sector_count =
				    cpu_to_le16(256);
			ret = IPS_SUCCESS;
			break;

		case READ_10:
		case WRITE_10:
			if (!scb->sg_len) {
				scb->cmd.basic_io.op_code =
				    (scb->scsi_cmd->cmnd[0] ==
				     READ_10) ? IPS_CMD_READ : IPS_CMD_WRITE;
				scb->cmd.basic_io.enhanced_sg = 0;
				scb->cmd.basic_io.sg_addr =
				    cpu_to_le32(scb->data_busaddr);
			} else {
				scb->cmd.basic_io.op_code =
				    (scb->scsi_cmd->cmnd[0] ==
				     READ_10) ? IPS_CMD_READ_SG :
				    IPS_CMD_WRITE_SG;
				scb->cmd.basic_io.enhanced_sg =
				    IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
				scb->cmd.basic_io.sg_addr =
				    cpu_to_le32(scb->sg_busaddr);
			}
			scb->cmd.basic_io.segment_4G = 0;
			scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
			scb->cmd.basic_io.log_drv = scb->target_id;
			scb->cmd.basic_io.sg_count = scb->sg_len;
			/* breakup continuation, as in the READ_6 case */
			if (scb->cmd.basic_io.lba)
				le32_add_cpu(&scb->cmd.basic_io.lba,
					     le16_to_cpu(scb->cmd.basic_io.
							 sector_count));
			else
				scb->cmd.basic_io.lba =
				    ((scb->scsi_cmd->cmnd[2] << 24) | (scb->
								       scsi_cmd->
								       cmnd[3]
								       << 16) |
				     (scb->scsi_cmd->cmnd[4] << 8) | scb->
				     scsi_cmd->cmnd[5]);
			scb->cmd.basic_io.sector_count =
			    cpu_to_le16(scb->data_len / IPS_BLKSIZE);
			/* was cpu_to_le16(); le16_to_cpu matches the READ_6
			 * path -- identical behavior (the zero test is
			 * endian-neutral), just consistent */
			if (le16_to_cpu(scb->cmd.basic_io.sector_count) == 0) {
				/*
				 * This is a null condition
				 * we don't have to do anything
				 * so just return
				 */
				scb->scsi_cmd->result = DID_OK << 16;
			} else
				ret = IPS_SUCCESS;
			break;

		case RESERVE:
		case RELEASE:
			scb->scsi_cmd->result = DID_OK << 16;
			break;

		case MODE_SENSE:
			/* answered from the controller enquiry data */
			scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
			scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
			scb->cmd.basic_io.segment_4G = 0;
			scb->cmd.basic_io.enhanced_sg = 0;
			scb->data_len = sizeof (*ha->enq);
			scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
			ret = IPS_SUCCESS;
			break;

		case READ_CAPACITY:
			/* answered from the logical drive info */
			scb->cmd.logical_info.op_code = IPS_CMD_GET_LD_INFO;
			scb->cmd.logical_info.command_id = IPS_COMMAND_ID(ha, scb);
			scb->cmd.logical_info.reserved = 0;
			scb->cmd.logical_info.reserved2 = 0;
			scb->cmd.logical_info.reserved3 = 0;
			scb->data_len = sizeof (IPS_LD_INFO);
			scb->data_busaddr = ha->logical_drive_info_dma_addr;
			scb->flags = 0;
			scb->cmd.logical_info.buffer_addr = scb->data_busaddr;
			ret = IPS_SUCCESS;
			break;

		case SEND_DIAGNOSTIC:
		case REASSIGN_BLOCKS:
		case FORMAT_UNIT:
		case SEEK_10:
		case VERIFY:
		case READ_DEFECT_DATA:
		case READ_BUFFER:
		case WRITE_BUFFER:
			/* accepted as no-ops on a logical drive */
			scb->scsi_cmd->result = DID_OK << 16;
			break;

		default:
			/* Set the Return Info to appear like the Command was */
			/* attempted, a Check Condition occurred, and Sense */
			/* Data indicating an Invalid CDB OpCode is returned. */
			sp = (char *) scb->scsi_cmd->sense_buffer;
			sp[0] = 0x70;	/* Error Code */
			sp[2] = ILLEGAL_REQUEST;	/* Sense Key 5 Illegal Req. */
			sp[7] = 0x0A;	/* Additional Sense Length */
			sp[12] = 0x20;	/* ASC = Invalid OpCode */
			sp[13] = 0x00;	/* ASCQ */
			device_error = 2;	/* Indicate Check Condition */
			scb->scsi_cmd->result = device_error | (DID_OK << 16);
			break;
		}		/* end switch */
	}
	/* end if */
	if (ret == IPS_SUCCESS_IMM)
		return (ret);

	/* setup DCDB */
	if (scb->bus > 0) {
		/* If we already know the Device is Not there, no need to attempt a Command */
		/* This also protects an NT FailOver Controller from getting CDB's sent to it */
		if (ha->conf->dev[scb->bus - 1][scb->target_id].ucState == 0) {
			scb->scsi_cmd->result = DID_NO_CONNECT << 16;
			return (IPS_SUCCESS_IMM);
		}

		/* mark this physical device busy (cleared on completion) */
		ha->dcdb_active[scb->bus - 1] |= (1 << scb->target_id);
		scb->cmd.dcdb.command_id = IPS_COMMAND_ID(ha, scb);
		scb->cmd.dcdb.dcdb_address = cpu_to_le32(scb->scb_busaddr +
							 (unsigned long) &scb->
							 dcdb -
							 (unsigned long) scb);
		scb->cmd.dcdb.reserved = 0;
		scb->cmd.dcdb.reserved2 = 0;
		scb->cmd.dcdb.reserved3 = 0;
		scb->cmd.dcdb.segment_4G = 0;
		scb->cmd.dcdb.enhanced_sg = 0;
		TimeOut = scb->scsi_cmd->request->timeout;

		if (ha->subsys->param[4] & 0x00100000) {	/* If NEW Tape DCDB is Supported */
			if (!scb->sg_len) {
				scb->cmd.dcdb.op_code = IPS_CMD_EXTENDED_DCDB;
			} else {
				scb->cmd.dcdb.op_code =
				    IPS_CMD_EXTENDED_DCDB_SG;
				scb->cmd.dcdb.enhanced_sg =
				    IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
			}
			tapeDCDB = (IPS_DCDB_TABLE_TAPE *) & scb->dcdb;	/* Use Same Data Area as Old DCDB Struct */
			tapeDCDB->device_address =
			    ((scb->bus - 1) << 4) | scb->target_id;
			tapeDCDB->cmd_attribute |= IPS_DISCONNECT_ALLOWED;
			tapeDCDB->cmd_attribute &= ~IPS_TRANSFER64K;	/* Always Turn OFF 64K Size Flag */
			if (TimeOut) {
				if (TimeOut < (10 * HZ))
					tapeDCDB->cmd_attribute |= IPS_TIMEOUT10;	/* TimeOut is 10 Seconds */
				else if (TimeOut < (60 * HZ))
					tapeDCDB->cmd_attribute |= IPS_TIMEOUT60;	/* TimeOut is 60 Seconds */
				else if (TimeOut < (1200 * HZ))
					tapeDCDB->cmd_attribute |= IPS_TIMEOUT20M;	/* TimeOut is 20 Minutes */
			}
			tapeDCDB->cdb_length = scb->scsi_cmd->cmd_len;
			tapeDCDB->reserved_for_LUN = 0;
			tapeDCDB->transfer_length = scb->data_len;
			if (scb->cmd.dcdb.op_code == IPS_CMD_EXTENDED_DCDB_SG)
				tapeDCDB->buffer_pointer =
				    cpu_to_le32(scb->sg_busaddr);
			else
				tapeDCDB->buffer_pointer =
				    cpu_to_le32(scb->data_busaddr);
			tapeDCDB->sg_count = scb->sg_len;
			tapeDCDB->sense_length = sizeof (tapeDCDB->sense_info);
			tapeDCDB->scsi_status = 0;
			tapeDCDB->reserved = 0;
			memcpy(tapeDCDB->scsi_cdb, scb->scsi_cmd->cmnd,
			       scb->scsi_cmd->cmd_len);
		} else {
			if (!scb->sg_len) {
				scb->cmd.dcdb.op_code = IPS_CMD_DCDB;
			} else {
				scb->cmd.dcdb.op_code = IPS_CMD_DCDB_SG;
				scb->cmd.dcdb.enhanced_sg =
				    IPS_USE_ENH_SGLIST(ha) ? 0xFF : 0;
			}
			scb->dcdb.device_address =
			    ((scb->bus - 1) << 4) | scb->target_id;
			scb->dcdb.cmd_attribute |= IPS_DISCONNECT_ALLOWED;
			if (TimeOut) {
				if (TimeOut < (10 * HZ))
					scb->dcdb.cmd_attribute |= IPS_TIMEOUT10;	/* TimeOut is 10 Seconds */
				else if (TimeOut < (60 * HZ))
					scb->dcdb.cmd_attribute |= IPS_TIMEOUT60;	/* TimeOut is 60 Seconds */
				else if (TimeOut < (1200 * HZ))
					scb->dcdb.cmd_attribute |= IPS_TIMEOUT20M;	/* TimeOut is 20 Minutes */
			}
			scb->dcdb.transfer_length = scb->data_len;
			if (scb->dcdb.cmd_attribute & IPS_TRANSFER64K)
				scb->dcdb.transfer_length = 0;
			if (scb->cmd.dcdb.op_code == IPS_CMD_DCDB_SG)
				scb->dcdb.buffer_pointer =
				    cpu_to_le32(scb->sg_busaddr);
			else
				scb->dcdb.buffer_pointer =
				    cpu_to_le32(scb->data_busaddr);
			scb->dcdb.cdb_length = scb->scsi_cmd->cmd_len;
			scb->dcdb.sense_length = sizeof (scb->dcdb.sense_info);
			scb->dcdb.sg_count = scb->sg_len;
			scb->dcdb.reserved = 0;
			memcpy(scb->dcdb.scsi_cdb, scb->scsi_cmd->cmnd,
			       scb->scsi_cmd->cmd_len);
			scb->dcdb.scsi_status = 0;
			scb->dcdb.reserved2[0] = 0;
			scb->dcdb.reserved2[1] = 0;
			scb->dcdb.reserved2[2] = 0;
		}
	}
	return ((*ha->func.issue) (ha, scb));
}
/****************************************************************************/
/* */
/* Routine Name: ips_chk_status */
/* */
/* Routine Description: */
/* */
/* Check the status of commands to logical drives */
/* Assumed to be called with the HA lock */
/****************************************************************************/
/*
 * Check the status of a completed logical-drive command: record its
 * basic/extended status in the SCB, remove it from the active queue,
 * post-process emulated SCSI commands (inquiry/sense/mode-sense etc.)
 * and set scb->scsi_cmd->result accordingly.
 * Assumed to be called with the HA lock (see header comment).
 */
static void
ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
{
	ips_scb_t *scb;
	ips_stat_t *sp;
	uint8_t basic_status;
	uint8_t ext_status;
	int errcode;
	IPS_SCSI_INQ_DATA inquiryData;
	METHOD_TRACE("ips_chkstatus", 1);
	/* the completion status identifies the SCB by command id */
	scb = &ha->scbs[pstatus->fields.command_id];
	scb->basic_status = basic_status =
	    pstatus->fields.basic_status & IPS_BASIC_STATUS_MASK;
	scb->extended_status = ext_status = pstatus->fields.extended_status;
	sp = &ha->sp;
	sp->residue_len = 0;
	sp->scb_addr = (void *) scb;
	/* Remove the item from the active queue */
	ips_removeq_scb(&ha->scb_activelist, scb);
	if (!scb->scsi_cmd)
		/* internal commands are handled in do_ipsintr */
		return;
	DEBUG_VAR(2, "(%s%d) ips_chkstatus: cmd 0x%X id %d (%d %d %d)",
		  ips_name,
		  ha->host_num,
		  scb->cdb[0],
		  scb->cmd.basic_io.command_id,
		  scb->bus, scb->target_id, scb->lun);
	if ((scb->scsi_cmd) && (ips_is_passthru(scb->scsi_cmd)))
		/* passthru - just returns the raw result */
		return;
	errcode = DID_OK;
	if (((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_SUCCESS) ||
	    ((basic_status & IPS_GSC_STATUS_MASK) == IPS_CMD_RECOVERED_ERROR)) {
		if (scb->bus == 0) {
			if ((basic_status & IPS_GSC_STATUS_MASK) ==
			    IPS_CMD_RECOVERED_ERROR) {
				DEBUG_VAR(1,
					  "(%s%d) Recovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
					  ips_name, ha->host_num,
					  scb->cmd.basic_io.op_code,
					  basic_status, ext_status);
			}
			/* post-process the emulated commands set up in
			 * ips_send_cmd() now that the controller data
			 * (LD info / enquiry) is available */
			switch (scb->scsi_cmd->cmnd[0]) {
			case ALLOW_MEDIUM_REMOVAL:
			case REZERO_UNIT:
			case ERASE:
			case WRITE_FILEMARKS:
			case SPACE:
				errcode = DID_ERROR;
				break;
			case START_STOP:
				break;
			case TEST_UNIT_READY:
				/* offline drives report not-ready */
				if (!ips_online(ha, scb)) {
					errcode = DID_TIME_OUT;
				}
				break;
			case INQUIRY:
				if (ips_online(ha, scb)) {
					ips_inquiry(ha, scb);
				} else {
					errcode = DID_TIME_OUT;
				}
				break;
			case REQUEST_SENSE:
				ips_reqsen(ha, scb);
				break;
			case READ_6:
			case WRITE_6:
			case READ_10:
			case WRITE_10:
			case RESERVE:
			case RELEASE:
				break;
			case MODE_SENSE:
				if (!ips_online(ha, scb)
				    || !ips_msense(ha, scb)) {
					errcode = DID_ERROR;
				}
				break;
			case READ_CAPACITY:
				if (ips_online(ha, scb))
					ips_rdcap(ha, scb);
				else {
					errcode = DID_TIME_OUT;
				}
				break;
			case SEND_DIAGNOSTIC:
			case REASSIGN_BLOCKS:
				break;
			case FORMAT_UNIT:
				errcode = DID_ERROR;
				break;
			case SEEK_10:
			case VERIFY:
			case READ_DEFECT_DATA:
			case READ_BUFFER:
			case WRITE_BUFFER:
				break;
			default:
				errcode = DID_ERROR;
			}	/* end switch */
			scb->scsi_cmd->result = errcode << 16;
		} else {	/* physical bus (bus != 0) */
			/* restrict access to physical drives */
			if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
				ips_scmd_buf_read(scb->scsi_cmd,
						  &inquiryData, sizeof (inquiryData));
				/* fail INQUIRY of a physical disk so the
				 * midlayer won't attach to it */
				if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
					scb->scsi_cmd->result = DID_TIME_OUT << 16;
			}
		}		/* else */
	} else {		/* command failed (not success/recovered) */
		if (scb->bus == 0) {
			DEBUG_VAR(1,
				  "(%s%d) Unrecovered Logical Drive Error OpCode: %x, BSB: %x, ESB: %x",
				  ips_name, ha->host_num,
				  scb->cmd.basic_io.op_code, basic_status,
				  ext_status);
		}
		/* translate the controller status into a SCSI result */
		ips_map_status(ha, scb, sp);
	}			/* else */
}
/****************************************************************************/
/* */
/* Routine Name: ips_online */
/* */
/* Routine Description: */
/* */
/* Determine if a logical drive is online */
/* */
/****************************************************************************/
static int
ips_online(ips_ha_t * ha, ips_scb_t * scb)
{
	METHOD_TRACE("ips_online", 1);

	/* Targets beyond the supported logical-drive range are never online. */
	if (scb->target_id >= IPS_MAX_LD)
		return (0);

	/* A bad basic status means the cached drive info is stale; wipe it. */
	if ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1) {
		memset(ha->logical_drive_info, 0, sizeof (IPS_LD_INFO));
		return (0);
	}

	/* Any state other than the four "not usable" ones counts as online. */
	switch (ha->logical_drive_info->drive_info[scb->target_id].state) {
	case IPS_LD_OFFLINE:
	case IPS_LD_FREE:
	case IPS_LD_CRS:
	case IPS_LD_SYS:
		return (0);
	default:
		return (1);
	}
}
/****************************************************************************/
/* */
/* Routine Name: ips_inquiry */
/* */
/* Routine Description: */
/* */
/* Simulate an inquiry command to a logical drive */
/* */
/****************************************************************************/
static int
ips_inquiry(ips_ha_t * ha, ips_scb_t * scb)
{
/* Build standard SCSI INQUIRY data for an emulated logical drive and
 * copy it into the command's data buffer.  Always returns 1. */
IPS_SCSI_INQ_DATA inquiry;
METHOD_TRACE("ips_inquiry", 1);
memset(&inquiry, 0, sizeof (IPS_SCSI_INQ_DATA));
/* Present the logical drive as a connected direct-access device. */
inquiry.DeviceType = IPS_SCSI_INQ_TYPE_DASD;
inquiry.DeviceTypeQualifier = IPS_SCSI_INQ_LU_CONNECTED;
inquiry.Version = IPS_SCSI_INQ_REV2;
inquiry.ResponseDataFormat = IPS_SCSI_INQ_RD_REV2;
/* 36-byte standard INQUIRY: 31 additional bytes after byte 4. */
inquiry.AdditionalLength = 31;
inquiry.Flags[0] = IPS_SCSI_INQ_Address16;
inquiry.Flags[1] =
    IPS_SCSI_INQ_WBus16 | IPS_SCSI_INQ_Sync | IPS_SCSI_INQ_CmdQue;
/* Fixed-width SCSI identification fields are space padded and NOT
 * NUL-terminated, so strncpy with the exact field length is intentional. */
strncpy(inquiry.VendorId, "IBM ", 8);
strncpy(inquiry.ProductId, "SERVERAID ", 16);
strncpy(inquiry.ProductRevisionLevel, "1.00", 4);
ips_scmd_buf_write(scb->scsi_cmd, &inquiry, sizeof (inquiry));
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_rdcap */
/* */
/* Routine Description: */
/* */
/* Simulate a read capacity command to a logical drive */
/* */
/****************************************************************************/
static int
ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
{
/* Simulate READ CAPACITY(10) for an emulated logical drive.
 * Returns 0 if the caller's buffer cannot hold the 8-byte reply. */
IPS_SCSI_CAPACITY cap;
METHOD_TRACE("ips_rdcap", 1);
if (scsi_bufflen(scb->scsi_cmd) < 8)
return (0);
/* sector_count is stored little-endian by the controller; SCSI wants
 * the big-endian address of the LAST block, hence the -1. */
cap.lba =
    cpu_to_be32(le32_to_cpu
(ha->logical_drive_info->
  drive_info[scb->target_id].sector_count) - 1);
cap.len = cpu_to_be32((uint32_t) IPS_BLKSIZE);
ips_scmd_buf_write(scb->scsi_cmd, &cap, sizeof (cap));
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_msense */
/* */
/* Routine Description: */
/* */
/* Simulate a mode sense command to a logical drive */
/* */
/****************************************************************************/
static int
ips_msense(ips_ha_t * ha, ips_scb_t * scb)
{
/* Simulate MODE SENSE for an emulated logical drive.  Supports mode
 * pages 3 (format parameters), 4 (rigid disk geometry) and 8 (caching);
 * any other requested page fails with return 0. */
uint16_t heads;
uint16_t sectors;
uint32_t cylinders;
IPS_SCSI_MODE_PAGE_DATA mdata;
METHOD_TRACE("ips_msense", 1);
/* Pick a fabricated geometry: drives larger than 0x400000 sectors
 * (without the misc compatibility flag 0x8 set) get the "normal"
 * head/sector counts, smaller ones the compact counts. */
if (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) > 0x400000 &&
    (ha->enq->ucMiscFlag & 0x8) == 0) {
heads = IPS_NORM_HEADS;
sectors = IPS_NORM_SECTORS;
} else {
heads = IPS_COMP_HEADS;
sectors = IPS_COMP_SECTORS;
}
/* Derive cylinder count from total size and the chosen geometry. */
cylinders =
    (le32_to_cpu(ha->enq->ulDriveSize[scb->target_id]) -
1) / (heads * sectors);
memset(&mdata, 0, sizeof (IPS_SCSI_MODE_PAGE_DATA));
mdata.hdr.BlockDescLength = 8;
/* The requested page code is in the low 6 bits of CDB byte 2. */
switch (scb->scsi_cmd->cmnd[2] & 0x3f) {
case 0x03:		/* page 3 */
mdata.pdata.pg3.PageCode = 3;
mdata.pdata.pg3.PageLength = sizeof (IPS_SCSI_MODE_PAGE3);
mdata.hdr.DataLength =
    3 + mdata.hdr.BlockDescLength + mdata.pdata.pg3.PageLength;
mdata.pdata.pg3.TracksPerZone = 0;
mdata.pdata.pg3.AltSectorsPerZone = 0;
mdata.pdata.pg3.AltTracksPerZone = 0;
mdata.pdata.pg3.AltTracksPerVolume = 0;
mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors);
mdata.pdata.pg3.BytesPerSector = cpu_to_be16(IPS_BLKSIZE);
mdata.pdata.pg3.Interleave = cpu_to_be16(1);
mdata.pdata.pg3.TrackSkew = 0;
mdata.pdata.pg3.CylinderSkew = 0;
mdata.pdata.pg3.flags = IPS_SCSI_MP3_SoftSector;
break;
case 0x4:
mdata.pdata.pg4.PageCode = 4;
mdata.pdata.pg4.PageLength = sizeof (IPS_SCSI_MODE_PAGE4);
mdata.hdr.DataLength =
    3 + mdata.hdr.BlockDescLength + mdata.pdata.pg4.PageLength;
/* NOTE(review): cylinder count is split into a 16-bit "High" part
 * and an 8-bit "Low" byte here; presumably matching the on-wire
 * 3-byte cylinder field layout — confirm against IPS_SCSI_MODE_PAGE4. */
mdata.pdata.pg4.CylindersHigh =
    cpu_to_be16((cylinders >> 8) & 0xFFFF);
mdata.pdata.pg4.CylindersLow = (cylinders & 0xFF);
mdata.pdata.pg4.Heads = heads;
mdata.pdata.pg4.WritePrecompHigh = 0;
mdata.pdata.pg4.WritePrecompLow = 0;
mdata.pdata.pg4.ReducedWriteCurrentHigh = 0;
mdata.pdata.pg4.ReducedWriteCurrentLow = 0;
mdata.pdata.pg4.StepRate = cpu_to_be16(1);
mdata.pdata.pg4.LandingZoneHigh = 0;
mdata.pdata.pg4.LandingZoneLow = 0;
mdata.pdata.pg4.flags = 0;
mdata.pdata.pg4.RotationalOffset = 0;
mdata.pdata.pg4.MediumRotationRate = 0;
break;
case 0x8:
mdata.pdata.pg8.PageCode = 8;
mdata.pdata.pg8.PageLength = sizeof (IPS_SCSI_MODE_PAGE8);
mdata.hdr.DataLength =
    3 + mdata.hdr.BlockDescLength + mdata.pdata.pg8.PageLength;
/* everything else is left set to 0 */
break;
default:
/* Unsupported page: report failure to the caller. */
return (0);
}			/* end switch */
ips_scmd_buf_write(scb->scsi_cmd, &mdata, sizeof (mdata));
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_reqsen */
/* */
/* Routine Description: */
/* */
/* Simulate a request sense command to a logical drive */
/* */
/****************************************************************************/
static int
ips_reqsen(ips_ha_t * ha, ips_scb_t * scb)
{
	IPS_SCSI_REQSEN sense;

	METHOD_TRACE("ips_reqsen", 1);

	/* Build a "no sense" fixed-format sense block for the emulated
	 * logical drive; memset also clears any struct padding. */
	memset(&sense, 0, sizeof (IPS_SCSI_REQSEN));

	sense.ResponseCode =
	    IPS_SCSI_REQSEN_VALID | IPS_SCSI_REQSEN_CURRENT_ERR;
	sense.AdditionalLength = 10;
	sense.AdditionalSenseCode = IPS_SCSI_REQSEN_NO_SENSE;
	sense.AdditionalSenseCodeQual = IPS_SCSI_REQSEN_NO_SENSE;

	/* Copy the simulated sense data into the command's buffer. */
	ips_scmd_buf_write(scb->scsi_cmd, &sense, sizeof (sense));

	return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_free */
/* */
/* Routine Description: */
/* */
/* Free any allocated space for this controller */
/* */
/****************************************************************************/
static void
ips_free(ips_ha_t * ha)
{
/* Tear down every per-controller allocation.  DMA-coherent areas are
 * returned via pci_free_consistent with their bus address; plain
 * kmalloc areas use kfree, which tolerates NULL and needs no guard. */
METHOD_TRACE("ips_free", 1);
if (ha) {
if (ha->enq) {
pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
    ha->enq, ha->enq_busaddr);
ha->enq = NULL;
}
kfree(ha->conf);
ha->conf = NULL;
if (ha->adapt) {
/* NOTE(review): the bus address is read out of the buffer that is
 * about to be freed; assumes hw_status_start still holds the value
 * set at allocation time — confirm against the alloc path. */
pci_free_consistent(ha->pcidev,
    sizeof (IPS_ADAPTER) +
    sizeof (IPS_IO_CMD), ha->adapt,
    ha->adapt->hw_status_start);
ha->adapt = NULL;
}
if (ha->logical_drive_info) {
pci_free_consistent(ha->pcidev,
    sizeof (IPS_LD_INFO),
    ha->logical_drive_info,
    ha->logical_drive_info_dma_addr);
ha->logical_drive_info = NULL;
}
kfree(ha->nvram);
ha->nvram = NULL;
kfree(ha->subsys);
ha->subsys = NULL;
if (ha->ioctl_data) {
pci_free_consistent(ha->pcidev, ha->ioctl_len,
    ha->ioctl_data, ha->ioctl_busaddr);
ha->ioctl_data = NULL;
ha->ioctl_datasize = 0;
ha->ioctl_len = 0;
}
/* Release all command blocks and their S/G lists. */
ips_deallocatescbs(ha, ha->max_cmds);
/* free memory mapped (if applicable) */
if (ha->mem_ptr) {
iounmap(ha->ioremap_ptr);
ha->ioremap_ptr = NULL;
ha->mem_ptr = NULL;
}
ha->mem_addr = 0;
}
}
/****************************************************************************/
/* */
/* Routine Name: ips_deallocatescbs */
/* */
/* Routine Description: */
/* */
/* Free the command blocks */
/* */
/****************************************************************************/
static int
ips_deallocatescbs(ips_ha_t * ha, int cmds)
{
	/* Nothing allocated — nothing to do. */
	if (!ha->scbs)
		return 1;

	/* Release the shared S/G list area first, then the SCB array
	 * itself; both were allocated as single DMA-coherent regions. */
	pci_free_consistent(ha->pcidev,
			    IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
			    ha->scbs->sg_list.list, ha->scbs->sg_busaddr);
	pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
			    ha->scbs, ha->scbs->scb_busaddr);
	ha->scbs = NULL;

	return 1;
}
/****************************************************************************/
/* */
/* Routine Name: ips_allocatescbs */
/* */
/* Routine Description: */
/* */
/* Allocate the command blocks */
/* */
/****************************************************************************/
static int
ips_allocatescbs(ips_ha_t * ha)
{
/* Allocate the SCB array and one shared S/G list area as two
 * DMA-coherent regions, carve a per-command S/G slice for each SCB,
 * and thread all but the last SCB onto the free list (the last one is
 * reserved as the "special" SCB — see ips_freescb).  Returns 1 on
 * success, 0 on allocation failure. */
ips_scb_t *scb_p;
IPS_SG_LIST ips_sg;
int i;
dma_addr_t command_dma, sg_dma;
METHOD_TRACE("ips_allocatescbs", 1);
/* Allocate memory for the SCBs */
ha->scbs =
    pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
 &command_dma);
if (ha->scbs == NULL)
return 0;
ips_sg.list =
    pci_alloc_consistent(ha->pcidev,
 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
 ha->max_cmds, &sg_dma);
if (ips_sg.list == NULL) {
/* Undo the SCB allocation so we fail cleanly. */
pci_free_consistent(ha->pcidev,
    ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
    command_dma);
return 0;
}
memset(ha->scbs, 0, ha->max_cmds * sizeof (ips_scb_t));
for (i = 0; i < ha->max_cmds; i++) {
scb_p = &ha->scbs[i];
/* Bus address of this SCB within the coherent block. */
scb_p->scb_busaddr = command_dma + sizeof (ips_scb_t) * i;
/* set up S/G list */
if (IPS_USE_ENH_SGLIST(ha)) {
scb_p->sg_list.enh_list =
    ips_sg.enh_list + i * IPS_MAX_SG;
scb_p->sg_busaddr =
    sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
} else {
scb_p->sg_list.std_list =
    ips_sg.std_list + i * IPS_MAX_SG;
scb_p->sg_busaddr =
    sg_dma + IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * i;
}
/* add to the free list (last SCB deliberately kept off it) */
if (i < ha->max_cmds - 1) {
scb_p->q_next = ha->scb_freelist;
ha->scb_freelist = scb_p;
}
}
/* success */
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_init_scb */
/* */
/* Routine Description: */
/* */
/* Initialize a CCB to default values */
/* */
/****************************************************************************/
static void
ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
{
/* Reset an SCB to default values while preserving its S/G list pointer
 * and bus addresses across the zeroing memset.  Also (re)initializes
 * the adapter's shared dummy command bucket. */
IPS_SG_LIST sg_list;
uint32_t cmd_busaddr, sg_busaddr;
METHOD_TRACE("ips_init_scb", 1);
if (scb == NULL)
return;
/* Save fields that must survive the memset below. */
sg_list.list = scb->sg_list.list;
cmd_busaddr = scb->scb_busaddr;
sg_busaddr = scb->sg_busaddr;
/* zero fill */
memset(scb, 0, sizeof (ips_scb_t));
memset(ha->dummy, 0, sizeof (IPS_IO_CMD));
/* Initialize dummy command bucket */
ha->dummy->op_code = 0xFF;
ha->dummy->ccsar = cpu_to_le32(ha->adapt->hw_status_start
       + sizeof (IPS_ADAPTER));
/* IPS_MAX_CMDS is an id no real command uses. */
ha->dummy->command_id = IPS_MAX_CMDS;
/* set bus address of scb (restore saved values) */
scb->scb_busaddr = cmd_busaddr;
scb->sg_busaddr = sg_busaddr;
scb->sg_list.list = sg_list.list;
/* Neptune Fix */
scb->cmd.basic_io.cccr = cpu_to_le32((uint32_t) IPS_BIT_ILE);
scb->cmd.basic_io.ccsar = cpu_to_le32(ha->adapt->hw_status_start
      + sizeof (IPS_ADAPTER));
}
/****************************************************************************/
/* */
/* Routine Name: ips_get_scb */
/* */
/* Routine Description: */
/* */
/* Initialize a CCB to default values */
/* */
/* ASSUMED to be called from within a lock */
/* */
/****************************************************************************/
static ips_scb_t *
ips_getscb(ips_ha_t * ha)
{
	ips_scb_t *scb;

	METHOD_TRACE("ips_getscb", 1);

	/* Caller must hold the HA lock: the free list is unprotected here. */
	scb = ha->scb_freelist;
	if (scb == NULL)
		return (NULL);

	/* Unlink from the free list and hand back a freshly reset SCB. */
	ha->scb_freelist = scb->q_next;
	scb->flags = 0;
	scb->q_next = NULL;

	ips_init_scb(ha, scb);

	return (scb);
}
/****************************************************************************/
/* */
/* Routine Name: ips_free_scb */
/* */
/* Routine Description: */
/* */
/* Return an unused CCB back to the free list */
/* */
/* ASSUMED to be called from within a lock */
/* */
/****************************************************************************/
static void
ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
{
/* Return an SCB to the free list, first undoing whatever DMA mapping
 * was set up for its data (scatter/gather or single buffer).
 * Caller must hold the HA lock. */
METHOD_TRACE("ips_freescb", 1);
if (scb->flags & IPS_SCB_MAP_SG)
scsi_dma_unmap(scb->scsi_cmd);
else if (scb->flags & IPS_SCB_MAP_SINGLE)
pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
 IPS_DMA_DIR(scb));
/* check to make sure this is not our "special" scb
 * (the last SCB in the array is reserved and never free-listed) */
if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
scb->q_next = ha->scb_freelist;
ha->scb_freelist = scb;
}
}
/****************************************************************************/
/* */
/* Routine Name: ips_isinit_copperhead */
/* */
/* Routine Description: */
/* */
/* Is controller initialized ? */
/* */
/****************************************************************************/
static int
ips_isinit_copperhead(ips_ha_t * ha)
{
	uint8_t scpr;
	uint8_t isr;

	METHOD_TRACE("ips_isinit_copperhead", 1);

	isr = inb(ha->io_addr + IPS_REG_HISR);
	scpr = inb(ha->io_addr + IPS_REG_SCPR);

	/* The controller counts as initialized when either interrupts (EI)
	 * or bus-mastering (EBM) has already been enabled. */
	return ((isr & IPS_BIT_EI) || (scpr & IPS_BIT_EBM)) ? 1 : 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_isinit_copperhead_memio */
/* */
/* Routine Description: */
/* */
/* Is controller initialized ? */
/* */
/****************************************************************************/
static int
ips_isinit_copperhead_memio(ips_ha_t * ha)
{
	uint8_t isr = 0;
	uint8_t scpr;

	METHOD_TRACE("ips_is_init_copperhead_memio", 1);

	isr = readb(ha->mem_ptr + IPS_REG_HISR);
	scpr = readb(ha->mem_ptr + IPS_REG_SCPR);

	/* Same check as the port-I/O variant, via memory-mapped registers:
	 * initialized when interrupts (EI) or bus-mastering (EBM) is on. */
	return ((isr & IPS_BIT_EI) || (scpr & IPS_BIT_EBM)) ? 1 : 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_isinit_morpheus */
/* */
/* Routine Description: */
/* */
/* Is controller initialized ? */
/* */
/****************************************************************************/
static int
ips_isinit_morpheus(ips_ha_t * ha)
{
	uint32_t post;
	uint32_t bits;

	METHOD_TRACE("ips_is_init_morpheus", 1);

	/* A pending interrupt while probing means the adapter is in an
	 * unknown state; flush and reset before checking. */
	if (ips_isintr_morpheus(ha))
		ips_flush_and_reset(ha);

	post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
	bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);

	/* Not initialized if POST hasn't reported, or interrupt bits are
	 * still pending in the I2O host interrupt register. */
	if (post == 0 || (bits & 0x3))
		return (0);

	return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_flush_and_reset */
/* */
/* Routine Description: */
/* */
/* Perform cleanup ( FLUSH and RESET ) when the adapter is in an unknown */
/* state ( was trying to INIT and an interrupt was already pending ) ... */
/* */
/****************************************************************************/
static void
ips_flush_and_reset(ips_ha_t *ha)
{
	ips_scb_t *scb;
	int ret;
	int time;
	int done;
	dma_addr_t command_dma;

	/* Create a usable SCB to carry the flush command. */
	scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
	if (scb) {
		memset(scb, 0, sizeof(ips_scb_t));
		ips_init_scb(ha, scb);
		scb->scb_busaddr = command_dma;

		scb->timeout = ips_cmd_timeout;
		scb->cdb[0] = IPS_CMD_FLUSH;

		scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
		scb->cmd.flush_cache.command_id = IPS_MAX_CMDS;	/* Use an ID that would otherwise not exist */
		scb->cmd.flush_cache.state = IPS_NORM_STATE;
		scb->cmd.flush_cache.reserved = 0;
		scb->cmd.flush_cache.reserved2 = 0;
		scb->cmd.flush_cache.reserved3 = 0;
		scb->cmd.flush_cache.reserved4 = 0;

		ret = ips_send_cmd(ha, scb);	/* Send the Flush Command */

		if (ret == IPS_SUCCESS) {
			time = 60 * IPS_ONE_SEC;	/* Max Wait time is 60 seconds */
			done = 0;

			while ((time > 0) && (!done)) {
				done = ips_poll_for_flush_complete(ha);
				/* This may look evil, but it's only done during extremely rare start-up conditions ! */
				udelay(1000);
				time--;
			}
		}
	}

	/* Now RESET and INIT the adapter */
	(*ha->func.reset) (ha);

	/* BUGFIX: the free was previously unconditional, so an allocation
	 * failure above reached pci_free_consistent() with scb == NULL and
	 * an *uninitialized* command_dma.  Only free what was allocated. */
	if (scb)
		pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb,
				    command_dma);
	return;
}
/****************************************************************************/
/* */
/* Routine Name: ips_poll_for_flush_complete */
/* */
/* Routine Description: */
/* */
/* Poll for the Flush Command issued by ips_flush_and_reset() to complete */
/* All other responses are just taken off the queue and ignored */
/* */
/****************************************************************************/
static int
ips_poll_for_flush_complete(ips_ha_t * ha)
{
	IPS_STATUS cstatus;

	/* Drain the status queue, discarding everything except the flush
	 * completion issued by ips_flush_and_reset(). */
	for (;;) {
		cstatus.value = (*ha->func.statupd) (ha);

		/* 0xFFFFFFFF means no more status entries to process. */
		if (cstatus.value == 0xffffffff)
			return 0;

		/* The flush was sent with the reserved IPS_MAX_CMDS id. */
		if (cstatus.fields.command_id == IPS_MAX_CMDS)
			return 1;
	}
}
/****************************************************************************/
/* */
/* Routine Name: ips_enable_int_copperhead */
/* */
/* Routine Description: */
/* Turn on interrupts */
/* */
/****************************************************************************/
static void
ips_enable_int_copperhead(ips_ha_t * ha)
{
	METHOD_TRACE("ips_enable_int_copperhead", 1);

	/* BUGFIX: the arguments were swapped — the original code was
	 * outb(ha->io_addr + IPS_REG_HISR, IPS_BIT_EI), which writes the
	 * (truncated) register address to port IPS_BIT_EI instead of
	 * setting the EI bit in HISR.  outb() takes (value, port); compare
	 * the identical register write in ips_init_copperhead(). */
	outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
	inb(ha->io_addr + IPS_REG_HISR);	/*Ensure PCI Posting Completes*/
}
/****************************************************************************/
/* */
/* Routine Name: ips_enable_int_copperhead_memio */
/* */
/* Routine Description: */
/* Turn on interrupts */
/* */
/****************************************************************************/
static void
ips_enable_int_copperhead_memio(ips_ha_t * ha)
{
/* Enable interrupt generation by setting the EI bit in the memory-mapped
 * HISR register; the read-back flushes posted PCI writes. */
METHOD_TRACE("ips_enable_int_copperhead_memio", 1);
writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
readb(ha->mem_ptr + IPS_REG_HISR);	/*Ensure PCI Posting Completes*/
}
/****************************************************************************/
/* */
/* Routine Name: ips_enable_int_morpheus */
/* */
/* Routine Description: */
/* Turn on interrupts */
/* */
/****************************************************************************/
static void
ips_enable_int_morpheus(ips_ha_t * ha)
{
/* Unmask interrupts on a morpheus adapter by clearing bit 0x08 in the
 * i960 outbound interrupt mask register (read-modify-write).  The final
 * read-back flushes the posted PCI write. */
uint32_t Oimr;
METHOD_TRACE("ips_enable_int_morpheus", 1);
Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
Oimr &= ~0x08;
writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
readl(ha->mem_ptr + IPS_REG_I960_OIMR);	/*Ensure PCI Posting Completes*/
}
/****************************************************************************/
/* */
/* Routine Name: ips_init_copperhead */
/* */
/* Routine Description: */
/* */
/* Initialize a copperhead controller */
/* */
/****************************************************************************/
static int
ips_init_copperhead(ips_ha_t * ha)
{
/* Bring up a copperhead controller via port I/O: collect the POST and
 * config bytes the firmware publishes, wait for the controller to go
 * non-busy, then program CCCR, enable bus-mastering and interrupts.
 * Returns 1 on success, 0 on any timeout or bad POST status. */
uint8_t Isr;
uint8_t Cbsp;
uint8_t PostByte[IPS_MAX_POST_BYTES];
uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
int i, j;
METHOD_TRACE("ips_init_copperhead", 1);
/* Read each POST byte; firmware signals availability via the GHI bit
 * in HISR (waiting up to 45 seconds per byte). */
for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
for (j = 0; j < 45; j++) {
Isr = inb(ha->io_addr + IPS_REG_HISR);
if (Isr & IPS_BIT_GHI)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (j >= 45)
/* error occurred */
return (0);
PostByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
/* Write the bits back to acknowledge/clear the interrupt. */
outb(Isr, ha->io_addr + IPS_REG_HISR);
}
if (PostByte[0] < IPS_GOOD_POST_STATUS) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "reset controller fails (post status %x %x).\n",
   PostByte[0], PostByte[1]);
return (0);
}
/* Same handshake for the config bytes, but with a 240 second limit. */
for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
for (j = 0; j < 240; j++) {
Isr = inb(ha->io_addr + IPS_REG_HISR);
if (Isr & IPS_BIT_GHI)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (j >= 240)
/* error occurred */
return (0);
ConfigByte[i] = inb(ha->io_addr + IPS_REG_ISPR);
outb(Isr, ha->io_addr + IPS_REG_HISR);
}
/* Wait (up to 240s) for the operational bit in CBSP to clear. */
for (i = 0; i < 240; i++) {
Cbsp = inb(ha->io_addr + IPS_REG_CBSP);
if ((Cbsp & IPS_BIT_OP) == 0)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (i >= 240)
/* reset failed */
return (0);
/* setup CCCR */
outl(0x1010, ha->io_addr + IPS_REG_CCCR);
/* Enable busmastering */
outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
/* fix for anaconda64 */
outl(0, ha->io_addr + IPS_REG_NDAE);
/* Enable interrupts */
outb(IPS_BIT_EI, ha->io_addr + IPS_REG_HISR);
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_init_copperhead_memio */
/* */
/* Routine Description: */
/* */
/* Initialize a copperhead controller with memory mapped I/O */
/* */
/****************************************************************************/
static int
ips_init_copperhead_memio(ips_ha_t * ha)
{
/* Memory-mapped variant of ips_init_copperhead(): identical handshake
 * (POST bytes, config bytes, busy-wait, CCCR/bus-master/interrupt
 * enable) using readb/writeb/writel instead of port I/O.
 * Returns 1 on success, 0 on any timeout or bad POST status. */
uint8_t Isr = 0;
uint8_t Cbsp;
uint8_t PostByte[IPS_MAX_POST_BYTES];
uint8_t ConfigByte[IPS_MAX_CONFIG_BYTES];
int i, j;
METHOD_TRACE("ips_init_copperhead_memio", 1);
/* Read each POST byte; GHI in HISR signals a byte is ready (45s max). */
for (i = 0; i < IPS_MAX_POST_BYTES; i++) {
for (j = 0; j < 45; j++) {
Isr = readb(ha->mem_ptr + IPS_REG_HISR);
if (Isr & IPS_BIT_GHI)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (j >= 45)
/* error occurred */
return (0);
PostByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
/* Write the bits back to acknowledge/clear the interrupt. */
writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
}
if (PostByte[0] < IPS_GOOD_POST_STATUS) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "reset controller fails (post status %x %x).\n",
   PostByte[0], PostByte[1]);
return (0);
}
/* Same handshake for the config bytes, with a 240 second limit. */
for (i = 0; i < IPS_MAX_CONFIG_BYTES; i++) {
for (j = 0; j < 240; j++) {
Isr = readb(ha->mem_ptr + IPS_REG_HISR);
if (Isr & IPS_BIT_GHI)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (j >= 240)
/* error occurred */
return (0);
ConfigByte[i] = readb(ha->mem_ptr + IPS_REG_ISPR);
writeb(Isr, ha->mem_ptr + IPS_REG_HISR);
}
/* Wait (up to 240s) for the operational bit in CBSP to clear. */
for (i = 0; i < 240; i++) {
Cbsp = readb(ha->mem_ptr + IPS_REG_CBSP);
if ((Cbsp & IPS_BIT_OP) == 0)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (i >= 240)
/* error occurred */
return (0);
/* setup CCCR */
writel(0x1010, ha->mem_ptr + IPS_REG_CCCR);
/* Enable busmastering */
writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
/* fix for anaconda64 */
writel(0, ha->mem_ptr + IPS_REG_NDAE);
/* Enable interrupts */
writeb(IPS_BIT_EI, ha->mem_ptr + IPS_REG_HISR);
/* if we get here then everything went OK */
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_init_morpheus */
/* */
/* Routine Description: */
/* */
/* Initialize a morpheus controller */
/* */
/****************************************************************************/
static int
ips_init_morpheus(ips_ha_t * ha)
{
/* Bring up a morpheus controller: wait for POST status via i960 MSG0
 * (handling an in-progress battery PIC flash), then for config data via
 * MSG1, acknowledging each message interrupt, and finally unmask
 * interrupts.  Returns 1 on success, 0 on timeout or bad POST. */
uint32_t Post;
uint32_t Config;
uint32_t Isr;
uint32_t Oimr;
int i;
METHOD_TRACE("ips_init_morpheus", 1);
/* Wait up to 45 secs for Post */
for (i = 0; i < 45; i++) {
Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
if (Isr & IPS_BIT_I960_MSG0I)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (i >= 45) {
/* error occurred */
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "timeout waiting for post.\n");
return (0);
}
Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
/* 0x4F00 is the firmware's "battery PIC flash in progress" marker. */
if (Post == 0x4F00) {	/* If Flashing the Battery PIC */
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "Flashing Battery PIC, Please wait ...\n");
/* Clear the interrupt bit */
Isr = (uint32_t) IPS_BIT_I960_MSG0I;
writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
for (i = 0; i < 120; i++) {	/* Wait Up to 2 Min. for Completion */
Post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
if (Post != 0x4F00)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (i >= 120) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "timeout waiting for Battery PIC Flash\n");
return (0);
}
}
/* Clear the interrupt bit */
Isr = (uint32_t) IPS_BIT_I960_MSG0I;
writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
/* POST status lives in the upper byte(s), hence the shifted compare. */
if (Post < (IPS_GOOD_POST_STATUS << 8)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "reset controller fails (post status %x).\n", Post);
return (0);
}
/* Wait up to 240 secs for config bytes */
for (i = 0; i < 240; i++) {
Isr = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
if (Isr & IPS_BIT_I960_MSG1I)
break;
/* Delay for 1 Second */
MDELAY(IPS_ONE_SEC);
}
if (i >= 240) {
/* error occurred */
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "timeout waiting for config.\n");
return (0);
}
Config = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
/* Clear interrupt bit */
Isr = (uint32_t) IPS_BIT_I960_MSG1I;
writel(Isr, ha->mem_ptr + IPS_REG_I2O_HIR);
/* Turn on the interrupts */
Oimr = readl(ha->mem_ptr + IPS_REG_I960_OIMR);
Oimr &= ~0x8;
writel(Oimr, ha->mem_ptr + IPS_REG_I960_OIMR);
/* if we get here then everything went OK */
/* Since we did a RESET, an EraseStripeLock may be needed */
if (Post == 0xEF10) {
if ((Config == 0x000F) || (Config == 0x0009))
ha->requires_esl = 1;
}
return (1);
}
/****************************************************************************/
/* */
/* Routine Name: ips_reset_copperhead */
/* */
/* Routine Description: */
/* */
/* Reset the controller */
/* */
/****************************************************************************/
static int
ips_reset_copperhead(ips_ha_t * ha)
{
	int attempt;

	METHOD_TRACE("ips_reset_copperhead", 1);

	DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
		  ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);

	/* Pulse the reset bit and retry initialization, two attempts max. */
	for (attempt = 0; attempt < 2; attempt++) {
		outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);

		/* Delay for 1 Second */
		MDELAY(IPS_ONE_SEC);

		outb(0, ha->io_addr + IPS_REG_SCPR);

		/* Delay for 1 Second */
		MDELAY(IPS_ONE_SEC);

		if ((*ha->func.init) (ha))
			return (1);
	}

	/* Both reset attempts failed to bring the controller back. */
	return (0);
}
/****************************************************************************/
/* */
/* Routine Name: ips_reset_copperhead_memio */
/* */
/* Routine Description: */
/* */
/* Reset the controller */
/* */
/****************************************************************************/
static int
ips_reset_copperhead_memio(ips_ha_t * ha)
{
	int attempt;

	METHOD_TRACE("ips_reset_copperhead_memio", 1);

	DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
		  ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);

	/* Pulse the reset bit via MMIO and retry init, two attempts max. */
	for (attempt = 0; attempt < 2; attempt++) {
		writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);

		/* Delay for 1 Second */
		MDELAY(IPS_ONE_SEC);

		writeb(0, ha->mem_ptr + IPS_REG_SCPR);

		/* Delay for 1 Second */
		MDELAY(IPS_ONE_SEC);

		if ((*ha->func.init) (ha))
			return (1);
	}

	/* Both reset attempts failed to bring the controller back. */
	return (0);
}
/****************************************************************************/
/* */
/* Routine Name: ips_reset_morpheus */
/* */
/* Routine Description: */
/* */
/* Reset the controller */
/* */
/****************************************************************************/
static int
ips_reset_morpheus(ips_ha_t * ha)
{
	int attempt;
	uint8_t junk;

	METHOD_TRACE("ips_reset_morpheus", 1);

	DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
		  ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);

	/* Kick the i960 doorbell reset and retry init, two attempts max. */
	for (attempt = 0; attempt < 2; attempt++) {
		writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);

		/* Delay for 5 Seconds */
		MDELAY(5 * IPS_ONE_SEC);

		/* Do a PCI config read to wait for adapter */
		pci_read_config_byte(ha->pcidev, 4, &junk);

		if ((*ha->func.init) (ha))
			return (1);
	}

	/* Both reset attempts failed to bring the controller back. */
	return (0);
}
/****************************************************************************/
/* */
/* Routine Name: ips_statinit */
/* */
/* Routine Description: */
/* */
/* Initialize the status queues on the controller */
/* */
/****************************************************************************/
static void
ips_statinit(ips_ha_t * ha)
{
/* Initialize the copperhead status queue: set the driver-side start/
 * end/tail pointers into the shared status area, then program the
 * controller's queue start (SQSR), end (SQER), head (SQHR) and tail
 * (SQTR) registers with the corresponding bus addresses. */
uint32_t phys_status_start;
METHOD_TRACE("ips_statinit", 1);
ha->adapt->p_status_start = ha->adapt->status;
ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
ha->adapt->p_status_tail = ha->adapt->status;
phys_status_start = ha->adapt->hw_status_start;
outl(phys_status_start, ha->io_addr + IPS_REG_SQSR);
outl(phys_status_start + IPS_STATUS_Q_SIZE,
     ha->io_addr + IPS_REG_SQER);
/* Head starts one entry past the start; tail at the start. */
outl(phys_status_start + IPS_STATUS_SIZE,
     ha->io_addr + IPS_REG_SQHR);
outl(phys_status_start, ha->io_addr + IPS_REG_SQTR);
ha->adapt->hw_status_tail = phys_status_start;
}
/****************************************************************************/
/* */
/* Routine Name: ips_statinit_memio */
/* */
/* Routine Description: */
/* */
/* Initialize the status queues on the controller */
/* */
/****************************************************************************/
static void
ips_statinit_memio(ips_ha_t * ha)
{
/* Memory-mapped variant of ips_statinit(): identical queue setup using
 * writel to the MMIO SQSR/SQER/SQHR/SQTR registers. */
uint32_t phys_status_start;
METHOD_TRACE("ips_statinit_memio", 1);
ha->adapt->p_status_start = ha->adapt->status;
ha->adapt->p_status_end = ha->adapt->status + IPS_MAX_CMDS;
ha->adapt->p_status_tail = ha->adapt->status;
phys_status_start = ha->adapt->hw_status_start;
writel(phys_status_start, ha->mem_ptr + IPS_REG_SQSR);
writel(phys_status_start + IPS_STATUS_Q_SIZE,
       ha->mem_ptr + IPS_REG_SQER);
/* Head starts one entry past the start; tail at the start. */
writel(phys_status_start + IPS_STATUS_SIZE, ha->mem_ptr + IPS_REG_SQHR);
writel(phys_status_start, ha->mem_ptr + IPS_REG_SQTR);
ha->adapt->hw_status_tail = phys_status_start;
}
/****************************************************************************/
/* */
/* Routine Name: ips_statupd_copperhead */
/* */
/* Routine Description: */
/* */
/* Remove an element from the status queue */
/* */
/****************************************************************************/
static uint32_t
ips_statupd_copperhead(ips_ha_t * ha)
{
/* Pop the next completion from the status queue: advance the tail
 * pointer (wrapping at the end of the ring), publish the new tail bus
 * address to the controller's SQTR register, and return the status
 * value at the NEW tail position — the advance happens first on purpose. */
METHOD_TRACE("ips_statupd_copperhead", 1);
if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
ha->adapt->p_status_tail++;
ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
} else {
/* Wrap both the virtual and bus-address tails to the start. */
ha->adapt->p_status_tail = ha->adapt->p_status_start;
ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
}
outl(ha->adapt->hw_status_tail,
     ha->io_addr + IPS_REG_SQTR);
return (ha->adapt->p_status_tail->value);
}
/****************************************************************************/
/* */
/* Routine Name: ips_statupd_copperhead_memio */
/* */
/* Routine Description: */
/* */
/* Remove an element from the status queue */
/* */
/****************************************************************************/
static uint32_t
ips_statupd_copperhead_memio(ips_ha_t * ha)
{
/* Memory-mapped variant of ips_statupd_copperhead(): advance the tail
 * (wrapping at the ring end), publish it via the MMIO SQTR register,
 * and return the status value at the NEW tail position. */
METHOD_TRACE("ips_statupd_copperhead_memio", 1);
if (ha->adapt->p_status_tail != ha->adapt->p_status_end) {
ha->adapt->p_status_tail++;
ha->adapt->hw_status_tail += sizeof (IPS_STATUS);
} else {
/* Wrap both the virtual and bus-address tails to the start. */
ha->adapt->p_status_tail = ha->adapt->p_status_start;
ha->adapt->hw_status_tail = ha->adapt->hw_status_start;
}
writel(ha->adapt->hw_status_tail, ha->mem_ptr + IPS_REG_SQTR);
return (ha->adapt->p_status_tail->value);
}
/****************************************************************************/
/* */
/* Routine Name: ips_statupd_morpheus */
/* */
/* Routine Description: */
/* */
/* Remove an element from the status queue */
/* */
/****************************************************************************/
static uint32_t
ips_statupd_morpheus(ips_ha_t * ha)
{
	METHOD_TRACE("ips_statupd_morpheus", 1);

	/* Morpheus posts completions through the I2O outbound message
	 * queue; a single register read pops the next status value. */
	return (readl(ha->mem_ptr + IPS_REG_I2O_OUTMSGQ));
}
/****************************************************************************/
/* */
/* Routine Name: ips_issue_copperhead */
/* */
/* Routine Description: */
/* */
/* Send a command down to the controller */
/* */
/****************************************************************************/
static int
ips_issue_copperhead(ips_ha_t * ha, ips_scb_t * scb)
{
/* Issue one command to a copperhead controller via port I/O: wait for
 * the CCCR semaphore bit to clear (up to IPS_SEM_TIMEOUT ms), then
 * write the SCB bus address to CCSAR and kick CCCR.
 * Returns IPS_SUCCESS, or IPS_FAILURE on semaphore timeout. */
uint32_t TimeOut;
uint32_t val;
METHOD_TRACE("ips_issue_copperhead", 1);
if (scb->scsi_cmd) {
DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
  ips_name,
  ha->host_num,
  scb->cdb[0],
  scb->cmd.basic_io.command_id,
  scb->bus, scb->target_id, scb->lun);
} else {
DEBUG_VAR(2, KERN_NOTICE "(%s%d) ips_issue: logical cmd id %d",
  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
}
TimeOut = 0;
/* NOTE(review): inl() already returns CPU byte order, so the
 * le32_to_cpu here looks redundant (a no-op on little-endian hosts);
 * confirm intent before touching — kept as-is. */
while ((val =
le32_to_cpu(inl(ha->io_addr + IPS_REG_CCCR))) & IPS_BIT_SEM) {
udelay(1000);
if (++TimeOut >= IPS_SEM_TIMEOUT) {
/* Timed out with no start/stop pending: give up loudly. */
if (!(val & IPS_BIT_START_STOP))
break;
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "ips_issue val [0x%x].\n", val);
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "ips_issue semaphore chk timeout.\n");
return (IPS_FAILURE);
}		/* end if */
}			/* end while */
outl(scb->scb_busaddr, ha->io_addr + IPS_REG_CCSAR);
outw(IPS_BIT_START_CMD, ha->io_addr + IPS_REG_CCCR);
return (IPS_SUCCESS);
}
/****************************************************************************/
/* */
/* Routine Name: ips_issue_copperhead_memio */
/* */
/* Routine Description: */
/* */
/* Send a command down to the controller */
/* */
/****************************************************************************/
static int
ips_issue_copperhead_memio(ips_ha_t * ha, ips_scb_t * scb)
{
/* Memory-mapped variant of ips_issue_copperhead(): wait for the CCCR
 * semaphore bit to clear (up to IPS_SEM_TIMEOUT ms), then write the SCB
 * bus address to CCSAR and kick CCCR via MMIO.
 * Returns IPS_SUCCESS, or IPS_FAILURE on semaphore timeout. */
uint32_t TimeOut;
uint32_t val;
METHOD_TRACE("ips_issue_copperhead_memio", 1);
if (scb->scsi_cmd) {
DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
  ips_name,
  ha->host_num,
  scb->cdb[0],
  scb->cmd.basic_io.command_id,
  scb->bus, scb->target_id, scb->lun);
} else {
DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
}
TimeOut = 0;
while ((val = readl(ha->mem_ptr + IPS_REG_CCCR)) & IPS_BIT_SEM) {
udelay(1000);
if (++TimeOut >= IPS_SEM_TIMEOUT) {
/* Timed out with no start/stop pending: give up loudly. */
if (!(val & IPS_BIT_START_STOP))
break;
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "ips_issue val [0x%x].\n", val);
IPS_PRINTK(KERN_WARNING, ha->pcidev,
   "ips_issue semaphore chk timeout.\n");
return (IPS_FAILURE);
}		/* end if */
}			/* end while */
writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_CCSAR);
writel(IPS_BIT_START_CMD, ha->mem_ptr + IPS_REG_CCCR);
return (IPS_SUCCESS);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_issue_i2o                                            */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Post a command to an I2O-style controller via port I/O: the SCB's    */
/*     bus address is written directly to the inbound message queue.        */
/*                                                                          */
/****************************************************************************/
static int
ips_issue_i2o(ips_ha_t * ha, ips_scb_t * scb)
{
	METHOD_TRACE("ips_issue_i2o", 1);
	if (!scb->scsi_cmd) {
		/* Internally generated (logical) command. */
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
	} else {
		/* Command issued on behalf of a SCSI midlayer request. */
		DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
			  ips_name, ha->host_num, scb->cdb[0],
			  scb->cmd.basic_io.command_id, scb->bus,
			  scb->target_id, scb->lun);
	}
	/* No semaphore handshake needed: a single write queues the SCB. */
	outl(scb->scb_busaddr, ha->io_addr + IPS_REG_I2O_INMSGQ);
	return (IPS_SUCCESS);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_issue_i2o_memio                                      */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Post a command to an I2O-style controller via MMIO: the SCB's bus    */
/*     address is written directly to the inbound message queue register.   */
/*                                                                          */
/****************************************************************************/
static int
ips_issue_i2o_memio(ips_ha_t * ha, ips_scb_t * scb)
{
	METHOD_TRACE("ips_issue_i2o_memio", 1);
	if (!scb->scsi_cmd) {
		/* Internally generated (logical) command. */
		DEBUG_VAR(2, "(%s%d) ips_issue: logical cmd id %d",
			  ips_name, ha->host_num, scb->cmd.basic_io.command_id);
	} else {
		/* Command issued on behalf of a SCSI midlayer request. */
		DEBUG_VAR(2, "(%s%d) ips_issue: cmd 0x%X id %d (%d %d %d)",
			  ips_name, ha->host_num, scb->cdb[0],
			  scb->cmd.basic_io.command_id, scb->bus,
			  scb->target_id, scb->lun);
	}
	/* No semaphore handshake needed: a single write queues the SCB. */
	writel(scb->scb_busaddr, ha->mem_ptr + IPS_REG_I2O_INMSGQ);
	return (IPS_SUCCESS);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_isintr_copperhead                                    */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Test to see if an interrupt is for us (port I/O variant)             */
/*                                                                          */
/****************************************************************************/
static int
ips_isintr_copperhead(ips_ha_t * ha)
{
	uint8_t isr_val;
	METHOD_TRACE("ips_isintr_copperhead", 2);
	isr_val = inb(ha->io_addr + IPS_REG_HISR);
	if (isr_val == 0xFF) {
		/* All-ones read: nothing really there. */
		return (0);
	}
	if (isr_val & IPS_BIT_SCE) {
		/* Status channel event -- this interrupt belongs to us. */
		return (1);
	}
	if (isr_val & (IPS_BIT_SQO | IPS_BIT_GHI)) {
		/* Status queue overflow or GHI: acknowledge by writing the
		 * bits back, but report the interrupt as not ours. */
		outb(isr_val, ha->io_addr + IPS_REG_HISR);
	}
	return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_isintr_copperhead_memio                              */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Test to see if an interrupt is for us (MMIO variant)                 */
/*                                                                          */
/****************************************************************************/
static int
ips_isintr_copperhead_memio(ips_ha_t * ha)
{
	uint8_t isr_val;
	METHOD_TRACE("ips_isintr_memio", 2);
	isr_val = readb(ha->mem_ptr + IPS_REG_HISR);
	if (isr_val == 0xFF) {
		/* All-ones read: nothing really there. */
		return (0);
	}
	if (isr_val & IPS_BIT_SCE) {
		/* Status channel event -- this interrupt belongs to us. */
		return (1);
	}
	if (isr_val & (IPS_BIT_SQO | IPS_BIT_GHI)) {
		/* Status queue overflow or GHI: acknowledge by writing the
		 * bits back, but report the interrupt as not ours. */
		writeb(isr_val, ha->mem_ptr + IPS_REG_HISR);
	}
	return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_isintr_morpheus                                      */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Test to see if an interrupt is for us (morpheus adapters)            */
/*                                                                          */
/****************************************************************************/
static int
ips_isintr_morpheus(ips_ha_t * ha)
{
	uint32_t hir;
	METHOD_TRACE("ips_isintr_morpheus", 2);
	/* The interrupt is ours iff the outbound-post-queue bit is set. */
	hir = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
	return (hir & IPS_BIT_I2O_OPQI) ? 1 : 0;
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_wait                                                 */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Wait for a command to complete                                       */
/*                                                                          */
/****************************************************************************/
/*
 * Busy-wait up to 'time' seconds for ha->waitflag to be cleared by the
 * completion path.  With IPS_INTR_IORL the interrupt handler is invoked
 * directly through ha->func.intr on each iteration (the caller holds the
 * request lock, so the hardware interrupt cannot be serviced normally).
 * Returns IPS_SUCCESS on completion, IPS_FAILURE on timeout.
 */
static int
ips_wait(ips_ha_t * ha, int time, int intr)
{
	int ret;
	int done;
	METHOD_TRACE("ips_wait", 1);
	ret = IPS_FAILURE;
	done = FALSE;
	time *= IPS_ONE_SEC;	/* convert seconds */
	while ((time > 0) && (!done)) {
		if (intr == IPS_INTR_ON) {
			/* Interrupts are live: the ISR clears waitflag
			 * asynchronously when the command completes. */
			if (ha->waitflag == FALSE) {
				ret = IPS_SUCCESS;
				done = TRUE;
				break;
			}
		} else if (intr == IPS_INTR_IORL) {
			if (ha->waitflag == FALSE) {
				/*
				 * controller generated an interrupt to
				 * acknowledge completion of the command
				 * and ips_intr() has serviced the interrupt.
				 */
				ret = IPS_SUCCESS;
				done = TRUE;
				break;
			}
			/*
			 * NOTE: we already have the io_request_lock so
			 * even if we get an interrupt it won't get serviced
			 * until after we finish.
			 */
			(*ha->func.intr) (ha);
		}
		/* This looks like a very evil loop, but it only does this during start-up */
		udelay(1000);
		time--;
	}
	return (ret);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_write_driver_status                                  */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Write OS/Driver version to Page 5 of the nvram on the controller     */
/*                                                                          */
/****************************************************************************/
/*
 * Read NVRAM page 5, fix up its signature if invalid, stamp in the OS type
 * and driver/BIOS version strings, and write the page back.  On success
 * the adapter slot number from page 5 is cached in ha->slot_num.
 * Returns 1 on success, 0 if either the read or the write fails.
 */
static int
ips_write_driver_status(ips_ha_t * ha, int intr)
{
	METHOD_TRACE("ips_write_driver_status", 1);
	if (!ips_readwrite_page5(ha, FALSE, intr)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to read NVRAM page 5.\n");
		return (0);
	}
	/* check to make sure the page has a valid */
	/* signature */
	if (le32_to_cpu(ha->nvram->signature) != IPS_NVRAM_P5_SIG) {
		DEBUG_VAR(1,
			  "(%s%d) NVRAM page 5 has an invalid signature: %X.",
			  ips_name, ha->host_num, ha->nvram->signature);
		ha->nvram->signature = IPS_NVRAM_P5_SIG;
	}
	DEBUG_VAR(2,
		  "(%s%d) Ad Type: %d, Ad Slot: %d, BIOS: %c%c%c%c %c%c%c%c.",
		  ips_name, ha->host_num, le16_to_cpu(ha->nvram->adapter_type),
		  ha->nvram->adapter_slot, ha->nvram->bios_high[0],
		  ha->nvram->bios_high[1], ha->nvram->bios_high[2],
		  ha->nvram->bios_high[3], ha->nvram->bios_low[0],
		  ha->nvram->bios_low[1], ha->nvram->bios_low[2],
		  ha->nvram->bios_low[3]);
	ips_get_bios_version(ha, intr);
	/* change values (as needed) */
	ha->nvram->operating_system = IPS_OS_LINUX;
	ha->nvram->adapter_type = ha->ad_type;
	/* These are fixed 4-byte version fields, deliberately copied without
	 * NUL termination -- strncpy with a hard length of 4 is intended. */
	strncpy((char *) ha->nvram->driver_high, IPS_VERSION_HIGH, 4);
	strncpy((char *) ha->nvram->driver_low, IPS_VERSION_LOW, 4);
	strncpy((char *) ha->nvram->bios_high, ha->bios_version, 4);
	strncpy((char *) ha->nvram->bios_low, ha->bios_version + 4, 4);
	ha->nvram->versioning = 0;	/* Indicate the Driver Does Not Support Versioning */
	/* now update the page */
	if (!ips_readwrite_page5(ha, TRUE, intr)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "unable to write NVRAM page 5.\n");
		return (0);
	}
	/* IF NVRAM Page 5 is OK, Use it for Slot Number Info Because Linux Doesn't Do Slots */
	ha->slot_num = ha->nvram->adapter_slot;
	return (1);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_read_adapter_status                                  */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Do an Inquiry command to the adapter                                 */
/*                                                                          */
/****************************************************************************/
/*
 * Issue an ENQUIRY command; the adapter DMAs the result straight into
 * ha->enq (via ha->enq_busaddr).  Uses the last SCB slot as scratch.
 * Returns 1 on success, 0 on failure.
 */
static int
ips_read_adapter_status(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;
	METHOD_TRACE("ips_read_adapter_status", 1);
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_ENQUIRY;
	scb->cmd.basic_io.op_code = IPS_CMD_ENQUIRY;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.basic_io.sg_count = 0;
	scb->cmd.basic_io.lba = 0;
	scb->cmd.basic_io.sector_count = 0;
	scb->cmd.basic_io.log_drv = 0;
	scb->data_len = sizeof (*ha->enq);
	scb->cmd.basic_io.sg_addr = ha->enq_busaddr;
	/* send command */
	/* Failure, immediate completion, or a basic status above 1
	 * all count as failure for this enquiry. */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);
	return (1);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_read_subsystem_parameters                            */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Read subsystem parameters from the adapter                           */
/*                                                                          */
/****************************************************************************/
/*
 * Issue a GET_SUBSYS command; the adapter DMAs into the shared ioctl
 * buffer (ha->ioctl_busaddr), and the result is then copied to
 * ha->subsys.  Returns 1 on success, 0 on failure.
 */
static int
ips_read_subsystem_parameters(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;
	METHOD_TRACE("ips_read_subsystem_parameters", 1);
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_GET_SUBSYS;
	scb->cmd.basic_io.op_code = IPS_CMD_GET_SUBSYS;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.basic_io.sg_count = 0;
	scb->cmd.basic_io.lba = 0;
	scb->cmd.basic_io.sector_count = 0;
	scb->cmd.basic_io.log_drv = 0;
	scb->data_len = sizeof (*ha->subsys);
	scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
	/* send command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);
	/* Copy from the DMA-visible ioctl buffer into the cached struct. */
	memcpy(ha->subsys, ha->ioctl_data, sizeof(*ha->subsys));
	return (1);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_read_config                                          */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Read the configuration on the adapter                                */
/*                                                                          */
/****************************************************************************/
/*
 * Issue a READ_CONF command.  On failure the configuration is zeroed and
 * the initiator IDs reset to the default of 7; a completed-with-errors
 * status is still treated as success so management tools (JCRM) can reach
 * the adapter.  Returns 1 on success, 0 on hard failure.
 */
static int
ips_read_config(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int i;
	int ret;
	METHOD_TRACE("ips_read_config", 1);
	/* set defaults for initiator IDs */
	for (i = 0; i < 4; i++)
		ha->conf->init_id[i] = 7;
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_READ_CONF;
	scb->cmd.basic_io.op_code = IPS_CMD_READ_CONF;
	scb->cmd.basic_io.command_id = IPS_COMMAND_ID(ha, scb);
	scb->data_len = sizeof (*ha->conf);
	scb->cmd.basic_io.sg_addr = ha->ioctl_busaddr;
	/* send command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
		memset(ha->conf, 0, sizeof (IPS_CONF));
		/* reset initiator IDs */
		for (i = 0; i < 4; i++)
			ha->conf->init_id[i] = 7;
		/* Allow Completed with Errors, so JCRM can access the Adapter to fix the problems */
		if ((scb->basic_status & IPS_GSC_STATUS_MASK) ==
		    IPS_CMD_CMPLT_WERROR)
			return (1);
		return (0);
	}
	/* Copy from the DMA-visible ioctl buffer into the cached struct. */
	memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
	return (1);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_readwrite_page5                                      */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Read or write nvram page 5 on the adapter                            */
/*                                                                          */
/****************************************************************************/
/*
 * Transfer NVRAM page 5 between ha->nvram and the adapter, through the
 * shared ioctl buffer.  'write' selects the direction: non-zero writes
 * ha->nvram to the card, zero reads the card into ha->nvram.  On failure
 * ha->nvram is zeroed.  Returns 1 on success, 0 on failure.
 */
static int
ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
{
	ips_scb_t *scb;
	int ret;
	METHOD_TRACE("ips_readwrite_page5", 1);
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_RW_NVRAM_PAGE;
	scb->cmd.nvram.op_code = IPS_CMD_RW_NVRAM_PAGE;
	scb->cmd.nvram.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.nvram.page = 5;
	scb->cmd.nvram.write = write;
	scb->cmd.nvram.reserved = 0;
	scb->cmd.nvram.reserved2 = 0;
	scb->data_len = sizeof (*ha->nvram);
	scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
	/* For a write, stage the NVRAM image into the DMA buffer first. */
	if (write)
		memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
	/* issue the command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1)) {
		memset(ha->nvram, 0, sizeof (IPS_NVRAM_P5));
		return (0);
	}
	/* For a read, copy the page back out of the DMA buffer. */
	if (!write)
		memcpy(ha->nvram, ha->ioctl_data, sizeof(*ha->nvram));
	return (1);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_clear_adapter                                       */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     Clear the stripe lock tables                                         */
/*                                                                          */
/****************************************************************************/
/*
 * Send two back-to-back internal commands: a CONFIG_SYNC (with the POCL
 * flag) followed by an ERROR_TABLE / unlock-stripe command.  Both must
 * succeed for the routine to return 1; any failure returns 0.
 */
static int
ips_clear_adapter(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	int ret;
	METHOD_TRACE("ips_clear_adapter", 1);
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	/* Both commands use the longer reset timeout for the send; note the
	 * second ips_send_wait below is bounded by ips_cmd_timeout. */
	scb->timeout = ips_reset_timeout;
	scb->cdb[0] = IPS_CMD_CONFIG_SYNC;
	scb->cmd.config_sync.op_code = IPS_CMD_CONFIG_SYNC;
	scb->cmd.config_sync.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.config_sync.channel = 0;
	scb->cmd.config_sync.source_target = IPS_POCL;
	scb->cmd.config_sync.reserved = 0;
	scb->cmd.config_sync.reserved2 = 0;
	scb->cmd.config_sync.reserved3 = 0;
	/* issue command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_reset_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);
	/* send unlock stripe command */
	ips_init_scb(ha, scb);
	scb->cdb[0] = IPS_CMD_ERROR_TABLE;
	scb->timeout = ips_reset_timeout;
	scb->cmd.unlock_stripe.op_code = IPS_CMD_ERROR_TABLE;
	scb->cmd.unlock_stripe.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.unlock_stripe.log_drv = 0;
	scb->cmd.unlock_stripe.control = IPS_CSL;
	scb->cmd.unlock_stripe.reserved = 0;
	scb->cmd.unlock_stripe.reserved2 = 0;
	scb->cmd.unlock_stripe.reserved3 = 0;
	/* issue command */
	if (((ret =
	      ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
	    || (ret == IPS_SUCCESS_IMM)
	    || ((scb->basic_status & IPS_GSC_STATUS_MASK) > 1))
		return (0);
	return (1);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_ffdc_reset                                           */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     FFDC: write reset info                                               */
/*                                                                          */
/****************************************************************************/
/*
 * Send a First-Failure-Data-Capture record noting an adapter reset:
 * reset type 0x80 plus the running reset count and the timestamp from
 * ha->last_ffdc.  Fire-and-forget: the send result is not checked.
 */
static void
ips_ffdc_reset(ips_ha_t * ha, int intr)
{
	ips_scb_t *scb;
	METHOD_TRACE("ips_ffdc_reset", 1);
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_FFDC;
	scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
	scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.ffdc.reset_count = ha->reset_count;
	scb->cmd.ffdc.reset_type = 0x80;
	/* convert time to what the card wants */
	ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
	/* issue command */
	ips_send_wait(ha, scb, ips_cmd_timeout, intr);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_ffdc_time                                            */
/*                                                                          */
/*   Routine Description:                                                   */
/*                                                                          */
/*     FFDC: write time info                                                */
/*                                                                          */
/****************************************************************************/
/*
 * Send a First-Failure-Data-Capture time update (reset count/type zero)
 * carrying the timestamp from ha->last_ffdc.  Always issued with the
 * IPS_FFDC interrupt mode; fire-and-forget, result not checked.
 */
static void
ips_ffdc_time(ips_ha_t * ha)
{
	ips_scb_t *scb;
	METHOD_TRACE("ips_ffdc_time", 1);
	DEBUG_VAR(1, "(%s%d) Sending time update.", ips_name, ha->host_num);
	/* The highest-numbered SCB is reserved for internal commands. */
	scb = &ha->scbs[ha->max_cmds - 1];
	ips_init_scb(ha, scb);
	scb->timeout = ips_cmd_timeout;
	scb->cdb[0] = IPS_CMD_FFDC;
	scb->cmd.ffdc.op_code = IPS_CMD_FFDC;
	scb->cmd.ffdc.command_id = IPS_COMMAND_ID(ha, scb);
	scb->cmd.ffdc.reset_count = 0;
	scb->cmd.ffdc.reset_type = 0;
	/* convert time to what the card wants */
	ips_fix_ffdc_time(ha, scb, ha->last_ffdc);
	/* issue command */
	ips_send_wait(ha, scb, ips_cmd_timeout, IPS_FFDC);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_fix_ffdc_time                                        */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Adjust time_t to what the card wants                                 */
/*                                                                          */
/****************************************************************************/
/*
 * Convert a Unix time_t into the broken-down calendar fields (year,
 * month, day, hour, minute, second) that the FFDC command block expects,
 * writing them into scb->cmd.ffdc.  'ha' is only used for tracing.
 */
static void
ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time)
{
	long days;
	long rem;
	int i;
	int year;
	int yleap;
	/* The calendar tables are read-only; 'static const' builds them
	 * once at compile time instead of re-initializing 28 ints on the
	 * stack for every call.  Column 0 is a normal year, column 1 a
	 * leap year. */
	static const int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR,
					     IPS_DAYS_LEAP_YEAR };
	static const int month_lengths[12][2] = { {31, 31},
						  {28, 29},
						  {31, 31},
						  {30, 30},
						  {31, 31},
						  {30, 30},
						  {31, 31},
						  {31, 31},
						  {30, 30},
						  {31, 31},
						  {30, 30},
						  {31, 31}
	};
	METHOD_TRACE("ips_fix_ffdc_time", 1);
	/* Split into whole days and the remainder within the day. */
	days = current_time / IPS_SECS_DAY;
	rem = current_time % IPS_SECS_DAY;
	scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
	rem = rem % IPS_SECS_HOUR;
	scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
	scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
	/* Walk the year forward (or backward) from the epoch until 'days'
	 * lands inside a single calendar year.  The loop condition also
	 * records the leap status of the candidate year in 'yleap'. */
	year = IPS_EPOCH_YEAR;
	while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
		int newy;
		newy = year + (days / IPS_DAYS_NORMAL_YEAR);
		if (days < 0)
			--newy;
		/* Subtract the exact day span between 'year' and 'newy',
		 * including the leap days in between. */
		days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
		    IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
		    IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
		year = newy;
	}
	/* The card wants the year split into century and 2-digit parts. */
	scb->cmd.ffdc.yearH = year / 100;
	scb->cmd.ffdc.yearL = year % 100;
	/* Peel off whole months to get month index and day-of-month
	 * (both converted to 1-based for the card). */
	for (i = 0; days >= month_lengths[i][yleap]; ++i)
		days -= month_lengths[i][yleap];
	scb->cmd.ffdc.month = i + 1;
	scb->cmd.ffdc.day = days + 1;
}
/****************************************************************************
 * BIOS Flash Routines *
 ****************************************************************************/
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_erase_bios                                           */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Erase the BIOS on the adapter                                        */
/*                                                                          */
/****************************************************************************/
/*
 * Erase the flash part holding the BIOS using what appears to be an
 * Intel-style flash command set (0x50 clear-status, 0x20/0xD0 erase
 * setup/confirm, 0x70 read-status, 0xB0 suspend, 0xFF read-array) --
 * verify against the flash datasheet.  TROMBONE64 revisions need a 25 us
 * settle delay after every flash access.  Returns 0 on success, 1 on any
 * failure (timeout, VPP fault, or sequence error).
 */
static int
ips_erase_bios(ips_ha_t * ha)
{
	int timeout;
	uint8_t status = 0;
	METHOD_TRACE("ips_erase_bios", 1);
	status = 0;
	/* Clear the status register */
	outl(0, ha->io_addr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	outb(0x50, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Erase Setup */
	outb(0x20, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Erase Confirm */
	outb(0xD0, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Erase Status */
	outb(0x70, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Poll (1 ms steps) for the ready bit (0x80) in the status. */
	timeout = 80000;	/* 80 seconds */
	while (timeout > 0) {
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
			outl(0, ha->io_addr + IPS_REG_FLAP);
			udelay(25);	/* 25 us */
		}
		status = inb(ha->io_addr + IPS_REG_FLDP);
		if (status & 0x80)
			break;
		MDELAY(1);
		timeout--;
	}
	/* check for timeout */
	if (timeout <= 0) {
		/* timeout */
		/* try to suspend the erase */
		outb(0xB0, ha->io_addr + IPS_REG_FLDP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		/* wait for 10 seconds */
		timeout = 10000;
		while (timeout > 0) {
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
				outl(0, ha->io_addr + IPS_REG_FLAP);
				udelay(25);	/* 25 us */
			}
			status = inb(ha->io_addr + IPS_REG_FLDP);
			if (status & 0xC0)
				break;
			MDELAY(1);
			timeout--;
		}
		return (1);
	}
	/* check for valid VPP */
	if (status & 0x08)
		/* VPP failure */
		return (1);
	/* check for successful flash */
	if (status & 0x30)
		/* sequence error */
		return (1);
	/* Otherwise, we were successful */
	/* clear status */
	outb(0x50, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* enable reads */
	outb(0xFF, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_erase_bios_memio                                     */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Erase the BIOS on the adapter                                        */
/*                                                                          */
/****************************************************************************/
/*
 * MMIO twin of ips_erase_bios(): identical flash command sequence, but
 * driven through readb/writeb on ha->mem_ptr instead of port I/O.
 * Returns 0 on success, 1 on any failure.
 */
static int
ips_erase_bios_memio(ips_ha_t * ha)
{
	int timeout;
	uint8_t status;
	METHOD_TRACE("ips_erase_bios_memio", 1);
	status = 0;
	/* Clear the status register */
	writel(0, ha->mem_ptr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Erase Setup */
	writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Erase Confirm */
	writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Erase Status */
	writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* Poll (1 ms steps) for the ready bit (0x80) in the status. */
	timeout = 80000;	/* 80 seconds */
	while (timeout > 0) {
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
			writel(0, ha->mem_ptr + IPS_REG_FLAP);
			udelay(25);	/* 25 us */
		}
		status = readb(ha->mem_ptr + IPS_REG_FLDP);
		if (status & 0x80)
			break;
		MDELAY(1);
		timeout--;
	}
	/* check for timeout */
	if (timeout <= 0) {
		/* timeout */
		/* try to suspend the erase */
		writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		/* wait for 10 seconds */
		timeout = 10000;
		while (timeout > 0) {
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
				writel(0, ha->mem_ptr + IPS_REG_FLAP);
				udelay(25);	/* 25 us */
			}
			status = readb(ha->mem_ptr + IPS_REG_FLDP);
			if (status & 0xC0)
				break;
			MDELAY(1);
			timeout--;
		}
		return (1);
	}
	/* check for valid VPP */
	if (status & 0x08)
		/* VPP failure */
		return (1);
	/* check for successful flash */
	if (status & 0x30)
		/* sequence error */
		return (1);
	/* Otherwise, we were successful */
	/* clear status */
	writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	/* enable reads */
	writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_program_bios                                         */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Program the BIOS on the adapter                                      */
/*                                                                          */
/****************************************************************************/
/*
 * Byte-program 'buffersize' bytes from 'buffer' into the flash starting
 * at 'offset'.  Each byte is written with a 0x40 program command followed
 * by the data, then the status is polled (up to 1 s) for ready (0x80) and
 * checked for a program error (0x18).  On any error the flash is put back
 * into read-array mode (0xFF).  Returns 0 on success, 1 on failure.
 */
static int
ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
		 uint32_t offset)
{
	int i;
	int timeout;
	uint8_t status = 0;
	METHOD_TRACE("ips_program_bios", 1);
	status = 0;
	for (i = 0; i < buffersize; i++) {
		/* write a byte */
		outl(i + offset, ha->io_addr + IPS_REG_FLAP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		outb(0x40, ha->io_addr + IPS_REG_FLDP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		/* wait up to one second */
		timeout = 1000;
		while (timeout > 0) {
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
				outl(0, ha->io_addr + IPS_REG_FLAP);
				udelay(25);	/* 25 us */
			}
			status = inb(ha->io_addr + IPS_REG_FLDP);
			if (status & 0x80)
				break;
			MDELAY(1);
			timeout--;
		}
		if (timeout == 0) {
			/* timeout error */
			outl(0, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			outb(0xFF, ha->io_addr + IPS_REG_FLDP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			return (1);
		}
		/* check the status */
		if (status & 0x18) {
			/* programming error */
			outl(0, ha->io_addr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			outb(0xFF, ha->io_addr + IPS_REG_FLDP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			return (1);
		}
	}			/* end for */
	/* Enable reading */
	outl(0, ha->io_addr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	outb(0xFF, ha->io_addr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_program_bios_memio                                   */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Program the BIOS on the adapter                                      */
/*                                                                          */
/****************************************************************************/
/*
 * MMIO twin of ips_program_bios(): identical per-byte program sequence
 * driven through readb/writeb/writel on ha->mem_ptr.
 * Returns 0 on success, 1 on failure.
 */
static int
ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
		       uint32_t offset)
{
	int i;
	int timeout;
	uint8_t status = 0;
	METHOD_TRACE("ips_program_bios_memio", 1);
	status = 0;
	for (i = 0; i < buffersize; i++) {
		/* write a byte */
		writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		/* wait up to one second */
		timeout = 1000;
		while (timeout > 0) {
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
				writel(0, ha->mem_ptr + IPS_REG_FLAP);
				udelay(25);	/* 25 us */
			}
			status = readb(ha->mem_ptr + IPS_REG_FLDP);
			if (status & 0x80)
				break;
			MDELAY(1);
			timeout--;
		}
		if (timeout == 0) {
			/* timeout error */
			writel(0, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			return (1);
		}
		/* check the status */
		if (status & 0x18) {
			/* programming error */
			writel(0, ha->mem_ptr + IPS_REG_FLAP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
			if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
				udelay(25);	/* 25 us */
			return (1);
		}
	}			/* end for */
	/* Enable reading */
	writel(0, ha->mem_ptr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_verify_bios                                          */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Verify the BIOS on the adapter                                       */
/*                                                                          */
/****************************************************************************/
/*
 * Verify the flashed BIOS image: check the 0x55 0xAA option-ROM signature
 * in the first two bytes, then sum bytes [2, buffersize) read back from
 * the flash (8-bit running checksum seeded with 0xff) and require the
 * total to be zero.  NOTE: the 'buffer' parameter is never referenced --
 * verification reads only the flash contents, not the caller's copy.
 * Returns 0 on success, 1 on failure.
 */
static int
ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
		uint32_t offset)
{
	uint8_t checksum;
	int i;
	METHOD_TRACE("ips_verify_bios", 1);
	/* test 1st byte */
	outl(0, ha->io_addr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
		return (1);
	outl(1, ha->io_addr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
		return (1);
	checksum = 0xff;
	for (i = 2; i < buffersize; i++) {
		outl(i + offset, ha->io_addr + IPS_REG_FLAP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
	}
	if (checksum != 0)
		/* failure */
		return (1);
	else
		/* success */
		return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_verify_bios_memio                                    */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Verify the BIOS on the adapter                                       */
/*                                                                          */
/****************************************************************************/
/*
 * MMIO twin of ips_verify_bios(): same 0x55 0xAA signature check and
 * 8-bit checksum over the flash contents, via readb/writel.  As in the
 * port-I/O version, 'buffer' is never referenced.
 * Returns 0 on success, 1 on failure.
 */
static int
ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
		      uint32_t offset)
{
	uint8_t checksum;
	int i;
	METHOD_TRACE("ips_verify_bios_memio", 1);
	/* test 1st byte */
	writel(0, ha->mem_ptr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
		return (1);
	writel(1, ha->mem_ptr + IPS_REG_FLAP);
	if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
		udelay(25);	/* 25 us */
	if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
		return (1);
	checksum = 0xff;
	for (i = 2; i < buffersize; i++) {
		writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
		if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
			udelay(25);	/* 25 us */
		checksum =
		    (uint8_t) checksum + readb(ha->mem_ptr + IPS_REG_FLDP);
	}
	if (checksum != 0)
		/* failure */
		return (1);
	else
		/* success */
		return (0);
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_abort_init                                           */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Cleanup routine for a failed adapter initialization                  */
/****************************************************************************/
static int
ips_abort_init(ips_ha_t * ha, int index)
{
	/* Mark the adapter inactive and release everything it acquired,
	 * then drop it from the global adapter/host tables. */
	ha->active = 0;
	ips_free(ha);
	ips_sh[index] = NULL;
	ips_ha[index] = NULL;
	return -1;
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_shift_controllers                                    */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Helper function for ordering adapters                                */
/****************************************************************************/
static void
ips_shift_controllers(int lowindex, int highindex)
{
	/* Remember the entry being promoted to 'lowindex'. */
	ips_ha_t *moved_ha = ips_ha[highindex];
	struct Scsi_Host *moved_sh = ips_sh[highindex];
	int slot;
	/* Slide every entry in (lowindex, highindex] up by one position,
	 * renumbering host_num to match each entry's new slot. */
	for (slot = highindex; slot > lowindex; slot--) {
		ips_ha[slot] = ips_ha[slot - 1];
		ips_sh[slot] = ips_sh[slot - 1];
		ips_ha[slot]->host_num = slot;
	}
	/* Drop the saved entry into the freed low slot. */
	moved_ha->host_num = lowindex;
	ips_ha[lowindex] = moved_ha;
	ips_sh[lowindex] = moved_sh;
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_order_controllers                                    */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Place controllers in the "proper" boot order                         */
/****************************************************************************/
/*
 * Reorder the global ips_ha[]/ips_sh[] tables.  If the first adapter's
 * NVRAM carries an adapter_order list (adapter_order[0] = entry count),
 * adapters are grouped by class letter ('M', 'N', 'S', 'A') in that
 * order.  Otherwise the legacy ordering is used: 5I adapters first,
 * then the 4L/4M/4LX/4MX family.
 */
static void
ips_order_controllers(void)
{
	int i, j, tmp, position = 0;
	IPS_NVRAM_P5 *nvram;
	if (!ips_ha[0])
		return;
	/* The boot order is taken from the first adapter's NVRAM page 5. */
	nvram = ips_ha[0]->nvram;
	if (nvram->adapter_order[0]) {
		/* For each class letter in the NVRAM list, pull every
		 * matching adapter forward to 'position'. */
		for (i = 1; i <= nvram->adapter_order[0]; i++) {
			for (j = position; j < ips_num_controllers; j++) {
				switch (ips_ha[j]->ad_type) {
				case IPS_ADTYPE_SERVERAID6M:
				case IPS_ADTYPE_SERVERAID7M:
					if (nvram->adapter_order[i] == 'M') {
						ips_shift_controllers(position,
								      j);
						position++;
					}
					break;
				case IPS_ADTYPE_SERVERAID4L:
				case IPS_ADTYPE_SERVERAID4M:
				case IPS_ADTYPE_SERVERAID4MX:
				case IPS_ADTYPE_SERVERAID4LX:
					if (nvram->adapter_order[i] == 'N') {
						ips_shift_controllers(position,
								      j);
						position++;
					}
					break;
				case IPS_ADTYPE_SERVERAID6I:
				case IPS_ADTYPE_SERVERAID5I2:
				case IPS_ADTYPE_SERVERAID5I1:
				case IPS_ADTYPE_SERVERAID7k:
					if (nvram->adapter_order[i] == 'S') {
						ips_shift_controllers(position,
								      j);
						position++;
					}
					break;
				case IPS_ADTYPE_SERVERAID:
				case IPS_ADTYPE_SERVERAID2:
				case IPS_ADTYPE_NAVAJO:
				case IPS_ADTYPE_KIOWA:
				case IPS_ADTYPE_SERVERAID3L:
				case IPS_ADTYPE_SERVERAID3:
				case IPS_ADTYPE_SERVERAID4H:
					if (nvram->adapter_order[i] == 'A') {
						ips_shift_controllers(position,
								      j);
						position++;
					}
					break;
				default:
					break;
				}
			}
		}
		/* if adapter_order[0], then ordering is complete */
		return;
	}
	/* old bios, use older ordering */
	/* Pass 1: move all 5I adapters to the front. */
	tmp = 0;
	for (i = position; i < ips_num_controllers; i++) {
		if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I2 ||
		    ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID5I1) {
			ips_shift_controllers(position, i);
			position++;
			tmp = 1;
		}
	}
	/* if there were no 5I cards, then don't do any extra ordering */
	if (!tmp)
		return;
	/* Pass 2: follow the 5I cards with the 4-series family. */
	for (i = position; i < ips_num_controllers; i++) {
		if (ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4L ||
		    ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4M ||
		    ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4LX ||
		    ips_ha[i]->ad_type == IPS_ADTYPE_SERVERAID4MX) {
			ips_shift_controllers(position, i);
			position++;
		}
	}
	return;
}
/****************************************************************************/
/*                                                                          */
/*   Routine Name: ips_register_scsi                                        */
/*                                                                          */
/*   Routine Description:                                                   */
/*     Perform any registration and setup with the scsi layer               */
/****************************************************************************/
/*
 * Re-home the adapter state from the boot-time ips_ha[] entry into a
 * freshly allocated Scsi_Host's hostdata, re-request the IRQ against the
 * new structure, then register and scan the host.
 * Returns 0 on success, -1 on failure.
 */
static int
ips_register_scsi(int index)
{
	struct Scsi_Host *sh;
	ips_ha_t *ha, *oldha = ips_ha[index];
	sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t));
	if (!sh) {
		IPS_PRINTK(KERN_WARNING, oldha->pcidev,
			   "Unable to register controller with SCSI subsystem\n");
		return -1;
	}
	/* Copy the whole adapter state into the host-private area, then
	 * move the IRQ registration from the old structure to the new
	 * one before the old one is freed. */
	ha = IPS_HA(sh);
	memcpy(ha, oldha, sizeof (ips_ha_t));
	free_irq(oldha->pcidev->irq, oldha);
	/* Install the interrupt handler with the new ha */
	if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Unable to install interrupt handler\n");
		goto err_out_sh;
	}
	kfree(oldha);
	/* Store away needed values for later use */
	sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
	sh->sg_tablesize = sh->hostt->sg_tablesize;
	sh->can_queue = sh->hostt->can_queue;
	sh->cmd_per_lun = sh->hostt->cmd_per_lun;
	sh->use_clustering = sh->hostt->use_clustering;
	sh->max_sectors = 128;
	sh->max_id = ha->ntargets;
	sh->max_lun = ha->nlun;
	sh->max_channel = ha->nbus - 1;
	/* Deliberately overrides the template value set above: queue depth
	 * is limited by this adapter's command count. */
	sh->can_queue = ha->max_cmds - 1;
	if (scsi_add_host(sh, &ha->pcidev->dev))
		goto err_out;
	ips_sh[index] = sh;
	ips_ha[index] = ha;
	scsi_scan_host(sh);
	return 0;
err_out:
	free_irq(ha->pcidev->irq, ha);
err_out_sh:
	scsi_host_put(sh);
	return -1;
}
/*---------------------------------------------------------------------------*/
/*   Routine Name: ips_remove_device                                         */
/*                                                                           */
/*   Routine Description:                                                    */
/*     Remove one Adapter ( Hot Plugging )                                   */
/*---------------------------------------------------------------------------*/
/*
 * PCI remove callback: tear down the SCSI host associated with the PCI
 * device, then release the BAR regions and disable the device.
 */
static void __devexit
ips_remove_device(struct pci_dev *pci_dev)
{
	struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	/* Release the host first; the PCI resources go last. */
	ips_release(sh);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}
/****************************************************************************/
/* */
/* Routine Name: ips_module_init */
/* */
/* Routine Description: */
/* function called on module load */
/****************************************************************************/
static int __init
ips_module_init(void)
{
	/* Register with the PCI core first; the actual adapter probing is
	 * done below through the legacy scsi template ips_detect() path. */
	if (pci_register_driver(&ips_pci_driver) < 0)
		return -ENODEV;
	ips_driver_template.module = THIS_MODULE;
	ips_order_controllers();
	if (!ips_detect(&ips_driver_template)) {
		/* No adapters found: undo the PCI registration. */
		pci_unregister_driver(&ips_pci_driver);
		return -ENODEV;
	}
	/* NOTE(review): ips_notifier presumably flushes adapter state on
	 * reboot/shutdown -- confirm in the notifier callback. */
	register_reboot_notifier(&ips_notifier);
	return 0;
}
/****************************************************************************/
/* */
/* Routine Name: ips_module_exit */
/* */
/* Routine Description: */
/* function called on module unload */
/****************************************************************************/
static void __exit
ips_module_exit(void)
{
	/* Reverse of ips_module_init(): drop the PCI driver (which removes
	 * each adapter), then the reboot notifier registered there. */
	pci_unregister_driver(&ips_pci_driver);
	unregister_reboot_notifier(&ips_notifier);
}
module_init(ips_module_init);
module_exit(ips_module_exit);
/*---------------------------------------------------------------------------*/
/* Routine Name: ips_insert_device */
/* */
/* Routine Description: */
/* Add One Adapter ( Hot Plug ) */
/* */
/* Return Value: */
/* 0 if Successful, else non-zero */
/*---------------------------------------------------------------------------*/
static int __devinit
ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
{
	int index = -1;
	int rc;

	METHOD_TRACE("ips_insert_device", 1);

	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;

	rc = pci_request_regions(pci_dev, "ips");
	if (rc)
		goto err_out;

	rc = ips_init_phase1(pci_dev, &index);
	if (rc == SUCCESS)
		rc = ips_init_phase2(index);

	/*
	 * BUGFIX: only hand the controller to the scsi layer if both init
	 * phases succeeded.  Previously ips_register_scsi(index) was called
	 * whenever ips_hotplug was set, even after a failed phase1, where
	 * index is still -1 and ips_ha[index] is an out-of-bounds access.
	 */
	if (rc == SUCCESS && ips_hotplug)
		if (ips_register_scsi(index)) {
			ips_free(ips_ha[index]);
			rc = -1;
		}

	if (rc == SUCCESS)
		ips_num_controllers++;

	ips_next_controller = ips_num_controllers;

	if (rc < 0) {
		rc = -ENODEV;
		goto err_out_regions;
	}

	/* Remember the host for ips_remove_device(). */
	pci_set_drvdata(pci_dev, ips_sh[index]);
	return 0;

err_out_regions:
	pci_release_regions(pci_dev);
err_out:
	pci_disable_device(pci_dev);
	return rc;
}
/*---------------------------------------------------------------------------*/
/* Routine Name: ips_init_phase1 */
/* */
/* Routine Description: */
/* Adapter Initialization */
/* */
/* Return Value: */
/* 0 if Successful, else non-zero */
/*---------------------------------------------------------------------------*/
static int
ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
{
	ips_ha_t *ha;
	uint32_t io_addr;
	uint32_t mem_addr;
	uint32_t io_len;
	uint32_t mem_len;
	uint8_t bus;
	uint8_t func;
	int j;
	int index;
	dma_addr_t dma_address;
	char __iomem *ioremap_ptr;
	char __iomem *mem_ptr;
	uint32_t IsDead;

	METHOD_TRACE("ips_init_phase1", 1);

	/* Find the first free slot in the global adapter table. */
	index = IPS_MAX_ADAPTERS;
	for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
		if (ips_ha[j] == NULL) {
			index = j;
			break;
		}
	}

	if (index >= IPS_MAX_ADAPTERS)
		return -1;

	/* stuff that we get in dev */
	bus = pci_dev->bus->number;
	func = pci_dev->devfn;

	/* Init MEM/IO addresses to 0 */
	mem_addr = 0;
	io_addr = 0;
	mem_len = 0;
	io_len = 0;

	/* Scan the first two BARs for one I/O and one memory resource. */
	for (j = 0; j < 2; j++) {
		if (!pci_resource_start(pci_dev, j))
			break;

		if (pci_resource_flags(pci_dev, j) & IORESOURCE_IO) {
			io_addr = pci_resource_start(pci_dev, j);
			io_len = pci_resource_len(pci_dev, j);
		} else {
			mem_addr = pci_resource_start(pci_dev, j);
			mem_len = pci_resource_len(pci_dev, j);
		}
	}

	/* setup memory mapped area (if applicable) */
	if (mem_addr) {
		uint32_t base;
		uint32_t offs;

		/* Map one page-aligned page and keep the intra-page offset. */
		base = mem_addr & PAGE_MASK;
		offs = mem_addr - base;
		ioremap_ptr = ioremap(base, PAGE_SIZE);
		if (!ioremap_ptr)
			return -1;
		mem_ptr = ioremap_ptr + offs;
	} else {
		ioremap_ptr = NULL;
		mem_ptr = NULL;
	}

	/* found a controller */
	ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
	if (ha == NULL) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate temporary ha struct\n");
		/*
		 * BUGFIX: release the ioremap() mapping established above.
		 * Without an ha the normal cleanup path (ips_abort_init ->
		 * ips_free) is unreachable, so returning here used to leak
		 * the mapping.
		 */
		if (ioremap_ptr)
			iounmap(ioremap_ptr);
		return -1;
	}

	ips_sh[index] = NULL;
	ips_ha[index] = ha;
	ha->active = 1;

	/* Store info in HA structure; from here on ips_abort_init(ha, index)
	 * owns cleanup of everything hung off ha (including ioremap_ptr). */
	ha->io_addr = io_addr;
	ha->io_len = io_len;
	ha->mem_addr = mem_addr;
	ha->mem_len = mem_len;
	ha->mem_ptr = mem_ptr;
	ha->ioremap_ptr = ioremap_ptr;
	ha->host_num = (uint32_t) index;
	ha->slot_num = PCI_SLOT(pci_dev->devfn);
	ha->pcidev = pci_dev;

	/*
	 * Set the pci_dev's dma_mask.  Not all adapters support 64bit
	 * addressing so don't enable it if the adapter can't support
	 * it!  Also, don't use 64bit addressing if dma addresses
	 * are guaranteed to be < 4G.
	 */
	if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
	    !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
		(ha)->flags |= IPS_HA_ENH_SG;
	} else {
		if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_WARNING "Unable to set DMA Mask\n");
			return ips_abort_init(ha, index);
		}
	}

	/* One shared flash buffer for all adapters, allocated on demand. */
	if (ips_cd_boot && !ips_FlashData) {
		ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
						     &ips_flashbusaddr);
	}

	ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
				       &ha->enq_busaddr);
	if (!ha->enq) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate host inquiry structure\n");
		return ips_abort_init(ha, index);
	}

	/* adapt and dummy share one DMA allocation; dummy sits right after. */
	ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
					 sizeof (IPS_IO_CMD), &dma_address);
	if (!ha->adapt) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate host adapt & dummy structures\n");
		return ips_abort_init(ha, index);
	}
	ha->adapt->hw_status_start = dma_address;
	ha->dummy = (void *) (ha->adapt + 1);

	ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
	if (!ha->logical_drive_info) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate logical drive info structure\n");
		return ips_abort_init(ha, index);
	}
	ha->logical_drive_info_dma_addr = dma_address;

	ha->conf = kmalloc(sizeof (IPS_CONF), GFP_KERNEL);
	if (!ha->conf) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate host conf structure\n");
		return ips_abort_init(ha, index);
	}

	ha->nvram = kmalloc(sizeof (IPS_NVRAM_P5), GFP_KERNEL);
	if (!ha->nvram) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate host NVRAM structure\n");
		return ips_abort_init(ha, index);
	}

	ha->subsys = kmalloc(sizeof (IPS_SUBSYS), GFP_KERNEL);
	if (!ha->subsys) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate host subsystem structure\n");
		return ips_abort_init(ha, index);
	}

	/* the ioctl buffer is now used during adapter initialization, so its
	 * successful allocation is now required */
	if (ips_ioctlsize < PAGE_SIZE)
		ips_ioctlsize = PAGE_SIZE;

	ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
					      &ha->ioctl_busaddr);
	ha->ioctl_len = ips_ioctlsize;
	if (!ha->ioctl_data) {
		IPS_PRINTK(KERN_WARNING, pci_dev,
			   "Unable to allocate IOCTL data\n");
		return ips_abort_init(ha, index);
	}

	/*
	 * Setup Functions
	 */
	ips_setup_funclist(ha);

	if ((IPS_IS_MORPHEUS(ha)) || (IPS_IS_MARCO(ha))) {
		/* If Morpheus appears dead, reset it */
		IsDead = readl(ha->mem_ptr + IPS_REG_I960_MSG1);
		if (IsDead == 0xDEADBEEF) {
			ips_reset_morpheus(ha);
		}
	}

	/*
	 * Initialize the card if it isn't already
	 */
	if (!(*ha->func.isinit) (ha)) {
		if (!(*ha->func.init) (ha)) {
			/*
			 * Initialization failed
			 */
			IPS_PRINTK(KERN_WARNING, pci_dev,
				   "Unable to initialize controller\n");
			return ips_abort_init(ha, index);
		}
	}

	*indexPtr = index;
	return SUCCESS;
}
/*---------------------------------------------------------------------------*/
/* Routine Name: ips_init_phase2 */
/* */
/* Routine Description: */
/* Adapter Initialization Phase 2 */
/* */
/* Return Value: */
/* 0 if Successful, else non-zero */
/*---------------------------------------------------------------------------*/
static int
ips_init_phase2(int index)
{
	ips_ha_t *ha = ips_ha[index];

	METHOD_TRACE("ips_init_phase2", 1);

	/* Phase1 may have deactivated the adapter; drop it from the table. */
	if (!ha->active) {
		ips_ha[index] = NULL;
		return -1;
	}

	/* Install the interrupt handler */
	if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Unable to install interrupt handler\n");
		return ips_abort_init(ha, index);
	}

	/*
	 * Allocate a temporary SCB for initialization
	 */
	ha->max_cmds = 1;
	if (!ips_allocatescbs(ha)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Unable to allocate a CCB\n");
		goto err_free_irq;
	}

	if (!ips_hainit(ha)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Unable to initialize controller\n");
		goto err_free_irq;
	}
	/* Free the temporary SCB */
	ips_deallocatescbs(ha, 1);

	/* allocate CCBs */
	if (!ips_allocatescbs(ha)) {
		IPS_PRINTK(KERN_WARNING, ha->pcidev,
			   "Unable to allocate CCBs\n");
		goto err_free_irq;
	}

	return SUCCESS;

err_free_irq:
	/* Common failure path: release the irq, then abort adapter init. */
	free_irq(ha->pcidev->irq, ha);
	return ips_abort_init(ha, index);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
MODULE_VERSION(IPS_VER_STRING);
/*
* Overrides for Emacs so that we almost follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-indent-level: 2
* c-brace-imaginary-offset: 0
* c-brace-offset: -2
* c-argdecl-indent: 2
* c-label-offset: -2
* c-continued-statement-offset: 2
* c-continued-brace-offset: 0
* indent-tabs-mode: nil
* tab-width: 8
* End:
*/
/* SPDX-License-Identifier: GPL-2.0 */