repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
refusebt/WinObjC | deps/3rdparty/iculegacy/source/layout/MorphTables.cpp | 178 | 2813 | /*
* %W% %W%
*
* (C) Copyright IBM Corp. 1998 - 2004 - All Rights Reserved
*
*/
#include "LETypes.h"
#include "LayoutTables.h"
#include "MorphTables.h"
#include "SubtableProcessor.h"
#include "IndicRearrangementProcessor.h"
#include "ContextualGlyphSubstProc.h"
#include "LigatureSubstProc.h"
#include "NonContextualGlyphSubstProc.h"
//#include "ContextualGlyphInsertionProcessor.h"
#include "LEGlyphStorage.h"
#include "LESwaps.h"
U_NAMESPACE_BEGIN
/*
 * Walk every metamorphosis chain in this 'mort' table and run each
 * subtable whose feature flags intersect the chain's default flags.
 * Vertical-only subtables are skipped.
 */
void MorphTableHeader::process(LEGlyphStorage &glyphStorage) const
{
    const ChainHeader *currentChain = chains;
    le_uint32 totalChains = SWAPL(this->nChains);

    for (le_uint32 chainIndex = 0; chainIndex < totalChains; chainIndex += 1) {
        FeatureFlags activeFlags = SWAPL(currentChain->defaultFlags);
        le_uint32 chainByteLength = SWAPL(currentChain->chainLength);
        le_int16 featureCount = SWAPW(currentChain->nFeatureEntries);
        le_int16 subtableCount = SWAPW(currentChain->nSubtables);

        // Subtables begin immediately after the feature table entries.
        const MorphSubtableHeader *subtable =
            (const MorphSubtableHeader *)&currentChain->featureTable[featureCount];

        for (le_int16 subtableIndex = 0; subtableIndex < subtableCount; subtableIndex += 1) {
            le_int16 subtableByteLength = SWAPW(subtable->length);
            SubtableCoverage subtableCoverage = SWAPW(subtable->coverage);
            FeatureFlags subtableFlags = SWAPL(subtable->subtableFeatures);

            // should check coverage more carefully...
            if ((subtableCoverage & scfVertical) == 0 && (subtableFlags & activeFlags) != 0) {
                subtable->process(glyphStorage);
            }

            // Advance by the subtable's byte length to reach the next one.
            subtable = (const MorphSubtableHeader *)((char *)subtable + subtableByteLength);
        }

        // Advance by the chain's byte length to reach the next chain.
        currentChain = (const ChainHeader *)((char *)currentChain + chainByteLength);
    }
}
/*
 * Dispatch this subtable to the processor matching its coverage type.
 * Unknown / reserved / unimplemented types are silently ignored.
 */
void MorphSubtableHeader::process(LEGlyphStorage &glyphStorage) const
{
    SubtableProcessor *processor = NULL;
    le_uint16 subtableType = SWAPW(coverage) & scfTypeMask;

    if (subtableType == mstIndicRearrangement) {
        processor = new IndicRearrangementProcessor(this);
    } else if (subtableType == mstContextualGlyphSubstitution) {
        processor = new ContextualGlyphSubstitutionProcessor(this);
    } else if (subtableType == mstLigatureSubstitution) {
        processor = new LigatureSubstitutionProcessor(this);
    } else if (subtableType == mstNonContextualGlyphSubstitution) {
        processor = NonContextualGlyphSubstitutionProcessor::createInstance(this);
    }
    // mstReservedUnused and any unrecognized type: no processor.
    /*
    } else if (subtableType == mstContextualGlyphInsertion) {
        processor = new ContextualGlyphInsertionProcessor(this);
    }
    */

    if (processor != NULL) {
        processor->process(glyphStorage);
        delete processor;
    }
}
U_NAMESPACE_END
| mit |
KevinShawn/WinObjC | deps/3rdparty/icu/icu/source/layout/ScriptAndLanguage.cpp | 185 | 3923 | /*
* (C) Copyright IBM Corp. 1998-2013 - All Rights Reserved
*
*/
#include "LETypes.h"
#include "OpenTypeTables.h"
#include "OpenTypeUtilities.h"
#include "ScriptAndLanguage.h"
#include "LESwaps.h"
U_NAMESPACE_BEGIN
/*
 * Locate the LangSysTable for languageTag in this script table.
 * When exactMatch is false, the default language-system offset is the
 * fallback candidate; an explicit record for the tag takes precedence.
 * Returns an empty reference when nothing matches.
 */
LEReferenceTo<LangSysTable> ScriptTable::findLanguage(const LETableReference& base, LETag languageTag, LEErrorCode &success, le_bool exactMatch) const
{
    le_uint16 recordCount = SWAPW(langSysCount);
    Offset tableOffset = exactMatch ? 0 : SWAPW(defaultLangSysTableOffset);

    if (recordCount > 0) {
        LEReferenceToArrayOf<TagAndOffsetRecord> records(base, success, langSysRecordArray, recordCount);
        Offset matchOffset = OpenTypeUtilities::getTagOffset(languageTag, records, success);

        if (matchOffset != 0 && LE_SUCCESS(success)) {
            tableOffset = matchOffset;
        }
    }

    if (tableOffset == 0) {
        return LEReferenceTo<LangSysTable>();
    }

    return LEReferenceTo<LangSysTable>(base, success, tableOffset);
}
// Locate the ScriptTable for scriptTag in this script list, defending
// against fonts that declare a bogus (too large) scriptCount.
// Returns an empty reference on failure or when the tag is absent.
LEReferenceTo<ScriptTable> ScriptListTable::findScript(const LETableReference &base, LETag scriptTag, LEErrorCode &success) const
{
if (LE_FAILURE(success) ) {
return LEReferenceTo<ScriptTable>(); // get out
}
/*
* There are some fonts that have a large, bogus value for scriptCount. To try
* and protect against this, we use the offset in the first scriptRecord,
* which we know has to be past the end of the scriptRecordArray, to compute
* a value which is greater than or equal to the actual script count.
*
* Note: normally, the first offset will point to just after the scriptRecordArray,
* but there's no guarantee of this, only that it's *after* the scriptRecordArray.
* Because of this, a binary search isn't safe, because the new count may include
* data that's not actually in the scriptRecordArray and hence the array will appear
* to be unsorted.
*/
le_uint16 count = SWAPW(scriptCount);
if (count == 0) {
return LEReferenceTo<ScriptTable>(); // no items, no search
}
// attempt to construct a ref with at least one element
LEReferenceToArrayOf<ScriptRecord> oneElementTable(base, success, &scriptRecordArray[0], 1);
if( LE_FAILURE(success) ) {
return LEReferenceTo<ScriptTable>(); // couldn't even read the first record - bad font.
}
// Upper bound on the record count, derived from the first record's offset.
// NOTE(review): divides by sizeof(scriptRecordArray) (the declared array),
// not sizeof(ScriptRecord); ANY_NUMBER padding compensates - confirm against
// the header's declaration before changing.
le_uint16 limit = ((SWAPW(scriptRecordArray[0].offset) - sizeof(ScriptListTable)) / sizeof(scriptRecordArray)) + ANY_NUMBER;
Offset scriptTableOffset = 0;
if (count > limit) {
// the scriptCount value is bogus; do a linear search
// because limit may still be too large.
LEReferenceToArrayOf<ScriptRecord> scriptRecordArrayRef(base, success, &scriptRecordArray[0], limit);
for(le_int32 s = 0; (s < limit)&&LE_SUCCESS(success); s += 1) {
if (SWAPT(scriptRecordArrayRef(s,success).tag) == scriptTag) {
scriptTableOffset = SWAPW(scriptRecordArrayRef(s,success).offset);
break;
}
}
} else {
// Count looks sane: binary search by tag is safe.
LEReferenceToArrayOf<ScriptRecord> scriptRecordArrayRef(base, success, &scriptRecordArray[0], count);
scriptTableOffset = OpenTypeUtilities::getTagOffset(scriptTag, scriptRecordArrayRef, success);
}
if (scriptTableOffset != 0) {
return LEReferenceTo<ScriptTable>(base, success, scriptTableOffset);
}
return LEReferenceTo<ScriptTable>();
}
/*
 * Convenience: find the script, then the language system within it,
 * reparenting the result onto the caller's base reference.
 */
LEReferenceTo<LangSysTable> ScriptListTable::findLanguage(const LETableReference &base, LETag scriptTag, LETag languageTag, LEErrorCode &success, le_bool exactMatch) const
{
    const LEReferenceTo<ScriptTable> script = findScript(base, scriptTag, success);

    return script.isEmpty()
        ? LEReferenceTo<LangSysTable>()
        : script->findLanguage(script, languageTag, success, exactMatch).reparent(base);
}
U_NAMESPACE_END
| mit |
bSr43/WinObjC | deps/3rdparty/icu/icu/source/common/uloc_keytype.cpp | 191 | 19209 | /*
**********************************************************************
* Copyright (C) 2014, International Business Machines
* Corporation and others. All Rights Reserved.
**********************************************************************
*/
#include "unicode/utypes.h"
#include "cstring.h"
#include "uassert.h"
#include "ucln_cmn.h"
#include "uhash.h"
#include "umutex.h"
#include "uresimp.h"
#include "uvector.h"
static UHashtable* gLocExtKeyMap = NULL;
static icu::UInitOnce gLocExtKeyMapInitOnce = U_INITONCE_INITIALIZER;
static icu::UVector* gKeyTypeStringPool = NULL;
static icu::UVector* gLocExtKeyDataEntries = NULL;
static icu::UVector* gLocExtTypeEntries = NULL;
// bit flags for special types
typedef enum {
SPECIALTYPE_NONE = 0,
SPECIALTYPE_CODEPOINTS = 1,
SPECIALTYPE_REORDER_CODE = 2
} SpecialType;
typedef struct LocExtKeyData {
const char* legacyId;
const char* bcpId;
UHashtable* typeMap;
uint32_t specialTypes;
} LocExtKeyData;
typedef struct LocExtType {
const char* legacyId;
const char* bcpId;
} LocExtType;
U_CDECL_BEGIN
// Library-shutdown hook: release every global owned by this module and
// reset the init-once guard so the tables can be rebuilt if needed.
static UBool U_CALLCONV
uloc_key_type_cleanup(void) {
    if (gLocExtKeyMap != NULL) {
        uhash_close(gLocExtKeyMap);
        gLocExtKeyMap = NULL;
    }

    // The vectors own their elements via their deleter callbacks;
    // deleting a NULL vector is a no-op.
    delete gKeyTypeStringPool;
    gKeyTypeStringPool = NULL;

    delete gLocExtKeyDataEntries;
    gLocExtKeyDataEntries = NULL;

    delete gLocExtTypeEntries;
    gLocExtTypeEntries = NULL;

    gLocExtKeyMapInitOnce.reset();
    return TRUE;
}
// UVector deleter for gKeyTypeStringPool: entries are uprv_malloc'ed strings.
static void U_CALLCONV
uloc_deleteKeyTypeStringPoolEntry(void* obj) {
uprv_free(obj);
}
// UVector deleter for gLocExtKeyDataEntries: closes the entry's type map
// (if any) before freeing the struct itself.
static void U_CALLCONV
uloc_deleteKeyDataEntry(void* obj) {
    LocExtKeyData* entry = (LocExtKeyData*)obj;

    if (entry->typeMap != NULL) {
        uhash_close(entry->typeMap);
    }
    uprv_free(entry);
}
// UVector deleter for gLocExtTypeEntries: entries are uprv_malloc'ed LocExtType.
// (The id strings it points at are owned by gKeyTypeStringPool or the bundle.)
static void U_CALLCONV
uloc_deleteTypeEntry(void* obj) {
uprv_free(obj);
}
U_CDECL_END
// One-time initializer (run under gLocExtKeyMapInitOnce): builds the global
// legacy<->BCP47 key/type lookup tables from the "keyTypeData" resource
// bundle. On any failure, sts is set and the loop breaks; partially built
// globals are reclaimed later by uloc_key_type_cleanup.
static void U_CALLCONV
initFromResourceBundle(UErrorCode& sts) {
U_NAMESPACE_USE
ucln_common_registerCleanup(UCLN_COMMON_LOCALE_KEY_TYPE, uloc_key_type_cleanup);
gLocExtKeyMap = uhash_open(uhash_hashIChars, uhash_compareIChars, NULL, &sts);
LocalUResourceBundlePointer keyTypeDataRes(ures_openDirect(NULL, "keyTypeData", &sts));
LocalUResourceBundlePointer keyMapRes(ures_getByKey(keyTypeDataRes.getAlias(), "keyMap", NULL, &sts));
LocalUResourceBundlePointer typeMapRes(ures_getByKey(keyTypeDataRes.getAlias(), "typeMap", NULL, &sts));
if (U_FAILURE(sts)) {
return;
}
// typeAlias / bcpTypeAlias are optional; look them up with a scratch
// status so their absence does not poison sts.
UErrorCode tmpSts = U_ZERO_ERROR;
LocalUResourceBundlePointer typeAliasRes(ures_getByKey(keyTypeDataRes.getAlias(), "typeAlias", NULL, &tmpSts));
tmpSts = U_ZERO_ERROR;
LocalUResourceBundlePointer bcpTypeAliasRes(ures_getByKey(keyTypeDataRes.getAlias(), "bcpTypeAlias", NULL, &tmpSts));
// initialize vectors storing dynamically allocated objects
gKeyTypeStringPool = new UVector(uloc_deleteKeyTypeStringPoolEntry, NULL, sts);
if (gKeyTypeStringPool == NULL) {
if (U_SUCCESS(sts)) {
sts = U_MEMORY_ALLOCATION_ERROR;
}
}
if (U_FAILURE(sts)) {
return;
}
gLocExtKeyDataEntries = new UVector(uloc_deleteKeyDataEntry, NULL, sts);
if (gLocExtKeyDataEntries == NULL) {
if (U_SUCCESS(sts)) {
sts = U_MEMORY_ALLOCATION_ERROR;
}
}
if (U_FAILURE(sts)) {
return;
}
gLocExtTypeEntries = new UVector(uloc_deleteTypeEntry, NULL, sts);
if (gLocExtTypeEntries == NULL) {
if (U_SUCCESS(sts)) {
sts = U_MEMORY_ALLOCATION_ERROR;
}
}
if (U_FAILURE(sts)) {
return;
}
// iterate through keyMap resource
LocalUResourceBundlePointer keyMapEntry;
while (ures_hasNext(keyMapRes.getAlias())) {
keyMapEntry.adoptInstead(ures_getNextResource(keyMapRes.getAlias(), keyMapEntry.orphan(), &sts));
if (U_FAILURE(sts)) {
break;
}
const char* legacyKeyId = ures_getKey(keyMapEntry.getAlias());
int32_t bcpKeyIdLen = 0;
const UChar* uBcpKeyId = ures_getString(keyMapEntry.getAlias(), &bcpKeyIdLen, &sts);
if (U_FAILURE(sts)) {
break;
}
// empty value indicates that BCP key is same with the legacy key.
const char* bcpKeyId = legacyKeyId;
if (bcpKeyIdLen > 0) {
// Non-empty: copy the UTF-16 BCP key into an invariant char buffer
// whose lifetime is owned by gKeyTypeStringPool.
char* bcpKeyIdBuf = (char*)uprv_malloc(bcpKeyIdLen + 1);
if (bcpKeyIdBuf == NULL) {
sts = U_MEMORY_ALLOCATION_ERROR;
break;
}
u_UCharsToChars(uBcpKeyId, bcpKeyIdBuf, bcpKeyIdLen);
bcpKeyIdBuf[bcpKeyIdLen] = 0;
gKeyTypeStringPool->addElement(bcpKeyIdBuf, sts);
if (U_FAILURE(sts)) {
break;
}
bcpKeyId = bcpKeyIdBuf;
}
UBool isTZ = uprv_strcmp(legacyKeyId, "timezone") == 0;
UHashtable* typeDataMap = uhash_open(uhash_hashIChars, uhash_compareIChars, NULL, &sts);
if (U_FAILURE(sts)) {
break;
}
uint32_t specialTypes = SPECIALTYPE_NONE;
LocalUResourceBundlePointer typeAliasResByKey;
LocalUResourceBundlePointer bcpTypeAliasResByKey;
if (typeAliasRes.isValid()) {
tmpSts = U_ZERO_ERROR;
typeAliasResByKey.adoptInstead(ures_getByKey(typeAliasRes.getAlias(), legacyKeyId, NULL, &tmpSts));
if (U_FAILURE(tmpSts)) {
typeAliasResByKey.orphan();
}
}
if (bcpTypeAliasRes.isValid()) {
tmpSts = U_ZERO_ERROR;
bcpTypeAliasResByKey.adoptInstead(ures_getByKey(bcpTypeAliasRes.getAlias(), bcpKeyId, NULL, &tmpSts));
if (U_FAILURE(tmpSts)) {
bcpTypeAliasResByKey.orphan();
}
}
// look up type map for the key, and walk through the mapping data
tmpSts = U_ZERO_ERROR;
LocalUResourceBundlePointer typeMapResByKey(ures_getByKey(typeMapRes.getAlias(), legacyKeyId, NULL, &tmpSts));
if (U_FAILURE(tmpSts)) {
// type map for each key must exist
U_ASSERT(FALSE);
} else {
LocalUResourceBundlePointer typeMapEntry;
while (ures_hasNext(typeMapResByKey.getAlias())) {
typeMapEntry.adoptInstead(ures_getNextResource(typeMapResByKey.getAlias(), typeMapEntry.orphan(), &sts));
if (U_FAILURE(sts)) {
break;
}
const char* legacyTypeId = ures_getKey(typeMapEntry.getAlias());
// special types
if (uprv_strcmp(legacyTypeId, "CODEPOINTS") == 0) {
specialTypes |= SPECIALTYPE_CODEPOINTS;
continue;
}
if (uprv_strcmp(legacyTypeId, "REORDER_CODE") == 0) {
specialTypes |= SPECIALTYPE_REORDER_CODE;
continue;
}
if (isTZ) {
// a timezone key uses a colon instead of a slash in the resource.
// e.g. America:Los_Angeles
if (uprv_strchr(legacyTypeId, ':') != NULL) {
int32_t legacyTypeIdLen = uprv_strlen(legacyTypeId);
char* legacyTypeIdBuf = (char*)uprv_malloc(legacyTypeIdLen + 1);
if (legacyTypeIdBuf == NULL) {
sts = U_MEMORY_ALLOCATION_ERROR;
break;
}
// Copy with ':' -> '/' translation.
const char* p = legacyTypeId;
char* q = legacyTypeIdBuf;
while (*p) {
if (*p == ':') {
*q++ = '/';
} else {
*q++ = *p;
}
p++;
}
*q = 0;
gKeyTypeStringPool->addElement(legacyTypeIdBuf, sts);
if (U_FAILURE(sts)) {
break;
}
legacyTypeId = legacyTypeIdBuf;
}
}
int32_t bcpTypeIdLen = 0;
const UChar* uBcpTypeId = ures_getString(typeMapEntry.getAlias(), &bcpTypeIdLen, &sts);
if (U_FAILURE(sts)) {
break;
}
// empty value indicates that BCP type is same with the legacy type.
const char* bcpTypeId = legacyTypeId;
if (bcpTypeIdLen > 0) {
char* bcpTypeIdBuf = (char*)uprv_malloc(bcpTypeIdLen + 1);
if (bcpTypeIdBuf == NULL) {
sts = U_MEMORY_ALLOCATION_ERROR;
break;
}
u_UCharsToChars(uBcpTypeId, bcpTypeIdBuf, bcpTypeIdLen);
bcpTypeIdBuf[bcpTypeIdLen] = 0;
gKeyTypeStringPool->addElement(bcpTypeIdBuf, sts);
if (U_FAILURE(sts)) {
break;
}
bcpTypeId = bcpTypeIdBuf;
}
// Note: legacy type value should never be
// equivalent to bcp type value of a different
// type under the same key. So we use a single
// map for lookup.
LocExtType* t = (LocExtType*)uprv_malloc(sizeof(LocExtType));
if (t == NULL) {
sts = U_MEMORY_ALLOCATION_ERROR;
break;
}
t->bcpId = bcpTypeId;
t->legacyId = legacyTypeId;
gLocExtTypeEntries->addElement((void*)t, sts);
if (U_FAILURE(sts)) {
break;
}
uhash_put(typeDataMap, (void*)legacyTypeId, t, &sts);
if (bcpTypeId != legacyTypeId) {
// different type value
uhash_put(typeDataMap, (void*)bcpTypeId, t, &sts);
}
if (U_FAILURE(sts)) {
break;
}
// also put aliases in the map
if (typeAliasResByKey.isValid()) {
LocalUResourceBundlePointer typeAliasDataEntry;
ures_resetIterator(typeAliasResByKey.getAlias());
while (ures_hasNext(typeAliasResByKey.getAlias()) && U_SUCCESS(sts)) {
int32_t toLen;
typeAliasDataEntry.adoptInstead(ures_getNextResource(typeAliasResByKey.getAlias(), typeAliasDataEntry.orphan(), &sts));
const UChar* to = ures_getString(typeAliasDataEntry.getAlias(), &toLen, &sts);
if (U_FAILURE(sts)) {
break;
}
// check if this is an alias of canoncal legacy type
if (uprv_compareInvAscii(NULL, legacyTypeId, -1, to, toLen) == 0) {
const char* from = ures_getKey(typeAliasDataEntry.getAlias());
if (isTZ) {
// replace colon with slash if necessary
if (uprv_strchr(from, ':') != NULL) {
int32_t fromLen = uprv_strlen(from);
char* fromBuf = (char*)uprv_malloc(fromLen + 1);
if (fromBuf == NULL) {
sts = U_MEMORY_ALLOCATION_ERROR;
break;
}
const char* p = from;
char* q = fromBuf;
while (*p) {
if (*p == ':') {
*q++ = '/';
} else {
*q++ = *p;
}
p++;
}
*q = 0;
gKeyTypeStringPool->addElement(fromBuf, sts);
if (U_FAILURE(sts)) {
break;
}
from = fromBuf;
}
}
uhash_put(typeDataMap, (void*)from, t, &sts);
}
}
if (U_FAILURE(sts)) {
break;
}
}
if (bcpTypeAliasResByKey.isValid()) {
LocalUResourceBundlePointer bcpTypeAliasDataEntry;
ures_resetIterator(bcpTypeAliasResByKey.getAlias());
while (ures_hasNext(bcpTypeAliasResByKey.getAlias()) && U_SUCCESS(sts)) {
int32_t toLen;
bcpTypeAliasDataEntry.adoptInstead(ures_getNextResource(bcpTypeAliasResByKey.getAlias(), bcpTypeAliasDataEntry.orphan(), &sts));
const UChar* to = ures_getString(bcpTypeAliasDataEntry.getAlias(), &toLen, &sts);
if (U_FAILURE(sts)) {
break;
}
// check if this is an alias of bcp type
if (uprv_compareInvAscii(NULL, bcpTypeId, -1, to, toLen) == 0) {
const char* from = ures_getKey(bcpTypeAliasDataEntry.getAlias());
uhash_put(typeDataMap, (void*)from, t, &sts);
}
}
if (U_FAILURE(sts)) {
break;
}
}
}
}
if (U_FAILURE(sts)) {
break;
}
// All types for this key processed: publish the key entry under both
// its legacy and BCP ids.
// NOTE(review): if a break above fired before this point, typeDataMap
// is not owned by any keyData entry - looks like it can leak on the
// error path; confirm against upstream ICU before changing.
LocExtKeyData* keyData = (LocExtKeyData*)uprv_malloc(sizeof(LocExtKeyData));
if (keyData == NULL) {
sts = U_MEMORY_ALLOCATION_ERROR;
break;
}
keyData->bcpId = bcpKeyId;
keyData->legacyId = legacyKeyId;
keyData->specialTypes = specialTypes;
keyData->typeMap = typeDataMap;
gLocExtKeyDataEntries->addElement((void*)keyData, sts);
if (U_FAILURE(sts)) {
break;
}
uhash_put(gLocExtKeyMap, (void*)legacyKeyId, keyData, &sts);
if (legacyKeyId != bcpKeyId) {
// different key value
uhash_put(gLocExtKeyMap, (void*)bcpKeyId, keyData, &sts);
}
if (U_FAILURE(sts)) {
break;
}
}
}
// Lazily build the key/type tables exactly once; TRUE on success.
static UBool
init() {
    UErrorCode sts = U_ZERO_ERROR;
    umtx_initOnce(gLocExtKeyMapInitOnce, &initFromResourceBundle, sts);
    return U_SUCCESS(sts) ? TRUE : FALSE;
}
// TRUE when val is a hyphen-separated list of hex subtags, each 4-6
// digits long (the -u-vt "CODEPOINTS" form, e.g. "0061-fdfc").
static UBool
isSpecialTypeCodepoints(const char* val) {
    int32_t hexDigits = 0;

    for (const char* p = val; *p != 0; p++) {
        char c = *p;
        if (c == '-') {
            // End of a subtag: validate its length, then start a new one.
            if (hexDigits < 4 || hexDigits > 6) {
                return FALSE;
            }
            hexDigits = 0;
        } else if ((c >= '0' && c <= '9') ||
                   (c >= 'A' && c <= 'F') ||   // A-F/a-f are contiguous
                   (c >= 'a' && c <= 'f')) {   // also in EBCDIC
            hexDigits++;
        } else {
            return FALSE;
        }
    }
    // The final (or only) subtag also must be 4-6 hex digits.
    return (hexDigits >= 4 && hexDigits <= 6);
}
// TRUE when val is a hyphen-separated list of alphabetic subtags, each
// 3-8 letters long (the -u-kr "REORDER_CODE" form, e.g. "latn-digit").
static UBool
isSpecialTypeReorderCode(const char* val) {
    int32_t letters = 0;

    for (const char* p = val; *p != 0; p++) {
        if (*p == '-') {
            // End of a subtag: validate its length, then start a new one.
            if (letters < 3 || letters > 8) {
                return FALSE;
            }
            letters = 0;
        } else if (uprv_isASCIILetter(*p)) {
            letters++;
        } else {
            return FALSE;
        }
    }
    // The final (or only) subtag also must be 3-8 letters.
    return (letters >= 3 && letters <= 8);
}
// Map a legacy or BCP47 locale keyword to its BCP47 form.
// Returns NULL if the tables cannot be initialized or the key is unknown.
U_CFUNC const char*
ulocimp_toBcpKey(const char* key) {
    if (!init()) {
        return NULL;
    }
    LocExtKeyData* entry = (LocExtKeyData*)uhash_get(gLocExtKeyMap, key);
    return (entry != NULL) ? entry->bcpId : NULL;
}
// Map a legacy or BCP47 locale keyword to its legacy form.
// Returns NULL if the tables cannot be initialized or the key is unknown.
U_CFUNC const char*
ulocimp_toLegacyKey(const char* key) {
    if (!init()) {
        return NULL;
    }
    LocExtKeyData* entry = (LocExtKeyData*)uhash_get(gLocExtKeyMap, key);
    return (entry != NULL) ? entry->legacyId : NULL;
}
// Map a keyword value to its BCP47 form for the given key.
// isKnownKey / isSpecialType (both optional) report whether the key was
// found and whether the value matched a special pattern (codepoints /
// reorder codes), in which case the value is returned unchanged.
// Returns NULL when no mapping exists.
U_CFUNC const char*
ulocimp_toBcpType(const char* key, const char* type, UBool* isKnownKey, UBool* isSpecialType) {
    if (isKnownKey != NULL) {
        *isKnownKey = FALSE;
    }
    if (isSpecialType != NULL) {
        *isSpecialType = FALSE;
    }
    if (!init()) {
        return NULL;
    }

    LocExtKeyData* keyData = (LocExtKeyData*)uhash_get(gLocExtKeyMap, key);
    if (keyData == NULL) {
        return NULL;        // unknown key
    }
    if (isKnownKey != NULL) {
        *isKnownKey = TRUE;
    }

    // Direct lookup (legacy and BCP values share one map).
    LocExtType* t = (LocExtType*)uhash_get(keyData->typeMap, type);
    if (t != NULL) {
        return t->bcpId;
    }

    // No table entry: try the key's special-type patterns.
    if (keyData->specialTypes != SPECIALTYPE_NONE) {
        UBool matched = FALSE;
        if (keyData->specialTypes & SPECIALTYPE_CODEPOINTS) {
            matched = isSpecialTypeCodepoints(type);
        }
        if (!matched && (keyData->specialTypes & SPECIALTYPE_REORDER_CODE)) {
            matched = isSpecialTypeReorderCode(type);
        }
        if (matched) {
            if (isSpecialType != NULL) {
                *isSpecialType = TRUE;
            }
            return type;    // special values pass through unchanged
        }
    }
    return NULL;
}
// Map a keyword value to its legacy form for the given key.
// Mirrors ulocimp_toBcpType, but returns the legacy id on a table hit.
U_CFUNC const char*
ulocimp_toLegacyType(const char* key, const char* type, UBool* isKnownKey, UBool* isSpecialType) {
    if (isKnownKey != NULL) {
        *isKnownKey = FALSE;
    }
    if (isSpecialType != NULL) {
        *isSpecialType = FALSE;
    }
    if (!init()) {
        return NULL;
    }

    LocExtKeyData* keyData = (LocExtKeyData*)uhash_get(gLocExtKeyMap, key);
    if (keyData == NULL) {
        return NULL;        // unknown key
    }
    if (isKnownKey != NULL) {
        *isKnownKey = TRUE;
    }

    // Direct lookup (legacy and BCP values share one map).
    LocExtType* t = (LocExtType*)uhash_get(keyData->typeMap, type);
    if (t != NULL) {
        return t->legacyId;
    }

    // No table entry: try the key's special-type patterns.
    if (keyData->specialTypes != SPECIALTYPE_NONE) {
        UBool matched = FALSE;
        if (keyData->specialTypes & SPECIALTYPE_CODEPOINTS) {
            matched = isSpecialTypeCodepoints(type);
        }
        if (!matched && (keyData->specialTypes & SPECIALTYPE_REORDER_CODE)) {
            matched = isSpecialTypeReorderCode(type);
        }
        if (matched) {
            if (isSpecialType != NULL) {
                *isSpecialType = TRUE;
            }
            return type;    // special values pass through unchanged
        }
    }
    return NULL;
}
| mit |
jabez1314/WinObjC | deps/3rdparty/icu/icu/source/i18n/numsys.cpp | 205 | 9678 | /*
*******************************************************************************
* Copyright (C) 2010-2013, International Business Machines Corporation and
* others. All Rights Reserved.
*******************************************************************************
*
*
* File NUMSYS.CPP
*
* Modification History:*
* Date Name Description
*
********************************************************************************
*/
#include "unicode/utypes.h"
#include "unicode/localpointer.h"
#include "unicode/uchar.h"
#include "unicode/unistr.h"
#include "unicode/ures.h"
#include "unicode/ustring.h"
#include "unicode/uloc.h"
#include "unicode/schriter.h"
#include "unicode/numsys.h"
#include "cstring.h"
#include "uresimp.h"
#include "numsys_impl.h"
#if !UCONFIG_NO_FORMATTING
U_NAMESPACE_BEGIN
// Useful constants
#define DEFAULT_DIGITS UNICODE_STRING_SIMPLE("0123456789");
static const char gNumberingSystems[] = "numberingSystems";
static const char gNumberElements[] = "NumberElements";
static const char gDefault[] = "default";
static const char gNative[] = "native";
static const char gTraditional[] = "traditional";
static const char gFinance[] = "finance";
static const char gDesc[] = "desc";
static const char gRadix[] = "radix";
static const char gAlgorithmic[] = "algorithmic";
static const char gLatn[] = "latn";
UOBJECT_DEFINE_RTTI_IMPLEMENTATION(NumberingSystem)
UOBJECT_DEFINE_RTTI_IMPLEMENTATION(NumsysNameEnumeration)
/**
 * Default constructor: a non-algorithmic, radix-10 system named "latn"
 * using the ASCII digits 0-9.
 *
 * @draft ICU 4.2
 */
NumberingSystem::NumberingSystem() {
    algorithmic = FALSE;
    radix = 10;
    UnicodeString latnDigits = DEFAULT_DIGITS;
    desc.setTo(latnDigits);
    uprv_strcpy(name, gLatn);
}
/**
 * Copy constructor: delegates to the compiler-generated assignment
 * operator after base-class construction.
 * @draft ICU 4.2
 */
NumberingSystem::NumberingSystem(const NumberingSystem& other)
: UObject(other) {
*this=other;
}
/**
 * Factory: build a NumberingSystem from explicit parameters.
 *
 * @param radix_in        radix; must be >= 2.
 * @param isAlgorithmic_in TRUE for rule-based (algorithmic) systems.
 * @param desc_in         for non-algorithmic systems, the digit string;
 *                        must contain exactly radix_in BMP code points.
 * @param status          set to U_ILLEGAL_ARGUMENT_ERROR on bad input,
 *                        U_MEMORY_ALLOCATION_ERROR on allocation failure.
 * @return a new instance (caller owns), or NULL on failure.
 */
NumberingSystem* U_EXPORT2
NumberingSystem::createInstance(int32_t radix_in, UBool isAlgorithmic_in, const UnicodeString & desc_in, UErrorCode &status) {
    if (U_FAILURE(status)) {
        return NULL;
    }

    if ( radix_in < 2 ) {
        status = U_ILLEGAL_ARGUMENT_ERROR;
        return NULL;
    }

    if ( !isAlgorithmic_in ) {
        if ( desc_in.countChar32() != radix_in || !isValidDigitString(desc_in)) {
            status = U_ILLEGAL_ARGUMENT_ERROR;
            return NULL;
        }
    }

    NumberingSystem *ns = new NumberingSystem();
    // Fix: guard against allocation failure before dereferencing (ICU
    // convention is to report U_MEMORY_ALLOCATION_ERROR, not crash).
    if (ns == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
        return NULL;
    }

    ns->setRadix(radix_in);
    ns->setDesc(desc_in);
    ns->setAlgorithmic(isAlgorithmic_in);
    ns->setName(NULL);
    return ns;
}
/**
 * Factory: resolve the numbering system for a locale, honoring an
 * explicit @numbers keyword and the TR35 fallback chain
 * (traditional -> native, finance/native -> default). Falls back to the
 * default ("latn") system with U_USING_FALLBACK_WARNING when nothing
 * resolves. Caller owns the returned object; NULL on failure.
 */
NumberingSystem* U_EXPORT2
NumberingSystem::createInstance(const Locale & inLocale, UErrorCode& status) {
if (U_FAILURE(status)) {
return NULL;
}
UBool nsResolved = TRUE;
UBool usingFallback = FALSE;
char buffer[ULOC_KEYWORDS_CAPACITY];
int32_t count = inLocale.getKeywordValue("numbers",buffer, sizeof(buffer),status);
if ( count > 0 ) { // @numbers keyword was specified in the locale
buffer[count] = '\0'; // Make sure it is null terminated.
// Symbolic values (default/native/traditional/finance) still need
// resolution below; anything else (e.g. "arab") is used directly.
if ( !uprv_strcmp(buffer,gDefault) || !uprv_strcmp(buffer,gNative) ||
!uprv_strcmp(buffer,gTraditional) || !uprv_strcmp(buffer,gFinance)) {
nsResolved = FALSE;
}
} else {
uprv_strcpy(buffer,gDefault);
nsResolved = FALSE;
}
if (!nsResolved) { // Resolve the numbering system ( default, native, traditional or finance ) into a "real" numbering system
UErrorCode localStatus = U_ZERO_ERROR;
UResourceBundle *resource = ures_open(NULL, inLocale.getName(), &localStatus);
UResourceBundle *numberElementsRes = ures_getByKey(resource,gNumberElements,NULL,&localStatus);
while (!nsResolved) {
localStatus = U_ZERO_ERROR;
count = 0;
const UChar *nsName = ures_getStringByKeyWithFallback(numberElementsRes, buffer, &count, &localStatus);
if ( count > 0 && count < ULOC_KEYWORDS_CAPACITY ) { // numbering system found
u_UCharsToChars(nsName,buffer,count);
buffer[count] = '\0'; // Make sure it is null terminated.
nsResolved = TRUE;
}
if (!nsResolved) { // Fallback behavior per TR35 - traditional falls back to native, finance and native fall back to default
if (!uprv_strcmp(buffer,gNative) || !uprv_strcmp(buffer,gFinance)) {
uprv_strcpy(buffer,gDefault);
} else if (!uprv_strcmp(buffer,gTraditional)) {
uprv_strcpy(buffer,gNative);
} else { // If we get here we couldn't find even the default numbering system
usingFallback = TRUE;
nsResolved = TRUE;
}
}
}
ures_close(numberElementsRes);
ures_close(resource);
}
if (usingFallback) {
status = U_USING_FALLBACK_WARNING;
// NOTE(review): no NULL check on this allocation before return -
// callers appear to assume non-NULL; confirm against upstream ICU.
NumberingSystem *ns = new NumberingSystem();
return ns;
} else {
return NumberingSystem::createInstanceByName(buffer,status);
}
}
// Factory: numbering system for the process-default locale.
NumberingSystem* U_EXPORT2
NumberingSystem::createInstance(UErrorCode& status) {
return NumberingSystem::createInstance(Locale::getDefault(), status);
}
/**
 * Factory: build a NumberingSystem from its CLDR name (e.g. "latn",
 * "arab") by reading radix/desc/algorithmic from the numberingSystems
 * resource bundle.
 *
 * @param name   numbering system name.
 * @param status U_UNSUPPORTED_ERROR when the name or its resource data
 *               is missing/invalid.
 * @return a new instance (caller owns), or NULL on failure.
 */
NumberingSystem* U_EXPORT2
NumberingSystem::createInstanceByName(const char *name, UErrorCode& status) {
    UResourceBundle *numberingSystemsInfo = NULL;
    UResourceBundle *nsTop, *nsCurrent;
    int32_t radix = 10;
    int32_t algorithmic = 0;

    numberingSystemsInfo = ures_openDirect(NULL,gNumberingSystems, &status);
    nsCurrent = ures_getByKey(numberingSystemsInfo,gNumberingSystems,NULL,&status);
    nsTop = ures_getByKey(nsCurrent,name,NULL,&status);
    UnicodeString nsd = ures_getUnicodeStringByKey(nsTop,gDesc,&status);

    ures_getByKey(nsTop,gRadix,nsCurrent,&status);
    radix = ures_getInt(nsCurrent,&status);

    ures_getByKey(nsTop,gAlgorithmic,nsCurrent,&status);
    algorithmic = ures_getInt(nsCurrent,&status);

    UBool isAlgorithmic = ( algorithmic == 1 );

    ures_close(nsCurrent);
    ures_close(nsTop);
    ures_close(numberingSystemsInfo);

    if (U_FAILURE(status)) {
        status = U_UNSUPPORTED_ERROR;
        return NULL;
    }

    NumberingSystem* ns = NumberingSystem::createInstance(radix,isAlgorithmic,nsd,status);
    // Fix: createInstance returns NULL on bad resource data (e.g. a
    // radix < 2) or allocation failure; the old code dereferenced the
    // NULL pointer via ns->setName(name).
    if (ns == NULL) {
        return NULL;
    }
    ns->setName(name);
    return ns;
}
/**
 * Destructor. No owned resources beyond the members' own destructors.
 * @draft ICU 4.2
 */
NumberingSystem::~NumberingSystem() {
}
// Returns this system's radix (10 for ordinary decimal systems).
int32_t NumberingSystem::getRadix() const {
return radix;
}
// Returns the description string (the digit string for non-algorithmic
// systems; a rule name for algorithmic ones), by value.
UnicodeString NumberingSystem::getDescription() const {
return desc;
}
// Returns the system's name (e.g. "latn"); pointer remains owned by this object.
const char * NumberingSystem::getName() const {
return name;
}
// Sets the radix. No validation here; callers validate (see createInstance).
void NumberingSystem::setRadix(int32_t r) {
radix = r;
}
// Marks this system as algorithmic (rule-based) or positional.
void NumberingSystem::setAlgorithmic(UBool c) {
algorithmic = c;
}
// Replaces the description string (copied into the member).
void NumberingSystem::setDesc(UnicodeString d) {
desc.setTo(d);
}
// Sets the system name, truncating to NUMSYS_NAME_CAPACITY characters.
// A NULL argument clears the name to the empty string.
void NumberingSystem::setName(const char *n) {
    if ( n == NULL ) {
        name[0] = (char) 0;
        return;
    }
    uprv_strncpy(name, n, NUMSYS_NAME_CAPACITY);
    name[NUMSYS_NAME_CAPACITY] = (char) 0; // strncpy may not null-terminate
}
// TRUE when this is a rule-based (algorithmic) system rather than positional digits.
UBool NumberingSystem::isAlgorithmic() const {
return ( algorithmic );
}
// Returns an enumeration over all numbering system names known to the
// numberingSystems resource bundle.
// NOTE(review): the lazily-built static is created without synchronization
// and the SAME enumeration object (with its shared cursor) is handed to
// every caller; also fNumsysNames is not freed on the early-return error
// path after allocation. Confirm against upstream ICU before relying on
// concurrent use.
StringEnumeration* NumberingSystem::getAvailableNames(UErrorCode &status) {
static StringEnumeration* availableNames = NULL;
if (U_FAILURE(status)) {
return NULL;
}
if ( availableNames == NULL ) {
UVector *fNumsysNames = new UVector(uprv_deleteUObject, NULL, status);
if (U_FAILURE(status)) {
status = U_MEMORY_ALLOCATION_ERROR;
return NULL;
}
UErrorCode rbstatus = U_ZERO_ERROR;
UResourceBundle *numberingSystemsInfo = ures_openDirect(NULL, "numberingSystems", &rbstatus);
numberingSystemsInfo = ures_getByKey(numberingSystemsInfo,"numberingSystems",numberingSystemsInfo,&rbstatus);
if(U_FAILURE(rbstatus)) {
status = U_MISSING_RESOURCE_ERROR;
ures_close(numberingSystemsInfo);
return NULL;
}
// Collect every key (system name) in the bundle.
while ( ures_hasNext(numberingSystemsInfo) ) {
UResourceBundle *nsCurrent = ures_getNextResource(numberingSystemsInfo,NULL,&rbstatus);
const char *nsName = ures_getKey(nsCurrent);
fNumsysNames->addElement(new UnicodeString(nsName, -1, US_INV),status);
ures_close(nsCurrent);
}
ures_close(numberingSystemsInfo);
// The enumeration adopts fNumsysNames.
availableNames = new NumsysNameEnumeration(fNumsysNames,status);
}
return availableNames;
}
/**
 * Validates a digit string: every code point must lie in the BMP
 * (supplementary-plane digits are not currently supported).
 * The length check against the radix is done by the caller.
 *
 * @param str candidate digit string.
 * @return TRUE when all code points are BMP; FALSE otherwise.
 */
UBool NumberingSystem::isValidDigitString(const UnicodeString& str) {
    StringCharacterIterator it(str);
    UChar32 c;

    // Fix: dropped the unused counter `i` the old code incremented but
    // never read.
    for ( it.setToStart(); it.hasNext(); ) {
        c = it.next32PostInc();
        if ( c > 0xFFFF ) { // Digits outside the BMP are not currently supported
            return FALSE;
        }
    }
    return TRUE;
}
// Adopts fNameList (freed in the destructor) and resets the cursor.
NumsysNameEnumeration::NumsysNameEnumeration(UVector *fNameList, UErrorCode& /*status*/) {
pos=0;
fNumsysNames = fNameList;
}
// Returns the next name and advances the cursor; NULL at the end or on
// an incoming error status.
const UnicodeString*
NumsysNameEnumeration::snext(UErrorCode& status) {
    if (U_FAILURE(status) || pos >= fNumsysNames->size()) {
        return NULL;
    }
    return (const UnicodeString*)fNumsysNames->elementAt(pos++);
}
// Rewinds the enumeration cursor to the first name.
void
NumsysNameEnumeration::reset(UErrorCode& /*status*/) {
pos=0;
}
// Number of names in the enumeration (0 when the list is absent).
int32_t
NumsysNameEnumeration::count(UErrorCode& /*status*/) const {
    if (fNumsysNames == NULL) {
        return 0;
    }
    return fNumsysNames->size();
}
// Releases the adopted name list (UVector deletes its UnicodeString elements).
NumsysNameEnumeration::~NumsysNameEnumeration() {
delete fNumsysNames;
}
U_NAMESPACE_END
#endif /* #if !UCONFIG_NO_FORMATTING */
//eof
| mit |
tomasy23/evertonkrosnodart | drivers/builtin_openssl2/crypto/x509v3/pcy_cache.c | 727 | 7726 | /* pcy_cache.c */
/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
* project 2004.
*/
/* ====================================================================
* Copyright (c) 2004 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* licensing@OpenSSL.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
#include "cryptlib.h"
#include <openssl/x509.h>
#include <openssl/x509v3.h>
#include "pcy_int.h"
static int policy_data_cmp(const X509_POLICY_DATA * const *a,
const X509_POLICY_DATA * const *b);
static int policy_cache_set_int(long *out, ASN1_INTEGER *value);
/* Set cache entry according to CertificatePolicies extension.
* Note: this destroys the passed CERTIFICATEPOLICIES structure.
*/
/* Set cache entry according to CertificatePolicies extension.
 * Note: this destroys the passed CERTIFICATEPOLICIES structure.
 *
 * Populates x->policy_cache->data (and ->anyPolicy) from the policies
 * stack. Returns 1 on success, 0 on internal/allocation failure, -1 on
 * an invalid extension (duplicate policy OIDs); on <= 0 the partially
 * built data stack is freed and, for -1, EXFLAG_INVALID_POLICY is set.
 */
static int policy_cache_create(X509 *x,
CERTIFICATEPOLICIES *policies, int crit)
{
int i;
int ret = 0;
X509_POLICY_CACHE *cache = x->policy_cache;
X509_POLICY_DATA *data = NULL;
POLICYINFO *policy;
/* An empty policies sequence is invalid. */
if (sk_POLICYINFO_num(policies) == 0)
goto bad_policy;
cache->data = sk_X509_POLICY_DATA_new(policy_data_cmp);
if (!cache->data)
goto bad_policy;
for (i = 0; i < sk_POLICYINFO_num(policies); i++)
{
policy = sk_POLICYINFO_value(policies, i);
data = policy_data_new(policy, NULL, crit);
if (!data)
goto bad_policy;
/* Duplicate policy OIDs are illegal: reject if matches
 * found.
 */
if (OBJ_obj2nid(data->valid_policy) == NID_any_policy)
{
if (cache->anyPolicy)
{
ret = -1;
goto bad_policy;
}
cache->anyPolicy = data;
}
else if (sk_X509_POLICY_DATA_find(cache->data, data) != -1)
{
ret = -1;
goto bad_policy;
}
else if (!sk_X509_POLICY_DATA_push(cache->data, data))
goto bad_policy;
/* Ownership transferred to the stack (or anyPolicy slot). */
data = NULL;
}
ret = 1;
bad_policy:
if (ret == -1)
x->ex_flags |= EXFLAG_INVALID_POLICY;
/* Free the entry that failed before ownership transfer, if any. */
if (data)
policy_data_free(data);
sk_POLICYINFO_pop_free(policies, POLICYINFO_free);
if (ret <= 0)
{
sk_X509_POLICY_DATA_pop_free(cache->data, policy_data_free);
cache->data = NULL;
}
return ret;
}
/* Build and attach the policy cache for certificate x from its
 * policy constraints, certificate policies, policy mappings and
 * inhibit-anyPolicy extensions. Returns 1 on success (also when the
 * cache is marked invalid via EXFLAG_INVALID_POLICY), 0 or -1 when
 * policy_cache_create reports a hard failure, 0 on allocation failure.
 */
static int policy_cache_new(X509 *x)
	{
	X509_POLICY_CACHE *cache;
	ASN1_INTEGER *ext_any = NULL;
	POLICY_CONSTRAINTS *ext_pcons = NULL;
	CERTIFICATEPOLICIES *ext_cpols = NULL;
	POLICY_MAPPINGS *ext_pmaps = NULL;
	int i;
	int ret = 1;
	cache = OPENSSL_malloc(sizeof(X509_POLICY_CACHE));
	if (!cache)
		return 0;
	cache->anyPolicy = NULL;
	cache->data = NULL;
	cache->any_skip = -1;
	cache->explicit_skip = -1;
	cache->map_skip = -1;
	x->policy_cache = cache;
	/* Handle requireExplicitPolicy *first*. Need to process this
	 * even if we don't have any policies.
	 */
	ext_pcons = X509_get_ext_d2i(x, NID_policy_constraints, &i, NULL);
	if (!ext_pcons)
		{
		if (i != -1)
			goto bad_cache;
		}
	else
		{
		/* an empty policyConstraints extension is malformed */
		if (!ext_pcons->requireExplicitPolicy
			&& !ext_pcons->inhibitPolicyMapping)
			goto bad_cache;
		if (!policy_cache_set_int(&cache->explicit_skip,
			ext_pcons->requireExplicitPolicy))
			goto bad_cache;
		if (!policy_cache_set_int(&cache->map_skip,
			ext_pcons->inhibitPolicyMapping))
			goto bad_cache;
		}
	/* Process CertificatePolicies */
	ext_cpols = X509_get_ext_d2i(x, NID_certificate_policies, &i, NULL);
	/* If no CertificatePolicies extension or problem decoding then
	 * there is no point continuing because the valid policies will be
	 * NULL.
	 */
	if (!ext_cpols)
		{
		/* If not absent some problem with extension */
		if (i != -1)
			goto bad_cache;
		/* BUGFIX: go through the cleanup instead of returning
		 * directly, which leaked ext_pcons.
		 */
		goto just_cleanup;
		}
	i = policy_cache_create(x, ext_cpols, i);
	/* NB: ext_cpols freed by policy_cache_create */
	if (i <= 0)
		{
		/* BUGFIX: route the failure through the cleanup so
		 * ext_pcons is not leaked; the return value i is kept.
		 */
		ret = i;
		goto just_cleanup;
		}
	ext_pmaps = X509_get_ext_d2i(x, NID_policy_mappings, &i, NULL);
	if (!ext_pmaps)
		{
		/* If not absent some problem with extension */
		if (i != -1)
			goto bad_cache;
		}
	else
		{
		i = policy_cache_set_mapping(x, ext_pmaps);
		if (i <= 0)
			goto bad_cache;
		}
	ext_any = X509_get_ext_d2i(x, NID_inhibit_any_policy, &i, NULL);
	if (!ext_any)
		{
		if (i != -1)
			goto bad_cache;
		}
	else if (!policy_cache_set_int(&cache->any_skip, ext_any))
		goto bad_cache;
	/* a bad cache is recorded on the certificate, not reported */
	if (0)
		{
	bad_cache:
		x->ex_flags |= EXFLAG_INVALID_POLICY;
		}
	just_cleanup:
	if(ext_pcons)
		POLICY_CONSTRAINTS_free(ext_pcons);
	if (ext_any)
		ASN1_INTEGER_free(ext_any);
	return ret;
	}
/* Release a policy cache: the separate anyPolicy entry, the sorted
 * per-certificate policy data and finally the cache struct itself.
 * A NULL cache is ignored.
 */
void policy_cache_free(X509_POLICY_CACHE *cache)
	{
	if (cache == NULL)
		return;
	if (cache->anyPolicy != NULL)
		policy_data_free(cache->anyPolicy);
	if (cache->data != NULL)
		sk_X509_POLICY_DATA_pop_free(cache->data, policy_data_free);
	OPENSSL_free(cache);
	}
/* Return the policy cache of x, building it lazily on first use.
 * Construction is serialized on CRYPTO_LOCK_X509.
 */
const X509_POLICY_CACHE *policy_cache_set(X509 *x)
	{
	if (x->policy_cache == NULL)
		{
		CRYPTO_w_lock(CRYPTO_LOCK_X509);
		/* BUGFIX: re-check under the lock. Another thread may have
		 * built the cache between the unlocked test above and
		 * acquiring the lock; without this re-check a second cache
		 * would overwrite (and leak) the first.
		 */
		if (x->policy_cache == NULL)
			policy_cache_new(x);
		CRYPTO_w_unlock(CRYPTO_LOCK_X509);
		}
	return x->policy_cache;
	}
/* Look up the cached policy data entry whose OID equals id.
 * Returns NULL when no matching entry exists.
 */
X509_POLICY_DATA *policy_cache_find_data(const X509_POLICY_CACHE *cache,
				const ASN1_OBJECT *id)
	{
	X509_POLICY_DATA tmp;
	int idx;
	/* search with a stack-allocated probe entry */
	tmp.valid_policy = (ASN1_OBJECT *)id;
	idx = sk_X509_POLICY_DATA_find(cache->data, &tmp);
	return idx < 0 ? NULL : sk_X509_POLICY_DATA_value(cache->data, idx);
	}
/* Stack comparison callback: order X509_POLICY_DATA by policy OID. */
static int policy_data_cmp(const X509_POLICY_DATA * const *a,
			const X509_POLICY_DATA * const *b)
	{
	const X509_POLICY_DATA *da = *a;
	const X509_POLICY_DATA *db = *b;
	return OBJ_cmp(da->valid_policy, db->valid_policy);
	}
/* Store an optional ASN1 INTEGER into *out. An absent value succeeds
 * without touching *out; a negative value is rejected (returns 0).
 */
static int policy_cache_set_int(long *out, ASN1_INTEGER *value)
	{
	if (!value)
		return 1;
	if (value->type == V_ASN1_NEG_INTEGER)
		return 0;
	*out = ASN1_INTEGER_get(value);
	return 1;
	}
| mit |
jasonsanjose/brackets-sass | node/2.0.3/node_modules/node-sass/src/libsass/plugins.cpp | 228 | 5439 | #ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <dirent.h>
#include <errno.h>
#include <dlfcn.h>
#endif
#include <iostream>
#include "output.hpp"
#include "plugins.hpp"
#define npos string::npos
namespace Sass {
  // Plugins holds only standard containers (functions, importers,
  // headers), so construction and destruction need no explicit work.
  Plugins::Plugins(void) { }
  Plugins::~Plugins(void) { }
// check if plugin is compatible with this version
// plugins may be linked static against libsass
// we try to be compatible between major versions
inline bool compatibility(const char* their_version)
{
// const char* their_version = "3.1.2";
// first check if anyone has an unknown version
const char* our_version = libsass_version();
if (!strcmp(their_version, "[na]")) return false;
if (!strcmp(our_version, "[na]")) return false;
// find the position of the second dot
size_t pos = string(our_version).find('.', 0);
if (pos != npos) pos = string(our_version).find('.', pos + 1);
// if we do not have two dots we fallback to compare complete string
if (pos == npos) { return strcmp(their_version, our_version) ? 0 : 1; }
// otherwise only compare up to the second dot (major versions)
else { return strncmp(their_version, our_version, pos) ? 0 : 1; }
}
  // load one specific plugin
  // Opens the dynamic library at `path`, verifies it was built against
  // a compatible libsass version, then collects any custom functions,
  // importers and headers the plugin exports. Returns true only when
  // the library loaded and passed the version check; failures print a
  // diagnostic to stderr.
  // NOTE(review): LOAD_LIB / LOAD_LIB_FN / CLOSE_LIB are macros (from
  // plugins.hpp) that appear to declare `plugin` and the typed function
  // pointer variables as a side effect — confirm there.
  bool Plugins::load_plugin (const string& path)
  {
    // signatures of the entry points a plugin may export
    typedef const char* (*__plugin_version__)(void);
    typedef Sass_Function_List (*__plugin_load_fns__)(void);
    typedef Sass_Importer_List (*__plugin_load_imps__)(void);
    if (LOAD_LIB(plugin, path))
    {
      // try to load initial function to query libsass version support
      if (LOAD_LIB_FN(__plugin_version__, plugin_version, "libsass_get_version"))
      {
        // get the libsass version of the plugin
        // NOTE(review): on an incompatible version the handle is not
        // closed via CLOSE_LIB before returning — possible leak
        if (!compatibility(plugin_version())) return false;
        // try to get import address for "libsass_load_functions"
        if (LOAD_LIB_FN(__plugin_load_fns__, plugin_load_functions, "libsass_load_functions"))
        {
          // append every entry of the NULL terminated list
          Sass_Function_List fns = plugin_load_functions();
          while (fns && *fns) { functions.push_back(*fns); ++ fns; }
        }
        // try to get import address for "libsass_load_importers"
        if (LOAD_LIB_FN(__plugin_load_imps__, plugin_load_importers, "libsass_load_importers"))
        {
          Sass_Importer_List imps = plugin_load_importers();
          while (imps && *imps) { importers.push_back(*imps); ++ imps; }
        }
        // try to get import address for "libsass_load_headers"
        if (LOAD_LIB_FN(__plugin_load_imps__, plugin_load_headers, "libsass_load_headers"))
        {
          Sass_Importer_List imps = plugin_load_headers();
          while (imps && *imps) { headers.push_back(*imps); ++ imps; }
        }
        // success
        return true;
      }
      else
      {
        // print debug message to stderr (should not happen)
        cerr << "failed loading 'libsass_support' in <" << path << ">" << endl;
        if (const char* dlsym_error = dlerror()) cerr << dlsym_error << endl;
        CLOSE_LIB(plugin);
      }
    }
    else
    {
      // print debug message to stderr (should not happen)
      cerr << "failed loading plugin <" << path << ">" << endl;
      if (const char* dlopen_error = dlerror()) cerr << dlopen_error << endl;
    }
    return false;
  }
  // Scan one directory (trailing slash expected) for shared libraries
  // (*.dll on Windows, *.so elsewhere) and load each via load_plugin.
  // Returns the number of plugins loaded.
  // NOTE(review): on open/glob failure this returns -1 from a size_t
  // function, i.e. SIZE_MAX for callers — verify callers expect that.
  size_t Plugins::load_plugins(const string& path)
  {
    // count plugins
    size_t loaded = 0;
    #ifdef _WIN32
    try
    {
      // use wchar (utf16)
      WIN32_FIND_DATAW data;
      // trailing slash is guaranteed
      string globsrch(path + "*.dll");
      // convert to wide chars (utf16) for system call
      wstring wglobsrch(UTF_8::convert_to_utf16(globsrch));
      HANDLE hFile = FindFirstFileW(wglobsrch.c_str(), &data);
      // check if system called returned a result
      // ToDo: maybe we should print a debug message
      if (hFile == INVALID_HANDLE_VALUE) return -1;
      // read directory
      while (true)
      {
        try
        {
          // the system will report the filenames with wide chars (utf16)
          string entry = UTF_8::convert_from_utf16(data.cFileName);
          // check if file ending matches exactly
          // NOTE(review): this `continue` skips the FindNextFileW call
          // below, so an entry that is not ".dll" would spin forever on
          // the same file — relies on the "*.dll" glob never matching
          // anything else; confirm (8.3 short-name aliasing can break it)
          if (!ends_with(entry, ".dll")) continue;
          // load the plugin and increase counter
          if (load_plugin(path + entry)) ++ loaded;
          // check if there should be more entries
          if (GetLastError() == ERROR_NO_MORE_FILES) break;
          // load next entry (check for return type)
          if (!FindNextFileW(hFile, &data)) break;
        }
        catch (...)
        {
          // report the error to the console (should not happen)
          // seems like we got strange data from the system call?
          cerr << "filename in plugin path has invalid utf8?" << endl;
        }
      }
    }
    catch (utf8::invalid_utf8)
    {
      // report the error to the console (should not happen)
      // implementors should make sure to provide valid utf8
      cerr << "plugin path contains invalid utf8" << endl;
    }
    #else
    // POSIX: iterate the directory entries and load every "*.so"
    DIR *dp;
    struct dirent *dirp;
    if((dp = opendir(path.c_str())) == NULL) return -1;
    while ((dirp = readdir(dp)) != NULL) {
      if (!ends_with(dirp->d_name, ".so")) continue;
      if (load_plugin(path + dirp->d_name)) ++ loaded;
    }
    closedir(dp);
    #endif
    return loaded;
  }
}
| mit |
wendellpbarreto/ambiental-awareness | node_modules/gulp-sass/node_modules/node-sass/src/libsass/sass_context.cpp | 229 | 31895 | #ifdef _WIN32
#include <io.h>
#define LFEED "\n"
#else
#include <unistd.h>
#define LFEED "\n"
#endif
#include <cstring>
#include <stdexcept>
#include "file.hpp"
#include "json.hpp"
#include "util.hpp"
#include "context.hpp"
#include "sass_values.h"
#include "sass_context.h"
#include "ast_fwd_decl.hpp"
#include "error_handling.hpp"
extern "C" {
using namespace std;
using namespace Sass;
// Input behaviours
enum Sass_Input_Style {
SASS_CONTEXT_NULL,
SASS_CONTEXT_FILE,
SASS_CONTEXT_DATA,
SASS_CONTEXT_FOLDER
};
// simple linked list
struct string_list {
string_list* next;
char* string;
};
  // sass config options structure
  // All user-tunable knobs for a compilation; exposed through the
  // IMPLEMENT_SASS_OPTION_* accessor macros further below. String
  // members marked as owned are duplicated by their setters and
  // released in sass_clear_options / sass_clear_context.
  struct Sass_Options {
    // Precision for fractional numbers
    int precision;
    // Output style for the generated css code
    // A value from above SASS_STYLE_* constants
    enum Sass_Output_Style output_style;
    // Emit comments in the generated CSS indicating
    // the corresponding source line.
    bool source_comments;
    // embed sourceMappingUrl as data uri
    bool source_map_embed;
    // embed include contents in maps
    bool source_map_contents;
    // Disable sourceMappingUrl in css output
    bool omit_source_map_url;
    // Treat source_string as sass (as opposed to scss)
    bool is_indented_syntax_src;
    // The input path is used for source map
    // generation. It can be used to define
    // something with string compilation or to
    // overload the input file path. It is
    // set to "stdin" for data contexts and
    // to the input file on file contexts.
    char* input_path;
    // The output path is used for source map
    // generation. Libsass will not write to
    // this file, it is just used to create
    // information in source-maps etc.
    char* output_path;
    // String to be used for indentation
    const char* indent;
    // String to be used to for line feeds
    const char* linefeed;
    // Colon-separated list of paths
    // Semicolon-separated on Windows
    // Maybe use array interface instead?
    char* include_path;
    char* plugin_path;
    // Include paths (linked string list)
    struct string_list* include_paths;
    // Plugin paths (linked string list)
    struct string_list* plugin_paths;
    // Path to source map file
    // Enables source map generation
    // Used to create sourceMappingUrl
    char* source_map_file;
    // Directly inserted in source maps
    char* source_map_root;
    // Custom functions that can be called from scss code
    Sass_Function_List c_functions;
    // List of custom importers
    Sass_Importer_List c_importers;
    // List of custom headers
    Sass_Importer_List c_headers;
  };
  // base for all contexts
  // Extends Sass_Options with compilation results: generated css and
  // source map strings, the error state filled in by handle_errors,
  // and the list of files pulled in during parsing.
  struct Sass_Context : Sass_Options
  {
    // store context type info
    enum Sass_Input_Style type;
    // generated output data
    char* output_string;
    // generated source map json
    char* source_map_string;
    // error status
    // (0 = ok; 1..5 = error class assigned by handle_errors)
    int error_status;
    char* error_json;     // machine readable error info (JSON string)
    char* error_text;     // raw exception text
    char* error_message;  // formatted, human readable message
    // error position
    char* error_file;
    size_t error_line;
    size_t error_column;
    // points into the failing source; not freed by sass_clear_context
    const char* error_src;
    // report imported files
    // (NULL terminated array built by copy_strings)
    char** included_files;
  };
  // struct for file compilation
  // (type tag SASS_CONTEXT_FILE; created by sass_make_file_context)
  struct Sass_File_Context : Sass_Context {
    // no additional fields required
    // input_path is already on options
  };
  // struct for data compilation
  // (type tag SASS_CONTEXT_DATA; created by sass_make_data_context)
  struct Sass_Data_Context : Sass_Context {
    // provided source string
    // (taken over from the caller in sass_make_data_context)
    char* source_string;
  };
  // link c and cpp context
  // Ties one C API context to its C++ Sass::Context and tracks which
  // phase (created / parsed / executed) the compilation has reached.
  struct Sass_Compiler {
    // progress status
    Sass_Compiler_State state;
    // original c context
    Sass_Context* c_ctx;
    // Sass::Context
    Context* cpp_ctx;
    // Sass::Block
    // (root of the parsed stylesheet, set by sass_compiler_parse)
    Block* root;
  };
  // Copy every option field from one struct to the other. This is a
  // shallow copy: string pointers and lists are shared afterwards, not duplicated.
  static void copy_options(struct Sass_Options* to, struct Sass_Options* from) { *to = *from; }
#define IMPLEMENT_SASS_OPTION_ACCESSOR(type, option) \
type ADDCALL sass_option_get_##option (struct Sass_Options* options) { return options->option; } \
void ADDCALL sass_option_set_##option (struct Sass_Options* options, type option) { options->option = option; }
#define IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(type, option) \
type ADDCALL sass_option_get_##option (struct Sass_Options* options) { return options->option; } \
void ADDCALL sass_option_set_##option (struct Sass_Options* options, type option) \
{ free(options->option); options->option = option ? sass_strdup(option) : 0; }
#define IMPLEMENT_SASS_CONTEXT_GETTER(type, option) \
type ADDCALL sass_context_get_##option (struct Sass_Context* ctx) { return ctx->option; }
#define IMPLEMENT_SASS_CONTEXT_TAKER(type, option) \
type sass_context_take_##option (struct Sass_Context* ctx) \
{ type foo = ctx->option; ctx->option = 0; return foo; }
// helper for safe access to c_ctx
static const char* safe_str (const char* str) {
return str == NULL ? "" : str;
}
static void copy_strings(const std::vector<std::string>& strings, char*** array) {
int num = static_cast<int>(strings.size());
char** arr = (char**) malloc(sizeof(char*) * (num + 1));
if (arr == 0) throw(bad_alloc());
for(int i = 0; i < num; i++) {
arr[i] = (char*) malloc(sizeof(char) * (strings[i].size() + 1));
if (arr[i] == 0) throw(bad_alloc());
std::copy(strings[i].begin(), strings[i].end(), arr[i]);
arr[i][strings[i].size()] = '\0';
}
arr[num] = 0;
*array = arr;
}
static void free_string_array(char ** arr) {
if(!arr)
return;
char **it = arr;
while (it && (*it)) {
free(*it);
++it;
}
free(arr);
}
  // Convert the exception currently being handled into error state on
  // c_ctx: fills error_status (1..5 by exception type), error_text,
  // error_message, error_json and, for Sass_Error, the source position;
  // the output strings are nulled. Must only be called from inside a
  // catch block since it re-throws (`throw;`) to dispatch on the type.
  // Returns the stored error status.
  static int handle_errors(Sass_Context* c_ctx) {
    try {
      // re-raise the in-flight exception for the handlers below
      throw;
    }
    catch (Sass_Error& e) {
      // a compile error raised by libsass itself (status 1)
      stringstream msg_stream;
      string cwd(Sass::File::get_cwd());
      JsonNode* json_err = json_mkobject();
      json_append_member(json_err, "status", json_mknumber(1));
      json_append_member(json_err, "file", json_mkstring(e.pstate.path.c_str()));
      json_append_member(json_err, "line", json_mknumber(e.pstate.line+1));
      json_append_member(json_err, "column", json_mknumber(e.pstate.column+1));
      json_append_member(json_err, "message", json_mkstring(e.message.c_str()));
      string rel_path(Sass::File::resolve_relative_path(e.pstate.path, cwd, cwd));
      string msg_prefix("Error: ");
      bool got_newline = false;
      msg_stream << msg_prefix;
      // indent continuation lines of multi-line messages so they
      // align under the "Error: " prefix
      for (char chr : e.message) {
        if (chr == '\n') {
          got_newline = true;
        } else if (got_newline) {
          msg_stream << string(msg_prefix.size(), ' ');
          got_newline = false;
        }
        msg_stream << chr;
      }
      if (!got_newline) msg_stream << "\n";
      msg_stream << string(msg_prefix.size(), ' ');
      msg_stream << " on line " << e.pstate.line+1 << " of " << rel_path << "\n";
      // now create the code trace (ToDo: maybe have util functions?)
      if (e.pstate.line != string::npos && e.pstate.column != string::npos) {
        size_t line = e.pstate.line;
        const char* line_beg = e.pstate.src;
        // advance to the start of the offending source line
        while (line_beg && *line_beg && line) {
          if (*line_beg == '\n') -- line;
          ++ line_beg;
        }
        // then find the end of that line
        const char* line_end = line_beg;
        while (line_end && *line_end && *line_end != '\n') {
          // NOTE(review): the '\n' test below is unreachable — the
          // loop condition already stops on '\n'
          if (*line_end == '\n') break;
          if (*line_end == '\r') break;
          line_end ++;
        }
        // clip the echoed line to a window around the error column
        size_t max_left = 42; size_t max_right = 78;
        size_t move_in = e.pstate.column > max_left ? e.pstate.column - max_left : 0;
        size_t shorten = (line_end - line_beg) - move_in > max_right ?
          (line_end - line_beg) - move_in - max_right : 0;
        msg_stream << ">> " << string(line_beg + move_in, line_end - shorten) << "\n";
        msg_stream << " " << string(e.pstate.column - move_in, '-') << "^\n";
      }
      c_ctx->error_json = json_stringify(json_err, " ");;
      c_ctx->error_message = sass_strdup(msg_stream.str().c_str());
      c_ctx->error_text = strdup(e.message.c_str());
      c_ctx->error_status = 1;
      c_ctx->error_file = sass_strdup(e.pstate.path.c_str());
      c_ctx->error_line = e.pstate.line+1;
      c_ctx->error_column = e.pstate.column+1;
      // NOTE(review): error_src aliases the parser's buffer; it is not
      // duplicated and not freed by sass_clear_context
      c_ctx->error_src = e.pstate.src;
      c_ctx->output_string = 0;
      c_ctx->source_map_string = 0;
      json_delete(json_err);
    }
    catch(bad_alloc& ba) {
      // out of memory (status 2)
      stringstream msg_stream;
      JsonNode* json_err = json_mkobject();
      msg_stream << "Unable to allocate memory: " << ba.what() << endl;
      json_append_member(json_err, "status", json_mknumber(2));
      json_append_member(json_err, "message", json_mkstring(ba.what()));
      c_ctx->error_json = json_stringify(json_err, " ");;
      c_ctx->error_message = sass_strdup(msg_stream.str().c_str());
      c_ctx->error_text = strdup(ba.what());
      c_ctx->error_status = 2;
      c_ctx->output_string = 0;
      c_ctx->source_map_string = 0;
      json_delete(json_err);
    }
    catch (std::exception& e) {
      // any other standard exception (status 3)
      stringstream msg_stream;
      JsonNode* json_err = json_mkobject();
      msg_stream << "Error: " << e.what() << endl;
      json_append_member(json_err, "status", json_mknumber(3));
      json_append_member(json_err, "message", json_mkstring(e.what()));
      c_ctx->error_json = json_stringify(json_err, " ");;
      c_ctx->error_message = sass_strdup(msg_stream.str().c_str());
      c_ctx->error_text = strdup(e.what());
      c_ctx->error_status = 3;
      c_ctx->output_string = 0;
      c_ctx->source_map_string = 0;
      json_delete(json_err);
    }
    catch (string& e) {
      // a raw string thrown somewhere in the code base (status 4)
      stringstream msg_stream;
      JsonNode* json_err = json_mkobject();
      msg_stream << "Error: " << e << endl;
      json_append_member(json_err, "status", json_mknumber(4));
      json_append_member(json_err, "message", json_mkstring(e.c_str()));
      c_ctx->error_json = json_stringify(json_err, " ");;
      c_ctx->error_message = sass_strdup(msg_stream.str().c_str());
      c_ctx->error_text = strdup(e.c_str());
      c_ctx->error_status = 4;
      c_ctx->output_string = 0;
      c_ctx->source_map_string = 0;
      json_delete(json_err);
    }
    catch (...) {
      // anything else (status 5)
      stringstream msg_stream;
      JsonNode* json_err = json_mkobject();
      msg_stream << "Unknown error occurred" << endl;
      json_append_member(json_err, "status", json_mknumber(5));
      json_append_member(json_err, "message", json_mkstring("unknown"));
      c_ctx->error_json = json_stringify(json_err, " ");;
      c_ctx->error_message = sass_strdup(msg_stream.str().c_str());
      c_ctx->error_text = strdup("unknown");
      c_ctx->error_status = 5;
      c_ctx->output_string = 0;
      c_ctx->source_map_string = 0;
      json_delete(json_err);
    }
    return c_ctx->error_status;
  }
  // generic compilation function (not exported, use file/data compile instead)
  // Builds a C++ Context from the C-side options, registers the custom
  // functions/importers/headers, resets the error state and wraps the
  // result into a freshly allocated Sass_Compiler. Returns 0 on failure
  // with the error recorded on c_ctx by handle_errors.
  static Sass_Compiler* sass_prepare_context (Sass_Context* c_ctx, Context::Data cpp_opt) throw()
  {
    try {
      // get input/output path from options
      string input_path = safe_str(c_ctx->input_path);
      string output_path = safe_str(c_ctx->output_path);
      // maybe we can extract an output path from input path
      if (output_path == "" && input_path != "") {
        int lastindex = static_cast<int>(input_path.find_last_of("."));
        output_path = (lastindex > -1 ? input_path.substr(0, lastindex) : input_path) + ".css";
      }
      // convert include path linked list to static array
      struct string_list* inc = c_ctx->include_paths;
      // very poor loop to get the length of the linked list
      size_t inc_size = 0; while (inc) { inc_size ++; inc = inc->next; }
      // create char* array to hold all paths plus null terminator
      const char** include_paths = (const char**) calloc(inc_size + 1, sizeof(char*));
      if (include_paths == 0) throw(bad_alloc());
      // reset iterator
      inc = c_ctx->include_paths;
      // copy over the paths
      for (size_t i = 0; inc; i++) {
        include_paths[i] = inc->string;
        inc = inc->next;
      }
      // convert plugin path linked list to static array
      struct string_list* imp = c_ctx->plugin_paths;
      // very poor loop to get the length of the linked list
      size_t imp_size = 0; while (imp) { imp_size ++; imp = imp->next; }
      // create char* array to hold all paths plus null terminator
      const char** plugin_paths = (const char**) calloc(imp_size + 1, sizeof(char*));
      if (plugin_paths == 0) throw(bad_alloc());
      // reset iterator
      imp = c_ctx->plugin_paths;
      // copy over the paths
      for (size_t i = 0; imp; i++) {
        plugin_paths[i] = imp->string;
        imp = imp->next;
      }
      // transfer the options to c++
      // NOTE(review): the include_paths/plugin_paths arrays built above
      // are never handed over (the *_array setters are commented out);
      // they are freed right after Context construction — dead work?
      cpp_opt.c_compiler(0)
             .c_options(c_ctx)
             .input_path(input_path)
             .output_path(output_path)
             .output_style((Output_Style) c_ctx->output_style)
             .is_indented_syntax_src(c_ctx->is_indented_syntax_src)
             .source_comments(c_ctx->source_comments)
             .source_map_file(safe_str(c_ctx->source_map_file))
             .source_map_root(safe_str(c_ctx->source_map_root))
             .source_map_embed(c_ctx->source_map_embed)
             .source_map_contents(c_ctx->source_map_contents)
             .omit_source_map_url(c_ctx->omit_source_map_url)
             .include_paths_c_str(c_ctx->include_path)
             .plugin_paths_c_str(c_ctx->plugin_path)
             // .include_paths_array(include_paths)
             // .plugin_paths_array(plugin_paths)
             .include_paths(vector<string>())
             .plugin_paths(vector<string>())
             .precision(c_ctx->precision)
             .linefeed(c_ctx->linefeed)
             .indent(c_ctx->indent);
      // create new c++ Context
      Context* cpp_ctx = new Context(cpp_opt);
      // free intermediate data
      free(include_paths);
      free(plugin_paths);
      // register our custom functions
      if (c_ctx->c_functions) {
        auto this_func_data = c_ctx->c_functions;
        while (this_func_data && *this_func_data) {
          cpp_ctx->add_c_function(*this_func_data);
          ++this_func_data;
        }
      }
      // register our custom headers
      if (c_ctx->c_headers) {
        auto this_head_data = c_ctx->c_headers;
        while (this_head_data && *this_head_data) {
          cpp_ctx->add_c_header(*this_head_data);
          ++this_head_data;
        }
      }
      // register our custom importers
      if (c_ctx->c_importers) {
        auto this_imp_data = c_ctx->c_importers;
        while (this_imp_data && *this_imp_data) {
          cpp_ctx->add_c_importer(*this_imp_data);
          ++this_imp_data;
        }
      }
      // reset error status
      c_ctx->error_json = 0;
      c_ctx->error_text = 0;
      c_ctx->error_message = 0;
      c_ctx->error_status = 0;
      // reset error position
      c_ctx->error_src = 0;
      c_ctx->error_file = 0;
      c_ctx->error_line = string::npos;
      c_ctx->error_column = string::npos;
      // allocate a new compiler instance
      // NOTE(review): calloc result is used unchecked — a failed
      // allocation here would dereference a null pointer
      Sass_Compiler* compiler = (struct Sass_Compiler*) calloc(1, sizeof(struct Sass_Compiler));
      compiler->state = SASS_COMPILER_CREATED;
      // store in sass compiler
      compiler->c_ctx = c_ctx;
      compiler->cpp_ctx = cpp_ctx;
      cpp_ctx->c_compiler = compiler;
      // use to parse block
      return compiler;
    }
    // pass errors to generic error handler
    catch (...) { handle_errors(c_ctx); }
    // error
    return 0;
  }
  // Parse the prepared context into a root block. Dispatches on the
  // context type (file vs data), records the list of included files on
  // the C context and returns the parsed root, or 0 on error (the
  // error is stored on c_ctx by handle_errors).
  static Block* sass_parse_block (Sass_Compiler* compiler) throw()
  {
    // assert valid pointer
    if (compiler == 0) return 0;
    // The cpp context must be set by now
    Context* cpp_ctx = compiler->cpp_ctx;
    Sass_Context* c_ctx = compiler->c_ctx;
    // We will take care to wire up the rest
    compiler->cpp_ctx->c_compiler = compiler;
    compiler->state = SASS_COMPILER_PARSED;
    try {
      // get input/output path from options
      string input_path = safe_str(c_ctx->input_path);
      string output_path = safe_str(c_ctx->output_path);
      // parsed root block
      Block* root = 0;
      // maybe skip some entries of included files
      // we do not include stdin for data contexts
      size_t skip = 0;
      // dispatch to the correct render function
      if (c_ctx->type == SASS_CONTEXT_FILE) {
        root = cpp_ctx->parse_file();
      } else if (c_ctx->type == SASS_CONTEXT_DATA) {
        root = cpp_ctx->parse_string();
        skip = 1; // skip first entry of includes
      }
      // skip all prefixed files?
      skip += cpp_ctx->head_imports;
      // copy the included files on to the context (dont forget to free)
      if (root) copy_strings(cpp_ctx->get_included_files(skip), &c_ctx->included_files);
      // return parsed block
      return root;
    }
    // pass errors to generic error handler
    catch (...) { handle_errors(c_ctx); }
    // error
    return 0;
  }
// generic compilation function (not exported, use file/data compile instead)
static int sass_compile_context (Sass_Context* c_ctx, Context::Data cpp_opt)
{
// prepare sass compiler with context and options
Sass_Compiler* compiler = sass_prepare_context(c_ctx, cpp_opt);
try {
// call each compiler step
sass_compiler_parse(compiler);
sass_compiler_execute(compiler);
}
// pass errors to generic error handler
catch (...) { handle_errors(c_ctx); }
sass_delete_compiler(compiler);
return c_ctx->error_status;
}
  // Apply the library defaults to a freshly allocated options struct.
  inline void init_options (struct Sass_Options* options)
  {
    // digits after the decimal point for fractional numbers
    options->precision = 5;
    options->indent = " ";
    options->linefeed = LFEED;
  }
Sass_Options* ADDCALL sass_make_options (void)
{
struct Sass_Options* options = (struct Sass_Options*) calloc(1, sizeof(struct Sass_Options));
if (options == 0) { cerr << "Error allocating memory for options" << endl; return 0; }
init_options(options);
return options;
}
Sass_File_Context* ADDCALL sass_make_file_context(const char* input_path)
{
struct Sass_File_Context* ctx = (struct Sass_File_Context*) calloc(1, sizeof(struct Sass_File_Context));
if (ctx == 0) { cerr << "Error allocating memory for file context" << endl; return 0; }
ctx->type = SASS_CONTEXT_FILE;
init_options(ctx);
try {
if (input_path == 0) { throw(runtime_error("File context created without an input path")); }
if (*input_path == 0) { throw(runtime_error("File context created with empty input path")); }
sass_option_set_input_path(ctx, input_path);
} catch (...) {
handle_errors(ctx);
}
return ctx;
}
Sass_Data_Context* ADDCALL sass_make_data_context(char* source_string)
{
struct Sass_Data_Context* ctx = (struct Sass_Data_Context*) calloc(1, sizeof(struct Sass_Data_Context));
if (ctx == 0) { cerr << "Error allocating memory for data context" << endl; return 0; }
ctx->type = SASS_CONTEXT_DATA;
init_options(ctx);
try {
if (source_string == 0) { throw(runtime_error("Data context created without a source string")); }
if (*source_string == 0) { throw(runtime_error("Data context created with empty source string")); }
ctx->source_string = source_string;
} catch (...) {
handle_errors(ctx);
}
return ctx;
}
struct Sass_Compiler* ADDCALL sass_make_file_compiler (struct Sass_File_Context* c_ctx)
{
if (c_ctx == 0) return 0;
Context::Data cpp_opt = Context::Data();
cpp_opt.entry_point(c_ctx->input_path);
return sass_prepare_context(c_ctx, cpp_opt);
}
struct Sass_Compiler* ADDCALL sass_make_data_compiler (struct Sass_Data_Context* c_ctx)
{
if (c_ctx == 0) return 0;
Context::Data cpp_opt = Context::Data();
cpp_opt.source_c_str(c_ctx->source_string);
return sass_prepare_context(c_ctx, cpp_opt);
}
int ADDCALL sass_compile_data_context(Sass_Data_Context* data_ctx)
{
if (data_ctx == 0) return 1;
Sass_Context* c_ctx = data_ctx;
if (c_ctx->error_status)
return c_ctx->error_status;
Context::Data cpp_opt = Context::Data();
try {
if (data_ctx->source_string == 0) { throw(runtime_error("Data context has no source string")); }
if (*data_ctx->source_string == 0) { throw(runtime_error("Data context has empty source string")); }
cpp_opt.source_c_str(data_ctx->source_string);
}
catch (...) { return handle_errors(c_ctx) | 1; }
return sass_compile_context(c_ctx, cpp_opt);
}
int ADDCALL sass_compile_file_context(Sass_File_Context* file_ctx)
{
if (file_ctx == 0) return 1;
Sass_Context* c_ctx = file_ctx;
if (c_ctx->error_status)
return c_ctx->error_status;
Context::Data cpp_opt = Context::Data();
try {
if (file_ctx->input_path == 0) { throw(runtime_error("File context has no input path")); }
if (*file_ctx->input_path == 0) { throw(runtime_error("File context has empty input path")); }
cpp_opt.entry_point(file_ctx->input_path);
}
catch (...) { return handle_errors(c_ctx) | 1; }
return sass_compile_context(c_ctx, cpp_opt);
}
int ADDCALL sass_compiler_parse(struct Sass_Compiler* compiler)
{
if (compiler == 0) return 1;
if (compiler->state == SASS_COMPILER_PARSED) return 0;
if (compiler->state != SASS_COMPILER_CREATED) return -1;
if (compiler->c_ctx == NULL) return 1;
if (compiler->cpp_ctx == NULL) return 1;
if (compiler->c_ctx->error_status)
return compiler->c_ctx->error_status;
// parse the context we have set up (file or data)
compiler->root = sass_parse_block(compiler);
// success
return 0;
}
int ADDCALL sass_compiler_execute(struct Sass_Compiler* compiler)
{
if (compiler == 0) return 1;
if (compiler->state == SASS_COMPILER_EXECUTED) return 0;
if (compiler->state != SASS_COMPILER_PARSED) return -1;
if (compiler->c_ctx == NULL) return 1;
if (compiler->cpp_ctx == NULL) return 1;
if (compiler->root == NULL) return 1;
if (compiler->c_ctx->error_status)
return compiler->c_ctx->error_status;
compiler->state = SASS_COMPILER_EXECUTED;
Context* cpp_ctx = (Context*) compiler->cpp_ctx;
Block* root = (Block*) compiler->root;
// compile the parsed root block
try { compiler->c_ctx->output_string = cpp_ctx->compile_block(root); }
// pass catched errors to generic error handler
catch (...) { return handle_errors(compiler->c_ctx) | 1; }
// generate source map json and store on context
compiler->c_ctx->source_map_string = cpp_ctx->generate_source_map();
// success
return 0;
}
// helper function, not exported, only accessible locally
static void sass_clear_options (struct Sass_Options* options)
{
if (options == 0) return;
// Deallocate custom functions
if (options->c_functions) {
Sass_Function_List this_func_data = options->c_functions;
while (this_func_data && *this_func_data) {
free(*this_func_data);
++this_func_data;
}
}
// Deallocate custom headers
if (options->c_headers) {
Sass_Importer_List this_head_data = options->c_headers;
while (this_head_data && *this_head_data) {
free(*this_head_data);
++this_head_data;
}
}
// Deallocate custom importers
if (options->c_importers) {
Sass_Importer_List this_imp_data = options->c_importers;
while (this_imp_data && *this_imp_data) {
free(*this_imp_data);
++this_imp_data;
}
}
// Deallocate inc paths
if (options->plugin_paths) {
struct string_list* cur;
struct string_list* next;
cur = options->plugin_paths;
while (cur) {
next = cur->next;
free(cur->string);
free(cur);
cur = next;
}
}
// Deallocate inc paths
if (options->include_paths) {
struct string_list* cur;
struct string_list* next;
cur = options->include_paths;
while (cur) {
next = cur->next;
free(cur->string);
free(cur);
cur = next;
}
}
// Free custom functions
free(options->c_functions);
// Free custom importers
free(options->c_importers);
free(options->c_headers);
// Reset our pointers
options->c_functions = 0;
options->c_importers = 0;
options->c_headers = 0;
options->plugin_paths = 0;
options->include_paths = 0;
}
// helper function, not exported, only accessible locally
// sass_free_context is also defined in old sass_interface
static void sass_clear_context (struct Sass_Context* ctx)
{
if (ctx == 0) return;
// release the allocated memory (mostly via sass_strdup)
if (ctx->output_string) free(ctx->output_string);
if (ctx->source_map_string) free(ctx->source_map_string);
if (ctx->error_message) free(ctx->error_message);
if (ctx->error_text) free(ctx->error_text);
if (ctx->error_json) free(ctx->error_json);
if (ctx->error_file) free(ctx->error_file);
if (ctx->input_path) free(ctx->input_path);
if (ctx->output_path) free(ctx->output_path);
if (ctx->include_path) free(ctx->include_path);
if (ctx->source_map_file) free(ctx->source_map_file);
if (ctx->source_map_root) free(ctx->source_map_root);
free_string_array(ctx->included_files);
// play safe and reset properties
ctx->output_string = 0;
ctx->source_map_string = 0;
ctx->error_message = 0;
ctx->error_text = 0;
ctx->error_json = 0;
ctx->error_file = 0;
ctx->input_path = 0;
ctx->output_path = 0;
ctx->include_path = 0;
ctx->source_map_file = 0;
ctx->source_map_root = 0;
ctx->included_files = 0;
// now clear the options
sass_clear_options(ctx);
}
void ADDCALL sass_delete_compiler (struct Sass_Compiler* compiler)
{
if (compiler == 0) return;
Context* cpp_ctx = (Context*) compiler->cpp_ctx;
compiler->cpp_ctx = 0;
delete cpp_ctx;
free(compiler);
}
// Deallocate all associated memory with contexts
void ADDCALL sass_delete_file_context (struct Sass_File_Context* ctx) { sass_clear_context(ctx); free(ctx); }
void ADDCALL sass_delete_data_context (struct Sass_Data_Context* ctx) { sass_clear_context(ctx); free(ctx); }
// Getters for sass context from specific implementations.
// These are plain upcasts: Sass_File_Context / Sass_Data_Context
// extend Sass_Context by layout, so returning the same pointer works.
struct Sass_Context* ADDCALL sass_file_context_get_context(struct Sass_File_Context* ctx) { return ctx; }
struct Sass_Context* ADDCALL sass_data_context_get_context(struct Sass_Data_Context* ctx) { return ctx; }
// Getters for context options from Sass_Context (also layout upcasts).
struct Sass_Options* ADDCALL sass_context_get_options(struct Sass_Context* ctx) { return ctx; }
struct Sass_Options* ADDCALL sass_file_context_get_options(struct Sass_File_Context* ctx) { return ctx; }
struct Sass_Options* ADDCALL sass_data_context_get_options(struct Sass_Data_Context* ctx) { return ctx; }
// Setters copy the supplied option set into the context's own options.
void ADDCALL sass_file_context_set_options (struct Sass_File_Context* ctx, struct Sass_Options* opt) { copy_options(ctx, opt); }
void ADDCALL sass_data_context_set_options (struct Sass_Data_Context* ctx, struct Sass_Options* opt) { copy_options(ctx, opt); }
// Getters for Sass_Compiler state (get connected sass context).
enum Sass_Compiler_State ADDCALL sass_compiler_get_state(struct Sass_Compiler* compiler) { return compiler->state; }
struct Sass_Context* ADDCALL sass_compiler_get_context(struct Sass_Compiler* compiler) { return compiler->c_ctx; }
// Getters for Sass_Compiler options (query import stack).
// NOTE(review): no bounds/empty checks here — callers must check
// the stack size first; back()/operator[] on an empty stack is UB.
size_t ADDCALL sass_compiler_get_import_stack_size(struct Sass_Compiler* compiler) { return compiler->cpp_ctx->import_stack.size(); }
Sass_Import_Entry ADDCALL sass_compiler_get_last_import(struct Sass_Compiler* compiler) { return compiler->cpp_ctx->import_stack.back(); }
Sass_Import_Entry ADDCALL sass_compiler_get_import_entry(struct Sass_Compiler* compiler, size_t idx) { return compiler->cpp_ctx->import_stack[idx]; }
// Calculate the size of the stored null terminated array
// (included_files is a NULL-terminated char** or NULL itself).
size_t ADDCALL sass_context_get_included_files_size (struct Sass_Context* ctx)
{ size_t l = 0; auto i = ctx->included_files; while (i && *i) { ++i; ++l; } return l; }
// Create getter and setters for options.
// Each IMPLEMENT_* macro expands to the exported get/set (or get/take)
// function pair for the named struct member; the first argument is the
// member's C type.
IMPLEMENT_SASS_OPTION_ACCESSOR(int, precision);
IMPLEMENT_SASS_OPTION_ACCESSOR(enum Sass_Output_Style, output_style);
IMPLEMENT_SASS_OPTION_ACCESSOR(bool, source_comments);
IMPLEMENT_SASS_OPTION_ACCESSOR(bool, source_map_embed);
IMPLEMENT_SASS_OPTION_ACCESSOR(bool, source_map_contents);
IMPLEMENT_SASS_OPTION_ACCESSOR(bool, omit_source_map_url);
IMPLEMENT_SASS_OPTION_ACCESSOR(bool, is_indented_syntax_src);
IMPLEMENT_SASS_OPTION_ACCESSOR(Sass_Function_List, c_functions);
IMPLEMENT_SASS_OPTION_ACCESSOR(Sass_Importer_List, c_importers);
IMPLEMENT_SASS_OPTION_ACCESSOR(Sass_Importer_List, c_headers);
IMPLEMENT_SASS_OPTION_ACCESSOR(const char*, indent);
IMPLEMENT_SASS_OPTION_ACCESSOR(const char*, linefeed);
// String accessors additionally copy/own the string value.
IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(const char*, input_path);
IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(const char*, output_path);
IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(const char*, plugin_path);
IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(const char*, include_path);
IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(const char*, source_map_file);
IMPLEMENT_SASS_OPTION_STRING_ACCESSOR(const char*, source_map_root);
// Create getters for context results (read-only access).
IMPLEMENT_SASS_CONTEXT_GETTER(int, error_status);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, error_json);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, error_message);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, error_text);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, error_file);
IMPLEMENT_SASS_CONTEXT_GETTER(size_t, error_line);
IMPLEMENT_SASS_CONTEXT_GETTER(size_t, error_column);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, error_src);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, output_string);
IMPLEMENT_SASS_CONTEXT_GETTER(const char*, source_map_string);
IMPLEMENT_SASS_CONTEXT_GETTER(char**, included_files);
// Take ownership of memory (value on context is set to 0).
// After a take_* call the caller must free the returned buffer.
IMPLEMENT_SASS_CONTEXT_TAKER(char*, error_json);
IMPLEMENT_SASS_CONTEXT_TAKER(char*, error_message);
IMPLEMENT_SASS_CONTEXT_TAKER(char*, error_text);
IMPLEMENT_SASS_CONTEXT_TAKER(char*, error_file);
IMPLEMENT_SASS_CONTEXT_TAKER(char*, output_string);
IMPLEMENT_SASS_CONTEXT_TAKER(char*, source_map_string);
IMPLEMENT_SASS_CONTEXT_TAKER(char**, included_files);
// Push function for include paths (no manipulation support for now).
// Appends a new node to the singly linked include-path list; a NULL
// path is stored as a NULL string. Silently no-ops on alloc failure.
void ADDCALL sass_option_push_include_path(struct Sass_Options* options, const char* path)
{
  // Allocate a fresh, zero-initialized list node (next == 0).
  struct string_list* node = (struct string_list*) calloc(1, sizeof(struct string_list));
  if (node == 0) return;
  node->string = path ? sass_strdup(path) : 0;
  if (!options->include_paths) {
    // First entry becomes the list head.
    options->include_paths = node;
    return;
  }
  // Otherwise walk to the tail and append there.
  struct string_list* cursor = options->include_paths;
  while (cursor->next)
    cursor = cursor->next;
  cursor->next = node;
}
// Push function for plugin paths (no manipulation support for now).
// Mirrors sass_option_push_include_path for the plugin-path list:
// appends one node at the tail, storing a duplicate of `path`.
void ADDCALL sass_option_push_plugin_path(struct Sass_Options* options, const char* path)
{
  // Zero-initialized node so `next` starts out as the list terminator.
  struct string_list* node = (struct string_list*) calloc(1, sizeof(struct string_list));
  if (node == 0) return;
  node->string = path ? sass_strdup(path) : 0;
  if (!options->plugin_paths) {
    options->plugin_paths = node;
    return;
  }
  struct string_list* cursor = options->plugin_paths;
  while (cursor->next)
    cursor = cursor->next;
  cursor->next = node;
}
}
| mit |
nusacoin/nusacoin | src/bmw.c | 1021 | 29235 | /* $Id: bmw.c 227 2010-06-16 17:28:38Z tp $ */
/*
* BMW implementation.
*
* ==========================(LICENSE BEGIN)============================
*
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ===========================(LICENSE END)=============================
*
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
*/
#include <stddef.h>
#include <string.h>
#include <limits.h>
#ifdef __cplusplus
extern "C"{
#endif
#include "sph_bmw.h"
#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_BMW
#define SPH_SMALL_FOOTPRINT_BMW 1
#endif
#ifdef _MSC_VER
#pragma warning (disable: 4146)
#endif
/*
 * Initial hash values (IVs) for the four BMW output sizes.
 * These are the constants from the BMW specification: simple
 * consecutive-byte patterns (0x00..0x3F for BMW-224, 0x40..0x7F
 * for BMW-256, and the analogous 64-bit patterns for BMW-384/512).
 */
static const sph_u32 IV224[] = {
	SPH_C32(0x00010203), SPH_C32(0x04050607),
	SPH_C32(0x08090A0B), SPH_C32(0x0C0D0E0F),
	SPH_C32(0x10111213), SPH_C32(0x14151617),
	SPH_C32(0x18191A1B), SPH_C32(0x1C1D1E1F),
	SPH_C32(0x20212223), SPH_C32(0x24252627),
	SPH_C32(0x28292A2B), SPH_C32(0x2C2D2E2F),
	SPH_C32(0x30313233), SPH_C32(0x34353637),
	SPH_C32(0x38393A3B), SPH_C32(0x3C3D3E3F)
};
static const sph_u32 IV256[] = {
	SPH_C32(0x40414243), SPH_C32(0x44454647),
	SPH_C32(0x48494A4B), SPH_C32(0x4C4D4E4F),
	SPH_C32(0x50515253), SPH_C32(0x54555657),
	SPH_C32(0x58595A5B), SPH_C32(0x5C5D5E5F),
	SPH_C32(0x60616263), SPH_C32(0x64656667),
	SPH_C32(0x68696A6B), SPH_C32(0x6C6D6E6F),
	SPH_C32(0x70717273), SPH_C32(0x74757677),
	SPH_C32(0x78797A7B), SPH_C32(0x7C7D7E7F)
};
/* 64-bit IVs are only needed when the platform provides a 64-bit type. */
#if SPH_64
static const sph_u64 IV384[] = {
	SPH_C64(0x0001020304050607), SPH_C64(0x08090A0B0C0D0E0F),
	SPH_C64(0x1011121314151617), SPH_C64(0x18191A1B1C1D1E1F),
	SPH_C64(0x2021222324252627), SPH_C64(0x28292A2B2C2D2E2F),
	SPH_C64(0x3031323334353637), SPH_C64(0x38393A3B3C3D3E3F),
	SPH_C64(0x4041424344454647), SPH_C64(0x48494A4B4C4D4E4F),
	SPH_C64(0x5051525354555657), SPH_C64(0x58595A5B5C5D5E5F),
	SPH_C64(0x6061626364656667), SPH_C64(0x68696A6B6C6D6E6F),
	SPH_C64(0x7071727374757677), SPH_C64(0x78797A7B7C7D7E7F)
};
static const sph_u64 IV512[] = {
	SPH_C64(0x8081828384858687), SPH_C64(0x88898A8B8C8D8E8F),
	SPH_C64(0x9091929394959697), SPH_C64(0x98999A9B9C9D9E9F),
	SPH_C64(0xA0A1A2A3A4A5A6A7), SPH_C64(0xA8A9AAABACADAEAF),
	SPH_C64(0xB0B1B2B3B4B5B6B7), SPH_C64(0xB8B9BABBBCBDBEBF),
	SPH_C64(0xC0C1C2C3C4C5C6C7), SPH_C64(0xC8C9CACBCCCDCECF),
	SPH_C64(0xD0D1D2D3D4D5D6D7), SPH_C64(0xD8D9DADBDCDDDEDF),
	SPH_C64(0xE0E1E2E3E4E5E6E7), SPH_C64(0xE8E9EAEBECEDEEEF),
	SPH_C64(0xF0F1F2F3F4F5F6F7), SPH_C64(0xF8F9FAFBFCFDFEFF)
};
#endif
/* Token-pasting helpers; LPAR lets a macro emit an unmatched '('. */
#define XCAT(x, y) XCAT_(x, y)
#define XCAT_(x, y) x ## y
#define LPAR (
/*
 * I16_j expands to the 16 consecutive indices j-16 .. j-1, used to feed
 * the previous 16 Q values into the expansion macros without runtime
 * index arithmetic.
 */
#define I16_16 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
#define I16_17 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
#define I16_18 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
#define I16_19 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18
#define I16_20 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
#define I16_21 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
#define I16_22 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
#define I16_23 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22
#define I16_24 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23
#define I16_25 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24
#define I16_26 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
#define I16_27 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26
#define I16_28 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27
#define I16_29 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28
#define I16_30 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
#define I16_31 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
/*
 * M16_j expands to the message-word indices and rotation counts used by
 * add_elt_s for round j: (j mod 16), its rotation, (j+3) mod 16, its
 * rotation, (j+7) mod 16, (j+10) mod 16, and its rotation.
 */
#define M16_16 0, 1, 3, 4, 7, 10, 11
#define M16_17 1, 2, 4, 5, 8, 11, 12
#define M16_18 2, 3, 5, 6, 9, 12, 13
#define M16_19 3, 4, 6, 7, 10, 13, 14
#define M16_20 4, 5, 7, 8, 11, 14, 15
#define M16_21 5, 6, 8, 9, 12, 15, 16
#define M16_22 6, 7, 9, 10, 13, 0, 1
#define M16_23 7, 8, 10, 11, 14, 1, 2
#define M16_24 8, 9, 11, 12, 15, 2, 3
#define M16_25 9, 10, 12, 13, 0, 3, 4
#define M16_26 10, 11, 13, 14, 1, 4, 5
#define M16_27 11, 12, 14, 15, 2, 5, 6
#define M16_28 12, 13, 15, 16, 3, 6, 7
#define M16_29 13, 14, 0, 1, 4, 7, 8
#define M16_30 14, 15, 1, 2, 5, 8, 9
#define M16_31 15, 16, 2, 3, 6, 9, 10
/* BMW s0..s5 diffusion functions, 32-bit variants (shift/rotate mixes). */
#define ss0(x) (((x) >> 1) ^ SPH_T32((x) << 3) \
^ SPH_ROTL32(x, 4) ^ SPH_ROTL32(x, 19))
#define ss1(x) (((x) >> 1) ^ SPH_T32((x) << 2) \
^ SPH_ROTL32(x, 8) ^ SPH_ROTL32(x, 23))
#define ss2(x) (((x) >> 2) ^ SPH_T32((x) << 1) \
^ SPH_ROTL32(x, 12) ^ SPH_ROTL32(x, 25))
#define ss3(x) (((x) >> 2) ^ SPH_T32((x) << 2) \
^ SPH_ROTL32(x, 15) ^ SPH_ROTL32(x, 29))
#define ss4(x) (((x) >> 1) ^ (x))
#define ss5(x) (((x) >> 2) ^ (x))
/* BMW r1..r7 rotation functions, 32-bit variants. */
#define rs1(x) SPH_ROTL32(x, 3)
#define rs2(x) SPH_ROTL32(x, 7)
#define rs3(x) SPH_ROTL32(x, 13)
#define rs4(x) SPH_ROTL32(x, 16)
#define rs5(x) SPH_ROTL32(x, 19)
#define rs6(x) SPH_ROTL32(x, 23)
#define rs7(x) SPH_ROTL32(x, 27)
/* Per-round constant K_j = j * 0x05555555 (mod 2^32). */
#define Ks(j)   SPH_T32((sph_u32)(j) * SPH_C32(0x05555555))
/* AddElement: mixes three rotated message words, K_j, and one chaining word. */
#define add_elt_s(mf, hf, j0m, j1m, j3m, j4m, j7m, j10m, j11m, j16) \
(SPH_T32(SPH_ROTL32(mf(j0m), j1m) + SPH_ROTL32(mf(j3m), j4m) \
- SPH_ROTL32(mf(j10m), j11m) + Ks(j16)) ^ hf(j7m))
/*
 * expand1: Q[j] from the s-mixed previous 16 Q values plus AddElement.
 * The LPAR trick forces the I16_/M16_ lists to expand before the inner
 * macro is invoked.
 */
#define expand1s_inner(qf, mf, hf, i16, \
i0, i1, i2, i3, i4, i5, i6, i7, i8, \
i9, i10, i11, i12, i13, i14, i15, \
i0m, i1m, i3m, i4m, i7m, i10m, i11m) \
SPH_T32(ss1(qf(i0)) + ss2(qf(i1)) + ss3(qf(i2)) + ss0(qf(i3)) \
+ ss1(qf(i4)) + ss2(qf(i5)) + ss3(qf(i6)) + ss0(qf(i7)) \
+ ss1(qf(i8)) + ss2(qf(i9)) + ss3(qf(i10)) + ss0(qf(i11)) \
+ ss1(qf(i12)) + ss2(qf(i13)) + ss3(qf(i14)) + ss0(qf(i15)) \
+ add_elt_s(mf, hf, i0m, i1m, i3m, i4m, i7m, i10m, i11m, i16))
#define expand1s(qf, mf, hf, i16) \
expand1s_(qf, mf, hf, i16, I16_ ## i16, M16_ ## i16)
#define expand1s_(qf, mf, hf, i16, ix, iy) \
expand1s_inner LPAR qf, mf, hf, i16, ix, iy)
/* expand2: cheaper expansion using rotations instead of full s-functions. */
#define expand2s_inner(qf, mf, hf, i16, \
i0, i1, i2, i3, i4, i5, i6, i7, i8, \
i9, i10, i11, i12, i13, i14, i15, \
i0m, i1m, i3m, i4m, i7m, i10m, i11m) \
SPH_T32(qf(i0) + rs1(qf(i1)) + qf(i2) + rs2(qf(i3)) \
+ qf(i4) + rs3(qf(i5)) + qf(i6) + rs4(qf(i7)) \
+ qf(i8) + rs5(qf(i9)) + qf(i10) + rs6(qf(i11)) \
+ qf(i12) + rs7(qf(i13)) + ss4(qf(i14)) + ss5(qf(i15)) \
+ add_elt_s(mf, hf, i0m, i1m, i3m, i4m, i7m, i10m, i11m, i16))
#define expand2s(qf, mf, hf, i16) \
expand2s_(qf, mf, hf, i16, I16_ ## i16, M16_ ## i16)
#define expand2s_(qf, mf, hf, i16, ix, iy) \
expand2s_inner LPAR qf, mf, hf, i16, ix, iy)
/* 64-bit (BMW-384/512) counterparts of the 32-bit helper macros above. */
#if SPH_64
/* BMW s0..s5 diffusion functions, 64-bit variants. */
#define sb0(x) (((x) >> 1) ^ SPH_T64((x) << 3) \
^ SPH_ROTL64(x, 4) ^ SPH_ROTL64(x, 37))
#define sb1(x) (((x) >> 1) ^ SPH_T64((x) << 2) \
^ SPH_ROTL64(x, 13) ^ SPH_ROTL64(x, 43))
#define sb2(x) (((x) >> 2) ^ SPH_T64((x) << 1) \
^ SPH_ROTL64(x, 19) ^ SPH_ROTL64(x, 53))
#define sb3(x) (((x) >> 2) ^ SPH_T64((x) << 2) \
^ SPH_ROTL64(x, 28) ^ SPH_ROTL64(x, 59))
#define sb4(x) (((x) >> 1) ^ (x))
#define sb5(x) (((x) >> 2) ^ (x))
/* BMW r1..r7 rotation functions, 64-bit variants. */
#define rb1(x) SPH_ROTL64(x, 5)
#define rb2(x) SPH_ROTL64(x, 11)
#define rb3(x) SPH_ROTL64(x, 27)
#define rb4(x) SPH_ROTL64(x, 32)
#define rb5(x) SPH_ROTL64(x, 37)
#define rb6(x) SPH_ROTL64(x, 43)
#define rb7(x) SPH_ROTL64(x, 53)
/* Per-round constant K_j = j * 0x0555555555555555 (mod 2^64). */
#define Kb(j)   SPH_T64((sph_u64)(j) * SPH_C64(0x0555555555555555))
#if SPH_SMALL_FOOTPRINT_BMW
/*
 * Small-footprint build: precompute K_16..K_31 in a table and compute
 * the rotation offsets at run time instead of expanding 16 distinct
 * macro bodies (trades speed for code size).
 */
static const sph_u64 Kb_tab[] = {
	Kb(16), Kb(17), Kb(18), Kb(19), Kb(20), Kb(21), Kb(22), Kb(23),
	Kb(24), Kb(25), Kb(26), Kb(27), Kb(28), Kb(29), Kb(30), Kb(31)
};
/* Rotate message word (j+off) mod 16 left by ((j+off) mod 16) + 1. */
#define rol_off(mf, j, off) \
SPH_ROTL64(mf(((j) + (off)) & 15), (((j) + (off)) & 15) + 1)
#define add_elt_b(mf, hf, j) \
(SPH_T64(rol_off(mf, j, 0) + rol_off(mf, j, 3) \
- rol_off(mf, j, 10) + Kb_tab[j]) ^ hf(((j) + 7) & 15))
#define expand1b(qf, mf, hf, i) \
SPH_T64(sb1(qf((i) - 16)) + sb2(qf((i) - 15)) \
+ sb3(qf((i) - 14)) + sb0(qf((i) - 13)) \
+ sb1(qf((i) - 12)) + sb2(qf((i) - 11)) \
+ sb3(qf((i) - 10)) + sb0(qf((i) - 9)) \
+ sb1(qf((i) - 8)) + sb2(qf((i) - 7)) \
+ sb3(qf((i) - 6)) + sb0(qf((i) - 5)) \
+ sb1(qf((i) - 4)) + sb2(qf((i) - 3)) \
+ sb3(qf((i) - 2)) + sb0(qf((i) - 1)) \
+ add_elt_b(mf, hf, (i) - 16))
#define expand2b(qf, mf, hf, i) \
SPH_T64(qf((i) - 16) + rb1(qf((i) - 15)) \
+ qf((i) - 14) + rb2(qf((i) - 13)) \
+ qf((i) - 12) + rb3(qf((i) - 11)) \
+ qf((i) - 10) + rb4(qf((i) - 9)) \
+ qf((i) - 8) + rb5(qf((i) - 7)) \
+ qf((i) - 6) + rb6(qf((i) - 5)) \
+ qf((i) - 4) + rb7(qf((i) - 3)) \
+ sb4(qf((i) - 2)) + sb5(qf((i) - 1)) \
+ add_elt_b(mf, hf, (i) - 16))
#else
/* Fully-unrolled build: indices/rotations baked in via I16_/M16_ lists. */
#define add_elt_b(mf, hf, j0m, j1m, j3m, j4m, j7m, j10m, j11m, j16) \
(SPH_T64(SPH_ROTL64(mf(j0m), j1m) + SPH_ROTL64(mf(j3m), j4m) \
- SPH_ROTL64(mf(j10m), j11m) + Kb(j16)) ^ hf(j7m))
#define expand1b_inner(qf, mf, hf, i16, \
i0, i1, i2, i3, i4, i5, i6, i7, i8, \
i9, i10, i11, i12, i13, i14, i15, \
i0m, i1m, i3m, i4m, i7m, i10m, i11m) \
SPH_T64(sb1(qf(i0)) + sb2(qf(i1)) + sb3(qf(i2)) + sb0(qf(i3)) \
+ sb1(qf(i4)) + sb2(qf(i5)) + sb3(qf(i6)) + sb0(qf(i7)) \
+ sb1(qf(i8)) + sb2(qf(i9)) + sb3(qf(i10)) + sb0(qf(i11)) \
+ sb1(qf(i12)) + sb2(qf(i13)) + sb3(qf(i14)) + sb0(qf(i15)) \
+ add_elt_b(mf, hf, i0m, i1m, i3m, i4m, i7m, i10m, i11m, i16))
#define expand1b(qf, mf, hf, i16) \
expand1b_(qf, mf, hf, i16, I16_ ## i16, M16_ ## i16)
#define expand1b_(qf, mf, hf, i16, ix, iy) \
expand1b_inner LPAR qf, mf, hf, i16, ix, iy)
#define expand2b_inner(qf, mf, hf, i16, \
i0, i1, i2, i3, i4, i5, i6, i7, i8, \
i9, i10, i11, i12, i13, i14, i15, \
i0m, i1m, i3m, i4m, i7m, i10m, i11m) \
SPH_T64(qf(i0) + rb1(qf(i1)) + qf(i2) + rb2(qf(i3)) \
+ qf(i4) + rb3(qf(i5)) + qf(i6) + rb4(qf(i7)) \
+ qf(i8) + rb5(qf(i9)) + qf(i10) + rb6(qf(i11)) \
+ qf(i12) + rb7(qf(i13)) + sb4(qf(i14)) + sb5(qf(i15)) \
+ add_elt_b(mf, hf, i0m, i1m, i3m, i4m, i7m, i10m, i11m, i16))
#define expand2b(qf, mf, hf, i16) \
expand2b_(qf, mf, hf, i16, I16_ ## i16, M16_ ## i16)
#define expand2b_(qf, mf, hf, i16, ix, iy) \
expand2b_inner LPAR qf, mf, hf, i16, ix, iy)
#endif
#endif
/*
 * W_i: a signed sum of five (M XOR H) terms; the per-index operand
 * patterns come from the BMW specification's W table.
 */
#define MAKE_W(tt, i0, op01, i1, op12, i2, op23, i3, op34, i4) \
tt((M(i0) ^ H(i0)) op01 (M(i1) ^ H(i1)) op12 (M(i2) ^ H(i2)) \
op23 (M(i3) ^ H(i3)) op34 (M(i4) ^ H(i4)))
/* 32-bit W_0..W_15. */
#define Ws0 MAKE_W(SPH_T32, 5, -, 7, +, 10, +, 13, +, 14)
#define Ws1 MAKE_W(SPH_T32, 6, -, 8, +, 11, +, 14, -, 15)
#define Ws2 MAKE_W(SPH_T32, 0, +, 7, +, 9, -, 12, +, 15)
#define Ws3 MAKE_W(SPH_T32, 0, -, 1, +, 8, -, 10, +, 13)
#define Ws4 MAKE_W(SPH_T32, 1, +, 2, +, 9, -, 11, -, 14)
#define Ws5 MAKE_W(SPH_T32, 3, -, 2, +, 10, -, 12, +, 15)
#define Ws6 MAKE_W(SPH_T32, 4, -, 0, -, 3, -, 11, +, 13)
#define Ws7 MAKE_W(SPH_T32, 1, -, 4, -, 5, -, 12, -, 14)
#define Ws8 MAKE_W(SPH_T32, 2, -, 5, -, 6, +, 13, -, 15)
#define Ws9 MAKE_W(SPH_T32, 0, -, 3, +, 6, -, 7, +, 14)
#define Ws10 MAKE_W(SPH_T32, 8, -, 1, -, 4, -, 7, +, 15)
#define Ws11 MAKE_W(SPH_T32, 8, -, 0, -, 2, -, 5, +, 9)
#define Ws12 MAKE_W(SPH_T32, 1, +, 3, -, 6, -, 9, +, 10)
#define Ws13 MAKE_W(SPH_T32, 2, +, 4, +, 7, +, 10, +, 11)
#define Ws14 MAKE_W(SPH_T32, 3, -, 5, +, 8, -, 11, -, 12)
#define Ws15 MAKE_W(SPH_T32, 12, -, 4, -, 6, -, 9, +, 13)
#if SPH_SMALL_FOOTPRINT_BMW
/*
 * Small-footprint variant: stage W into an array and run the s0..s4
 * pattern in a loop (same values as the unrolled version below).
 * Note the s-function pattern repeats with period 5, hence the
 * u += 5 loop and the separate qt[15] line.
 */
#define MAKE_Qas   do { \
unsigned u; \
sph_u32 Ws[16]; \
Ws[ 0] = Ws0; \
Ws[ 1] = Ws1; \
Ws[ 2] = Ws2; \
Ws[ 3] = Ws3; \
Ws[ 4] = Ws4; \
Ws[ 5] = Ws5; \
Ws[ 6] = Ws6; \
Ws[ 7] = Ws7; \
Ws[ 8] = Ws8; \
Ws[ 9] = Ws9; \
Ws[10] = Ws10; \
Ws[11] = Ws11; \
Ws[12] = Ws12; \
Ws[13] = Ws13; \
Ws[14] = Ws14; \
Ws[15] = Ws15; \
for (u = 0; u < 15; u += 5) { \
qt[u + 0] = SPH_T32(ss0(Ws[u + 0]) + H(u + 1)); \
qt[u + 1] = SPH_T32(ss1(Ws[u + 1]) + H(u + 2)); \
qt[u + 2] = SPH_T32(ss2(Ws[u + 2]) + H(u + 3)); \
qt[u + 3] = SPH_T32(ss3(Ws[u + 3]) + H(u + 4)); \
qt[u + 4] = SPH_T32(ss4(Ws[u + 4]) + H(u + 5)); \
} \
qt[15] = SPH_T32(ss0(Ws[15]) + H(0)); \
} while (0)
/* Expansion rounds Q16..Q31: two expand1 rounds, then fourteen expand2. */
#define MAKE_Qbs   do { \
qt[16] = expand1s(Qs, M, H, 16); \
qt[17] = expand1s(Qs, M, H, 17); \
qt[18] = expand2s(Qs, M, H, 18); \
qt[19] = expand2s(Qs, M, H, 19); \
qt[20] = expand2s(Qs, M, H, 20); \
qt[21] = expand2s(Qs, M, H, 21); \
qt[22] = expand2s(Qs, M, H, 22); \
qt[23] = expand2s(Qs, M, H, 23); \
qt[24] = expand2s(Qs, M, H, 24); \
qt[25] = expand2s(Qs, M, H, 25); \
qt[26] = expand2s(Qs, M, H, 26); \
qt[27] = expand2s(Qs, M, H, 27); \
qt[28] = expand2s(Qs, M, H, 28); \
qt[29] = expand2s(Qs, M, H, 29); \
qt[30] = expand2s(Qs, M, H, 30); \
qt[31] = expand2s(Qs, M, H, 31); \
} while (0)
#else
/* Unrolled variant: Q0..Q15 directly, s-function index cycling mod 5. */
#define MAKE_Qas   do { \
qt[ 0] = SPH_T32(ss0(Ws0 ) + H( 1)); \
qt[ 1] = SPH_T32(ss1(Ws1 ) + H( 2)); \
qt[ 2] = SPH_T32(ss2(Ws2 ) + H( 3)); \
qt[ 3] = SPH_T32(ss3(Ws3 ) + H( 4)); \
qt[ 4] = SPH_T32(ss4(Ws4 ) + H( 5)); \
qt[ 5] = SPH_T32(ss0(Ws5 ) + H( 6)); \
qt[ 6] = SPH_T32(ss1(Ws6 ) + H( 7)); \
qt[ 7] = SPH_T32(ss2(Ws7 ) + H( 8)); \
qt[ 8] = SPH_T32(ss3(Ws8 ) + H( 9)); \
qt[ 9] = SPH_T32(ss4(Ws9 ) + H(10)); \
qt[10] = SPH_T32(ss0(Ws10) + H(11)); \
qt[11] = SPH_T32(ss1(Ws11) + H(12)); \
qt[12] = SPH_T32(ss2(Ws12) + H(13)); \
qt[13] = SPH_T32(ss3(Ws13) + H(14)); \
qt[14] = SPH_T32(ss4(Ws14) + H(15)); \
qt[15] = SPH_T32(ss0(Ws15) + H( 0)); \
} while (0)
#define MAKE_Qbs   do { \
qt[16] = expand1s(Qs, M, H, 16); \
qt[17] = expand1s(Qs, M, H, 17); \
qt[18] = expand2s(Qs, M, H, 18); \
qt[19] = expand2s(Qs, M, H, 19); \
qt[20] = expand2s(Qs, M, H, 20); \
qt[21] = expand2s(Qs, M, H, 21); \
qt[22] = expand2s(Qs, M, H, 22); \
qt[23] = expand2s(Qs, M, H, 23); \
qt[24] = expand2s(Qs, M, H, 24); \
qt[25] = expand2s(Qs, M, H, 25); \
qt[26] = expand2s(Qs, M, H, 26); \
qt[27] = expand2s(Qs, M, H, 27); \
qt[28] = expand2s(Qs, M, H, 28); \
qt[29] = expand2s(Qs, M, H, 29); \
qt[30] = expand2s(Qs, M, H, 30); \
qt[31] = expand2s(Qs, M, H, 31); \
} while (0)
#endif
/* Compute the full 32-entry Q array (f0 + f1 of the compression). */
#define MAKE_Qs   do { \
MAKE_Qas; \
MAKE_Qbs; \
} while (0)
#define Qs(j)   (qt[j])
/* 64-bit W/Q machinery: mirrors the 32-bit version above. */
#if SPH_64
#define Wb0 MAKE_W(SPH_T64, 5, -, 7, +, 10, +, 13, +, 14)
#define Wb1 MAKE_W(SPH_T64, 6, -, 8, +, 11, +, 14, -, 15)
#define Wb2 MAKE_W(SPH_T64, 0, +, 7, +, 9, -, 12, +, 15)
#define Wb3 MAKE_W(SPH_T64, 0, -, 1, +, 8, -, 10, +, 13)
#define Wb4 MAKE_W(SPH_T64, 1, +, 2, +, 9, -, 11, -, 14)
#define Wb5 MAKE_W(SPH_T64, 3, -, 2, +, 10, -, 12, +, 15)
#define Wb6 MAKE_W(SPH_T64, 4, -, 0, -, 3, -, 11, +, 13)
#define Wb7 MAKE_W(SPH_T64, 1, -, 4, -, 5, -, 12, -, 14)
#define Wb8 MAKE_W(SPH_T64, 2, -, 5, -, 6, +, 13, -, 15)
#define Wb9 MAKE_W(SPH_T64, 0, -, 3, +, 6, -, 7, +, 14)
#define Wb10 MAKE_W(SPH_T64, 8, -, 1, -, 4, -, 7, +, 15)
#define Wb11 MAKE_W(SPH_T64, 8, -, 0, -, 2, -, 5, +, 9)
#define Wb12 MAKE_W(SPH_T64, 1, +, 3, -, 6, -, 9, +, 10)
#define Wb13 MAKE_W(SPH_T64, 2, +, 4, +, 7, +, 10, +, 11)
#define Wb14 MAKE_W(SPH_T64, 3, -, 5, +, 8, -, 11, -, 12)
#define Wb15 MAKE_W(SPH_T64, 12, -, 4, -, 6, -, 9, +, 13)
#if SPH_SMALL_FOOTPRINT_BMW
/* Looping small-footprint Q0..Q15 (same period-5 s-function pattern). */
#define MAKE_Qab   do { \
unsigned u; \
sph_u64 Wb[16]; \
Wb[ 0] = Wb0; \
Wb[ 1] = Wb1; \
Wb[ 2] = Wb2; \
Wb[ 3] = Wb3; \
Wb[ 4] = Wb4; \
Wb[ 5] = Wb5; \
Wb[ 6] = Wb6; \
Wb[ 7] = Wb7; \
Wb[ 8] = Wb8; \
Wb[ 9] = Wb9; \
Wb[10] = Wb10; \
Wb[11] = Wb11; \
Wb[12] = Wb12; \
Wb[13] = Wb13; \
Wb[14] = Wb14; \
Wb[15] = Wb15; \
for (u = 0; u < 15; u += 5) { \
qt[u + 0] = SPH_T64(sb0(Wb[u + 0]) + H(u + 1)); \
qt[u + 1] = SPH_T64(sb1(Wb[u + 1]) + H(u + 2)); \
qt[u + 2] = SPH_T64(sb2(Wb[u + 2]) + H(u + 3)); \
qt[u + 3] = SPH_T64(sb3(Wb[u + 3]) + H(u + 4)); \
qt[u + 4] = SPH_T64(sb4(Wb[u + 4]) + H(u + 5)); \
} \
qt[15] = SPH_T64(sb0(Wb[15]) + H(0)); \
} while (0)
/* Looping expansion: 2 expand1 rounds (16,17), 14 expand2 rounds (18..31). */
#define MAKE_Qbb   do { \
unsigned u; \
for (u = 16; u < 18; u ++) \
qt[u] = expand1b(Qb, M, H, u); \
for (u = 18; u < 32; u ++) \
qt[u] = expand2b(Qb, M, H, u); \
} while (0)
#else
#define MAKE_Qab   do { \
qt[ 0] = SPH_T64(sb0(Wb0 ) + H( 1)); \
qt[ 1] = SPH_T64(sb1(Wb1 ) + H( 2)); \
qt[ 2] = SPH_T64(sb2(Wb2 ) + H( 3)); \
qt[ 3] = SPH_T64(sb3(Wb3 ) + H( 4)); \
qt[ 4] = SPH_T64(sb4(Wb4 ) + H( 5)); \
qt[ 5] = SPH_T64(sb0(Wb5 ) + H( 6)); \
qt[ 6] = SPH_T64(sb1(Wb6 ) + H( 7)); \
qt[ 7] = SPH_T64(sb2(Wb7 ) + H( 8)); \
qt[ 8] = SPH_T64(sb3(Wb8 ) + H( 9)); \
qt[ 9] = SPH_T64(sb4(Wb9 ) + H(10)); \
qt[10] = SPH_T64(sb0(Wb10) + H(11)); \
qt[11] = SPH_T64(sb1(Wb11) + H(12)); \
qt[12] = SPH_T64(sb2(Wb12) + H(13)); \
qt[13] = SPH_T64(sb3(Wb13) + H(14)); \
qt[14] = SPH_T64(sb4(Wb14) + H(15)); \
qt[15] = SPH_T64(sb0(Wb15) + H( 0)); \
} while (0)
#define MAKE_Qbb   do { \
qt[16] = expand1b(Qb, M, H, 16); \
qt[17] = expand1b(Qb, M, H, 17); \
qt[18] = expand2b(Qb, M, H, 18); \
qt[19] = expand2b(Qb, M, H, 19); \
qt[20] = expand2b(Qb, M, H, 20); \
qt[21] = expand2b(Qb, M, H, 21); \
qt[22] = expand2b(Qb, M, H, 22); \
qt[23] = expand2b(Qb, M, H, 23); \
qt[24] = expand2b(Qb, M, H, 24); \
qt[25] = expand2b(Qb, M, H, 25); \
qt[26] = expand2b(Qb, M, H, 26); \
qt[27] = expand2b(Qb, M, H, 27); \
qt[28] = expand2b(Qb, M, H, 28); \
qt[29] = expand2b(Qb, M, H, 29); \
qt[30] = expand2b(Qb, M, H, 30); \
qt[31] = expand2b(Qb, M, H, 31); \
} while (0)
#endif
/* Compute the full 32-entry 64-bit Q array. */
#define MAKE_Qb   do { \
MAKE_Qab; \
MAKE_Qbb; \
} while (0)
#define Qb(j)   (qt[j])
#endif
/*
 * FOLD: the f2 finalization of one BMW compression. Computes the 32
 * quadruple-pipe values (mkQ), folds them into XL/XH, then derives the
 * 16 new chaining words dhf(0..15) from XL, XH, Q and the message.
 * Parameterized on word type so the same body serves 32- and 64-bit.
 */
#define FOLD(type, mkQ, tt, rol, mf, qf, dhf)   do { \
type qt[32], xl, xh; \
mkQ; \
xl = qf(16) ^ qf(17) ^ qf(18) ^ qf(19) \
^ qf(20) ^ qf(21) ^ qf(22) ^ qf(23); \
xh = xl ^ qf(24) ^ qf(25) ^ qf(26) ^ qf(27) \
^ qf(28) ^ qf(29) ^ qf(30) ^ qf(31); \
dhf( 0) = tt(((xh <<  5) ^ (qf(16) >>  5) ^ mf( 0)) \
+ (xl ^ qf(24) ^ qf( 0))); \
dhf( 1) = tt(((xh >>  7) ^ (qf(17) <<  8) ^ mf( 1)) \
+ (xl ^ qf(25) ^ qf( 1))); \
dhf( 2) = tt(((xh >>  5) ^ (qf(18) <<  5) ^ mf( 2)) \
+ (xl ^ qf(26) ^ qf( 2))); \
dhf( 3) = tt(((xh >>  1) ^ (qf(19) <<  5) ^ mf( 3)) \
+ (xl ^ qf(27) ^ qf( 3))); \
dhf( 4) = tt(((xh >>  3) ^ (qf(20) <<  0) ^ mf( 4)) \
+ (xl ^ qf(28) ^ qf( 4))); \
dhf( 5) = tt(((xh <<  6) ^ (qf(21) >>  6) ^ mf( 5)) \
+ (xl ^ qf(29) ^ qf( 5))); \
dhf( 6) = tt(((xh >>  4) ^ (qf(22) <<  6) ^ mf( 6)) \
+ (xl ^ qf(30) ^ qf( 6))); \
dhf( 7) = tt(((xh >> 11) ^ (qf(23) <<  2) ^ mf( 7)) \
+ (xl ^ qf(31) ^ qf( 7))); \
dhf( 8) = tt(rol(dhf(4),  9) + (xh ^ qf(24) ^ mf( 8)) \
+ ((xl <<  8) ^ qf(23) ^ qf( 8))); \
dhf( 9) = tt(rol(dhf(5), 10) + (xh ^ qf(25) ^ mf( 9)) \
+ ((xl >>  6) ^ qf(16) ^ qf( 9))); \
dhf(10) = tt(rol(dhf(6), 11) + (xh ^ qf(26) ^ mf(10)) \
+ ((xl <<  6) ^ qf(17) ^ qf(10))); \
dhf(11) = tt(rol(dhf(7), 12) + (xh ^ qf(27) ^ mf(11)) \
+ ((xl <<  4) ^ qf(18) ^ qf(11))); \
dhf(12) = tt(rol(dhf(0), 13) + (xh ^ qf(28) ^ mf(12)) \
+ ((xl >>  3) ^ qf(19) ^ qf(12))); \
dhf(13) = tt(rol(dhf(1), 14) + (xh ^ qf(29) ^ mf(13)) \
+ ((xl >>  4) ^ qf(20) ^ qf(13))); \
dhf(14) = tt(rol(dhf(2), 15) + (xh ^ qf(30) ^ mf(14)) \
+ ((xl >>  7) ^ qf(21) ^ qf(14))); \
dhf(15) = tt(rol(dhf(3), 16) + (xh ^ qf(31) ^ mf(15)) \
+ ((xl >>  2) ^ qf(22) ^ qf(15))); \
} while (0)
/* Concrete instantiations for the small (32-bit) and big (64-bit) pipes. */
#define FOLDs   FOLD(sph_u32, MAKE_Qs, SPH_T32, SPH_ROTL32, M, Qs, dH)
#if SPH_64
#define FOLDb   FOLD(sph_u64, MAKE_Qb, SPH_T64, SPH_ROTL64, M, Qb, dH)
#endif
/*
 * One BMW-224/256 compression: reads a 64-byte little-endian block
 * from `data`, combines it with the incoming chaining value `h`, and
 * writes the new chaining value into `dh` (h and dh may differ).
 * The M/H/dH macros bind the FOLDs macro body to these parameters.
 */
static void
compress_small(const unsigned char *data, const sph_u32 h[16], sph_u32 dh[16])
{
#if SPH_LITTLE_FAST
	/* Fast little-endian target: decode message words in place. */
#define M(x)    sph_dec32le_aligned(data + 4 * (x))
#else
	/* Generic path: decode once into a local array (M is read many times). */
	sph_u32 mv[16];
	mv[ 0] = sph_dec32le_aligned(data +  0);
	mv[ 1] = sph_dec32le_aligned(data +  4);
	mv[ 2] = sph_dec32le_aligned(data +  8);
	mv[ 3] = sph_dec32le_aligned(data + 12);
	mv[ 4] = sph_dec32le_aligned(data + 16);
	mv[ 5] = sph_dec32le_aligned(data + 20);
	mv[ 6] = sph_dec32le_aligned(data + 24);
	mv[ 7] = sph_dec32le_aligned(data + 28);
	mv[ 8] = sph_dec32le_aligned(data + 32);
	mv[ 9] = sph_dec32le_aligned(data + 36);
	mv[10] = sph_dec32le_aligned(data + 40);
	mv[11] = sph_dec32le_aligned(data + 44);
	mv[12] = sph_dec32le_aligned(data + 48);
	mv[13] = sph_dec32le_aligned(data + 52);
	mv[14] = sph_dec32le_aligned(data + 56);
	mv[15] = sph_dec32le_aligned(data + 60);
#define M(x)    (mv[x])
#endif
#define H(x)    (h[x])
#define dH(x)   (dh[x])
	FOLDs;
#undef M
#undef H
#undef dH
}
/*
 * Constant chaining value for BMW's final "blank round": the last
 * compression uses this 0xaaaaaaaX pattern as H, per the (round-2
 * tweaked) BMW specification.
 */
static const sph_u32 final_s[16] = {
	SPH_C32(0xaaaaaaa0), SPH_C32(0xaaaaaaa1), SPH_C32(0xaaaaaaa2),
	SPH_C32(0xaaaaaaa3), SPH_C32(0xaaaaaaa4), SPH_C32(0xaaaaaaa5),
	SPH_C32(0xaaaaaaa6), SPH_C32(0xaaaaaaa7), SPH_C32(0xaaaaaaa8),
	SPH_C32(0xaaaaaaa9), SPH_C32(0xaaaaaaaa), SPH_C32(0xaaaaaaab),
	SPH_C32(0xaaaaaaac), SPH_C32(0xaaaaaaad), SPH_C32(0xaaaaaaae),
	SPH_C32(0xaaaaaaaf)
};
/*
 * Initialize a small (32-bit pipe) BMW context with the given IV
 * (IV224 or IV256) and reset the buffered-byte and bit counters.
 * Without a 64-bit type, the 64-bit message-length counter is kept
 * as a high/low pair of 32-bit words.
 */
static void
bmw32_init(sph_bmw_small_context *sc, const sph_u32 *iv)
{
	memcpy(sc->H, iv, sizeof sc->H);
	sc->ptr = 0;
#if SPH_64
	sc->bit_count = 0;
#else
	sc->bit_count_high = 0;
	sc->bit_count_low = 0;
#endif
}
/*
 * Absorb `len` bytes into a small (32-bit pipe) BMW context.
 * Buffers partial blocks in sc->buf and runs compress_small on each
 * full 64-byte block, ping-ponging the chaining value between sc->H
 * and a local scratch array; the final value is copied back if it
 * ended up in the scratch buffer.
 */
static void
bmw32(sph_bmw_small_context *sc, const void *data, size_t len)
{
	unsigned char *buf = sc->buf;
	size_t ptr = sc->ptr;
	sph_u32 htmp[16];
	sph_u32 *cur = sc->H;
	sph_u32 *alt = htmp;

	/* Update the running bit count before consuming any data. */
#if SPH_64
	sc->bit_count += (sph_u64)len << 3;
#else
	{
		sph_u32 old = sc->bit_count_low;

		sc->bit_count_low = SPH_T32(old + ((sph_u32)len << 3));
		if (sc->bit_count_low < old)
			sc->bit_count_high ++;
		sc->bit_count_high += len >> 29;
	}
#endif
	while (len > 0) {
		size_t chunk = (sizeof sc->buf) - ptr;

		if (chunk > len)
			chunk = len;
		memcpy(buf + ptr, data, chunk);
		data = (const unsigned char *)data + chunk;
		len -= chunk;
		ptr += chunk;
		if (ptr == sizeof sc->buf) {
			sph_u32 *swap;

			/* Full block: compress and swap chaining buffers. */
			compress_small(buf, cur, alt);
			swap = cur;
			cur = alt;
			alt = swap;
			ptr = 0;
		}
	}
	sc->ptr = ptr;
	if (cur != sc->H)
		memcpy(sc->H, cur, sizeof sc->H);
}
/*
 * Finalize a small BMW computation and write the digest.
 * `ub`/`n` allow a trailing partial byte: the top `n` bits of `ub`
 * are appended before padding. `out_size_w32` is the digest size in
 * 32-bit words (7 for BMW-224, 8 for BMW-256); the digest is the
 * LAST out_size_w32 words of the final chaining value.
 */
static void
bmw32_close(sph_bmw_small_context *sc, unsigned ub, unsigned n,
	void *dst, size_t out_size_w32)
{
	unsigned char *buf, *out;
	size_t ptr, u, v;
	unsigned z;
	sph_u32 h1[16], h2[16], *h;

	buf = sc->buf;
	ptr = sc->ptr;
	/* Append the n extra bits followed by a single 1 bit (0x80 >> n). */
	z = 0x80 >> n;
	buf[ptr ++] = ((ub & -z) | z) & 0xFF;
	h = sc->H;
	/* If the 64-bit length field no longer fits, pad and compress now. */
	if (ptr > (sizeof sc->buf) - 8) {
		memset(buf + ptr, 0, (sizeof sc->buf) - ptr);
		compress_small(buf, h, h1);
		ptr = 0;
		h = h1;
	}
	/* Zero-pad, then append the total bit length, little-endian. */
	memset(buf + ptr, 0, (sizeof sc->buf) - 8 - ptr);
#if SPH_64
	sph_enc64le_aligned(buf + (sizeof sc->buf) - 8,
		SPH_T64(sc->bit_count + n));
#else
	sph_enc32le_aligned(buf + (sizeof sc->buf) - 8,
		sc->bit_count_low + n);
	sph_enc32le_aligned(buf + (sizeof sc->buf) - 4,
		SPH_T32(sc->bit_count_high));
#endif
	compress_small(buf, h, h2);
	/* Final blank round: compress h2 as a message with constant CV. */
	for (u = 0; u < 16; u ++)
		sph_enc32le_aligned(buf + 4 * u, h2[u]);
	compress_small(buf, final_s, h1);
	out = dst;
	for (u = 0, v = 16 - out_size_w32; u < out_size_w32; u ++, v ++)
		sph_enc32le(out + 4 * u, h1[v]);
}
#if SPH_64
/*
 * One BMW-384/512 compression: reads a 128-byte little-endian block
 * from `data`, combines it with chaining value `h`, and writes the
 * new chaining value into `dh`. Mirrors compress_small with 64-bit
 * words and the FOLDb macro.
 */
static void
compress_big(const unsigned char *data, const sph_u64 h[16], sph_u64 dh[16])
{
#if SPH_LITTLE_FAST
	/* Fast little-endian target: decode message words in place. */
#define M(x)    sph_dec64le_aligned(data + 8 * (x))
#else
	/* Generic path: decode once into a local array. */
	sph_u64 mv[16];
	mv[ 0] = sph_dec64le_aligned(data +   0);
	mv[ 1] = sph_dec64le_aligned(data +   8);
	mv[ 2] = sph_dec64le_aligned(data +  16);
	mv[ 3] = sph_dec64le_aligned(data +  24);
	mv[ 4] = sph_dec64le_aligned(data +  32);
	mv[ 5] = sph_dec64le_aligned(data +  40);
	mv[ 6] = sph_dec64le_aligned(data +  48);
	mv[ 7] = sph_dec64le_aligned(data +  56);
	mv[ 8] = sph_dec64le_aligned(data +  64);
	mv[ 9] = sph_dec64le_aligned(data +  72);
	mv[10] = sph_dec64le_aligned(data +  80);
	mv[11] = sph_dec64le_aligned(data +  88);
	mv[12] = sph_dec64le_aligned(data +  96);
	mv[13] = sph_dec64le_aligned(data + 104);
	mv[14] = sph_dec64le_aligned(data + 112);
	mv[15] = sph_dec64le_aligned(data + 120);
#define M(x)    (mv[x])
#endif
#define H(x)    (h[x])
#define dH(x)   (dh[x])
	FOLDb;
#undef M
#undef H
#undef dH
}
/* 64-bit constant chaining value for the final blank round (cf. final_s). */
static const sph_u64 final_b[16] = {
	SPH_C64(0xaaaaaaaaaaaaaaa0), SPH_C64(0xaaaaaaaaaaaaaaa1),
	SPH_C64(0xaaaaaaaaaaaaaaa2), SPH_C64(0xaaaaaaaaaaaaaaa3),
	SPH_C64(0xaaaaaaaaaaaaaaa4), SPH_C64(0xaaaaaaaaaaaaaaa5),
	SPH_C64(0xaaaaaaaaaaaaaaa6), SPH_C64(0xaaaaaaaaaaaaaaa7),
	SPH_C64(0xaaaaaaaaaaaaaaa8), SPH_C64(0xaaaaaaaaaaaaaaa9),
	SPH_C64(0xaaaaaaaaaaaaaaaa), SPH_C64(0xaaaaaaaaaaaaaaab),
	SPH_C64(0xaaaaaaaaaaaaaaac), SPH_C64(0xaaaaaaaaaaaaaaad),
	SPH_C64(0xaaaaaaaaaaaaaaae), SPH_C64(0xaaaaaaaaaaaaaaaf)
};
/*
 * Initialize a big (64-bit pipe) BMW context with the given IV
 * (IV384 or IV512) and reset the buffer pointer and bit counter.
 */
static void
bmw64_init(sph_bmw_big_context *sc, const sph_u64 *iv)
{
	memcpy(sc->H, iv, sizeof sc->H);
	sc->ptr = 0;
	sc->bit_count = 0;
}
/*
 * Absorb `len` bytes into a big (64-bit pipe) BMW context.
 * Same buffering scheme as bmw32: partial blocks accumulate in
 * sc->buf; each full 128-byte block goes through compress_big with
 * the chaining value ping-ponging between sc->H and local scratch.
 */
static void
bmw64(sph_bmw_big_context *sc, const void *data, size_t len)
{
	unsigned char *buf = sc->buf;
	size_t ptr = sc->ptr;
	sph_u64 htmp[16];
	sph_u64 *cur = sc->H;
	sph_u64 *alt = htmp;

	sc->bit_count += (sph_u64)len << 3;
	while (len > 0) {
		size_t chunk = (sizeof sc->buf) - ptr;

		if (chunk > len)
			chunk = len;
		memcpy(buf + ptr, data, chunk);
		data = (const unsigned char *)data + chunk;
		len -= chunk;
		ptr += chunk;
		if (ptr == sizeof sc->buf) {
			sph_u64 *swap;

			/* Full block: compress and swap chaining buffers. */
			compress_big(buf, cur, alt);
			swap = cur;
			cur = alt;
			alt = swap;
			ptr = 0;
		}
	}
	sc->ptr = ptr;
	if (cur != sc->H)
		memcpy(sc->H, cur, sizeof sc->H);
}
/*
 * Finalize a big BMW computation and write the digest.
 * `ub`/`n` append a trailing partial byte (top n bits of ub).
 * `out_size_w64` is the digest size in 64-bit words (6 for BMW-384,
 * 8 for BMW-512); the digest is the LAST out_size_w64 words of the
 * final chaining value, encoded little-endian.
 */
static void
bmw64_close(sph_bmw_big_context *sc, unsigned ub, unsigned n,
	void *dst, size_t out_size_w64)
{
	unsigned char *buf, *out;
	size_t ptr, u, v;
	unsigned z;
	sph_u64 h1[16], h2[16], *h;

	buf = sc->buf;
	ptr = sc->ptr;
	/* Append the n extra bits followed by a single 1 bit. */
	z = 0x80 >> n;
	buf[ptr ++] = ((ub & -z) | z) & 0xFF;
	h = sc->H;
	/* If the 64-bit length field no longer fits, pad and compress now. */
	if (ptr > (sizeof sc->buf) - 8) {
		memset(buf + ptr, 0, (sizeof sc->buf) - ptr);
		compress_big(buf, h, h1);
		ptr = 0;
		h = h1;
	}
	/* Zero-pad and append the total bit length, little-endian. */
	memset(buf + ptr, 0, (sizeof sc->buf) - 8 - ptr);
	sph_enc64le_aligned(buf + (sizeof sc->buf) - 8,
		SPH_T64(sc->bit_count + n));
	compress_big(buf, h, h2);
	/* Final blank round with the constant chaining value. */
	for (u = 0; u < 16; u ++)
		sph_enc64le_aligned(buf + 8 * u, h2[u]);
	compress_big(buf, final_b, h1);
	out = dst;
	for (u = 0, v = 16 - out_size_w64; u < out_size_w64; u ++, v ++)
		sph_enc64le(out + 8 * u, h1[v]);
}
#endif
/* see sph_bmw.h */
/* Public BMW-224 API: thin wrappers over the small-pipe engine. */
void
sph_bmw224_init(void *cc)
{
	bmw32_init(cc, IV224);
}

/* see sph_bmw.h */
void
sph_bmw224(void *cc, const void *data, size_t len)
{
	bmw32(cc, data, len);
}

/* see sph_bmw.h */
void
sph_bmw224_close(void *cc, void *dst)
{
	sph_bmw224_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_bmw.h */
/* 7 words = 224-bit digest; the context is re-initialized afterwards. */
void
sph_bmw224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
	bmw32_close(cc, ub, n, dst, 7);
	sph_bmw224_init(cc);
}

/* see sph_bmw.h */
/* Public BMW-256 API. */
void
sph_bmw256_init(void *cc)
{
	bmw32_init(cc, IV256);
}

/* see sph_bmw.h */
void
sph_bmw256(void *cc, const void *data, size_t len)
{
	bmw32(cc, data, len);
}

/* see sph_bmw.h */
void
sph_bmw256_close(void *cc, void *dst)
{
	sph_bmw256_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_bmw.h */
/* 8 words = 256-bit digest; the context is re-initialized afterwards. */
void
sph_bmw256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
	bmw32_close(cc, ub, n, dst, 8);
	sph_bmw256_init(cc);
}
#if SPH_64
/* see sph_bmw.h */
/* Public BMW-384 API: thin wrappers over the big-pipe engine. */
void
sph_bmw384_init(void *cc)
{
	bmw64_init(cc, IV384);
}

/* see sph_bmw.h */
void
sph_bmw384(void *cc, const void *data, size_t len)
{
	bmw64(cc, data, len);
}

/* see sph_bmw.h */
void
sph_bmw384_close(void *cc, void *dst)
{
	sph_bmw384_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_bmw.h */
/* 6 words = 384-bit digest; the context is re-initialized afterwards. */
void
sph_bmw384_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
	bmw64_close(cc, ub, n, dst, 6);
	sph_bmw384_init(cc);
}

/* see sph_bmw.h */
/* Public BMW-512 API. */
void
sph_bmw512_init(void *cc)
{
	bmw64_init(cc, IV512);
}

/* see sph_bmw.h */
void
sph_bmw512(void *cc, const void *data, size_t len)
{
	bmw64(cc, data, len);
}

/* see sph_bmw.h */
void
sph_bmw512_close(void *cc, void *dst)
{
	sph_bmw512_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_bmw.h */
/* 8 words = 512-bit digest; the context is re-initialized afterwards. */
void
sph_bmw512_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
	bmw64_close(cc, ub, n, dst, 8);
	sph_bmw512_init(cc);
}
#endif
#ifdef __cplusplus
}
#endif
| mit |
pixelglow/graphviz | cmd/lefty/ws/mswin32/gcanvas.c | 17 | 43121 | /* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */
/*************************************************************************
* Copyright (c) 2011 AT&T Intellectual Property
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors: See CVS logs. Details at http://www.graphviz.org/
*************************************************************************/
/* Lefteris Koutsofios - AT&T Labs Research */
#include "common.h"
#include "g.h"
#include "gcommon.h"
#include "mem.h"
#define WCU widget->u.c
#define WINDOW widget->u.c->window
#define GC widget->u.c->gc
#define ISVISIBLE(r) ( \
(r.o.x <= WCU->clip.c.x) && (r.c.x >= WCU->clip.o.x) && \
(r.o.y <= WCU->clip.c.y) && (r.c.y >= WCU->clip.o.y) \
)
#define max(a, b) (((a) >= (b)) ? (a) : (b))
#define min(a, b) (((a) <= (b)) ? (a) : (b))
static long gstyles[5] = {
/* G_SOLID */ PS_SOLID,
/* G_DASHED */ PS_DASH,
/* G_DOTTED */ PS_DOT,
/* G_LONGDASHED */ PS_DASH,
/* G_SHORTDASHED */ PS_DASH,
};
static char grays[][4] = {
{ 0x00, 0x00, 0x00, 0x00 },
{ 0x08, 0x00, 0x00, 0x00 },
{ 0x08, 0x00, 0x02, 0x00 },
{ 0x0A, 0x00, 0x02, 0x00 },
{ 0x0A, 0x00, 0x0A, 0x00 },
{ 0x0A, 0x04, 0x0A, 0x00 },
{ 0x0A, 0x04, 0x0A, 0x01 },
{ 0x0A, 0x05, 0x0A, 0x01 },
{ 0x0A, 0x05, 0x0A, 0x05 },
{ 0x0E, 0x05, 0x0A, 0x05 },
{ 0x0E, 0x05, 0x0B, 0x05 },
{ 0x0F, 0x05, 0x0B, 0x05 },
{ 0x0F, 0x05, 0x0F, 0x05 },
{ 0x0F, 0x0D, 0x0F, 0x05 },
{ 0x0F, 0x0D, 0x0F, 0x07 },
{ 0x0F, 0x0F, 0x0F, 0x07 },
{ 0x0F, 0x0F, 0x0F, 0x0F }
};
static int curcursori = -1;
static void bezier (PIXpoint_t, PIXpoint_t, PIXpoint_t, PIXpoint_t);
static HFONT findfont (char *, int);
static int scalebitmap (Gwidget_t *, Gbitmap_t *, Gsize_t, int, int);
static void setgattr (Gwidget_t *, Ggattr_t *);
static PIXrect_t rdrawtopix (Gwidget_t *, Grect_t);
static PIXpoint_t pdrawtopix (Gwidget_t *, Gpoint_t);
static PIXsize_t sdrawtopix (Gwidget_t *, Gsize_t);
static Gsize_t spixtodraw (Gwidget_t *, PIXsize_t);
static Grect_t rpixtodraw (Gwidget_t *, PIXrect_t);
static PIXrect_t rdrawtobpix (Gbitmap_t *, Grect_t);
static PIXpoint_t pdrawtobpix (Gbitmap_t *, Gpoint_t);
/* Create a canvas child window under parent, set up its drawing state
 * (two-entry palette, pen, brush, text color, gray pattern brushes for
 * 1-bit displays), then apply the creation attributes in attrp[0..attrn-1].
 * Returns 0 on success, -1 on error (with Gerr already reported).
 * Fix: the color-index bounds check now rejects color == G_MAXCOLORS;
 * the colors[] array has exactly G_MAXCOLORS entries (all other loops use
 * `< G_MAXCOLORS`), so the original `>` test allowed an out-of-bounds write. */
int GCcreatewidget (
    Gwidget_t *parent, Gwidget_t *widget, int attrn, Gwattr_t *attrp
) {
    PIXsize_t ps;
    /* the 2 here is to provide enough space for palPalEntry[0] and [1] */
    LOGPALETTE pal[2];
    HBRUSH brush;
    HPEN pen;
    HBITMAP bmap;
    HCURSOR cursor;
    DWORD wflags;
    int color, ai, i;

    if (!parent) {
        Gerr (POS, G_ERRNOPARENTWIDGET);
        return -1;
    }
    wflags = WS_CHILDWINDOW;
    WCU->func = NULL;
    WCU->needredraw = FALSE;
    WCU->buttonsdown = 0;
    WCU->bstate[0] = WCU->bstate[1] = WCU->bstate[2] = 0;
    ps.x = ps.y = MINCWSIZE;
    /* first pass: only attributes needed before the window exists */
    for (ai = 0; ai < attrn; ai++) {
        switch (attrp[ai].id) {
        case G_ATTRSIZE:
            GETSIZE (attrp[ai].u.s, ps, MINCWSIZE);
            break;
        case G_ATTRBORDERWIDTH:
            wflags |= WS_BORDER;
            break;
        case G_ATTRCURSOR:
            /* will do it after the widget is created */
            break;
        case G_ATTRCOLOR:
            /* will do it after the widget is created */
            break;
        case G_ATTRVIEWPORT:
            /* will do it after the widget is created */
            break;
        case G_ATTRWINDOW:
            /* will do it after the widget is created */
            break;
        case G_ATTRWINDOWID:
            Gerr (POS, G_ERRCANNOTSETATTR1, "windowid");
            return -1;
        case G_ATTREVENTCB:
            WCU->func = attrp[ai].u.func;
            break;
        case G_ATTRUSERDATA:
            widget->udata = attrp[ai].u.u;
            break;
        default:
            Gerr (POS, G_ERRBADATTRID, attrp[ai].id);
            return -1;
        }
    }
    Gadjustwrect (parent, &ps);
    /* default world-to-viewport mapping: unit square onto ps pixels */
    WCU->wrect.o.x = 0.0, WCU->wrect.o.y = 0.0;
    WCU->wrect.c.x = 1.0, WCU->wrect.c.y = 1.0;
    WCU->vsize.x = ps.x, WCU->vsize.y = ps.y;
    if (!(widget->w = CreateWindow (
        "CanvasClass", "canvas", wflags, 0, 0,
        ps.x, ps.y, parent->w, (HMENU) (widget - &Gwidgets[0]),
        hinstance, NULL
    ))) {
        Gerr (POS, G_ERRCANNOTCREATEWIDGET);
        return -1;
    }
    ShowWindow (widget->w, SW_SHOW);
    UpdateWindow (widget->w);
    SetCursor (LoadCursor ((HINSTANCE) NULL, IDC_ARROW));
    GC = GetDC (widget->w);
    /* two-entry starting palette: index 0 = white, index 1 = black */
    WCU->ncolor = 2;
    pal[0].palVersion = 0x300; /* HA HA HA */
    pal[0].palNumEntries = 2;
    pal[0].palPalEntry[0].peRed = 255;
    pal[0].palPalEntry[0].peGreen = 255;
    pal[0].palPalEntry[0].peBlue = 255;
    pal[0].palPalEntry[0].peFlags = 0;
    pal[0].palPalEntry[1].peRed = 0;
    pal[0].palPalEntry[1].peGreen = 0;
    pal[0].palPalEntry[1].peBlue = 0;
    pal[0].palPalEntry[1].peFlags = 0;
    WCU->cmap = CreatePalette (&pal[0]);
    WCU->colors[0].color = pal[0].palPalEntry[0];
    for (i = 1; i < G_MAXCOLORS; i++)
        WCU->colors[i].color = pal[0].palPalEntry[1];
    SelectPalette (GC, WCU->cmap, FALSE);
    RealizePalette (GC);
    WCU->colors[0].inuse = TRUE;
    WCU->colors[1].inuse = TRUE;
    for (i = 2; i < G_MAXCOLORS; i++)
        WCU->colors[i].inuse = FALSE;
    /* default drawing attributes: color 1 (black), thin solid pen */
    WCU->gattr.color = 1;
    brush = CreateSolidBrush (PALETTEINDEX (1));
    SelectObject (GC, brush);
    pen = CreatePen (PS_SOLID, 1, PALETTEINDEX (1));
    SelectObject (GC, pen);
    SetTextColor (GC, PALETTEINDEX (1));
    SetBkMode (GC, TRANSPARENT);
    WCU->gattr.width = 0;
    WCU->gattr.mode = G_SRC;
    WCU->gattr.fill = 0;
    WCU->gattr.style = 0;
    WCU->defgattr = WCU->gattr;
    WCU->font = NULL;
    if (Gdepth == 1) {
        /* monochrome display: build the 17 dithered gray pattern brushes */
        for (i = 0; i < 17; i++) {
            if (!(bmap = CreateBitmap (4, 4, 1, 1, &grays[i][0])))
                continue;
            WCU->grays[i] = CreatePatternBrush (bmap);
        }
    }
    /* second pass: attributes that needed the window / DC to exist */
    for (ai = 0; ai < attrn; ai++) {
        switch (attrp[ai].id) {
        case G_ATTRCURSOR:
            if (strcmp (attrp[ai].u.t, "watch") == 0) {
                curcursori = 1;
                cursor = LoadCursor ((HINSTANCE) NULL, IDC_WAIT);
            } else if (strcmp (attrp[ai].u.t, "default") == 0) {
                curcursori = -1;
                cursor = LoadCursor ((HINSTANCE) NULL, IDC_ARROW);
            } else {
                Gerr (POS, G_ERRNOSUCHCURSOR, attrp[ai].u.t);
                return -1;
            }
            SetCursor (cursor);
            break;
        case G_ATTRCOLOR:
            color = attrp[ai].u.c.index;
            /* colors[] holds G_MAXCOLORS entries, so index G_MAXCOLORS
             * itself is out of range (the old `>` test let it through) */
            if (color < 0 || color >= G_MAXCOLORS) {
                Gerr (POS, G_ERRBADCOLORINDEX, color);
                return -1;
            }
            WCU->colors[color].color.peRed = attrp[ai].u.c.r;
            WCU->colors[color].color.peGreen = attrp[ai].u.c.g;
            WCU->colors[color].color.peBlue = attrp[ai].u.c.b;
            WCU->colors[color].color.peFlags = 0;
            if (color >= WCU->ncolor)
                ResizePalette (WCU->cmap, color + 1), WCU->ncolor = color + 1;
            SetPaletteEntries (
                WCU->cmap, (int) color, 1, &WCU->colors[color].color);
            RealizePalette (GC);
            WCU->colors[color].inuse = TRUE;
            /* force setgattr to re-select pen/brush if the current
             * drawing color was just redefined */
            if (color == WCU->gattr.color)
                WCU->gattr.color = -1;
            break;
        case G_ATTRVIEWPORT:
            if (attrp[ai].u.s.x == 0)
                attrp[ai].u.s.x = 1;
            if (attrp[ai].u.s.y == 0)
                attrp[ai].u.s.y = 1;
            WCU->vsize.x = (int) (attrp[ai].u.s.x + 0.5);
            WCU->vsize.y = (int) (attrp[ai].u.s.y + 0.5);
            SetWindowPos (
                widget->w, (HWND) NULL, 0, 0, WCU->vsize.x,
                WCU->vsize.y, SWP_NOACTIVATE | SWP_NOZORDER | SWP_NOMOVE
            );
            break;
        case G_ATTRWINDOW:
            /* degenerate world rects are widened to avoid divide-by-zero
             * in the coordinate transforms */
            if (attrp[ai].u.r.o.x == attrp[ai].u.r.c.x)
                attrp[ai].u.r.c.x = attrp[ai].u.r.o.x + 1;
            if (attrp[ai].u.r.o.y == attrp[ai].u.r.c.y)
                attrp[ai].u.r.c.y = attrp[ai].u.r.o.y + 1;
            WCU->wrect = attrp[ai].u.r;
            break;
        }
    }
    if (parent && parent->type == G_ARRAYWIDGET)
        Gawinsertchild (parent, widget);
    Gadjustclip (widget);
    return 0;
}
/* Apply the attributes in attrp[0..attrn-1] to an existing canvas widget.
 * Returns 0 on success, -1 on error (with Gerr already reported).
 * Fix: the color-index bounds check now rejects color == G_MAXCOLORS;
 * colors[] has exactly G_MAXCOLORS entries, so the original `>` test
 * allowed an out-of-bounds write. */
int GCsetwidgetattr (Gwidget_t *widget, int attrn, Gwattr_t *attrp) {
    HCURSOR cursor;
    Gwidget_t *parent;
    PIXsize_t ps;
    DWORD wflags1;
    int ai, color;

    parent = (widget->pwi == -1) ? NULL : &Gwidgets[widget->pwi];
    wflags1 = SWP_NOMOVE | SWP_NOZORDER;
    for (ai = 0; ai < attrn; ai++) {
        switch (attrp[ai].id) {
        case G_ATTRSIZE:
            GETSIZE (attrp[ai].u.s, ps, MINCWSIZE);
            Gadjustwrect (parent, &ps);
            SetWindowPos (widget->w, (HWND) NULL, 0, 0, ps.x, ps.y, wflags1);
            break;
        case G_ATTRBORDERWIDTH:
            Gerr (POS, G_ERRCANNOTSETATTR2, "borderwidth");
            return -1;
        case G_ATTRCURSOR:
            if (strcmp (attrp[ai].u.t, "watch") == 0) {
                curcursori = 1;
                cursor = LoadCursor ((HINSTANCE) NULL, IDC_WAIT);
            } else if (strcmp (attrp[ai].u.t, "default") == 0) {
                curcursori = -1;
                cursor = LoadCursor ((HINSTANCE) NULL, IDC_ARROW);
            } else {
                Gerr (POS, G_ERRNOSUCHCURSOR, attrp[ai].u.t);
                return -1;
            }
            SetCursor (cursor);
            break;
        case G_ATTRCOLOR:
            color = attrp[ai].u.c.index;
            /* colors[] holds G_MAXCOLORS entries, so index G_MAXCOLORS
             * itself is out of range (the old `>` test let it through) */
            if (color < 0 || color >= G_MAXCOLORS) {
                Gerr (POS, G_ERRBADCOLORINDEX, color);
                return -1;
            }
            WCU->colors[color].color.peRed = attrp[ai].u.c.r;
            WCU->colors[color].color.peGreen = attrp[ai].u.c.g;
            WCU->colors[color].color.peBlue = attrp[ai].u.c.b;
            WCU->colors[color].color.peFlags = 0;
            if (color >= WCU->ncolor)
                ResizePalette (WCU->cmap, color + 1), WCU->ncolor = color + 1;
            SetPaletteEntries (
                WCU->cmap, (int) color, 1, &WCU->colors[color].color
            );
            RealizePalette (GC);
            WCU->colors[color].inuse = TRUE;
            /* force setgattr to re-select pen/brush if the current
             * drawing color was just redefined */
            if (color == WCU->gattr.color)
                WCU->gattr.color = -1;
            break;
        case G_ATTRVIEWPORT:
            if (attrp[ai].u.s.x == 0)
                attrp[ai].u.s.x = 1;
            if (attrp[ai].u.s.y == 0)
                attrp[ai].u.s.y = 1;
            WCU->vsize.x = (int) (attrp[ai].u.s.x + 0.5);
            WCU->vsize.y = (int) (attrp[ai].u.s.y + 0.5);
            ps.x = WCU->vsize.x, ps.y = WCU->vsize.y;
            /* NOTE(review): indexes Gwidgets[widget->pwi] without the -1
             * guard used above -- verify a canvas always has a parent here */
            Gadjustwrect (&Gwidgets[widget->pwi], &ps);
            SetWindowPos (
                widget->w, (HWND) NULL, 0, 0, ps.x,
                ps.y, SWP_NOACTIVATE | SWP_NOZORDER | SWP_NOMOVE
            );
            Gadjustclip (widget);
            break;
        case G_ATTRWINDOW:
            /* widen degenerate world rects to avoid divide-by-zero in
             * the coordinate transforms */
            if (attrp[ai].u.r.o.x == attrp[ai].u.r.c.x)
                attrp[ai].u.r.c.x = attrp[ai].u.r.o.x + 1;
            if (attrp[ai].u.r.o.y == attrp[ai].u.r.c.y)
                attrp[ai].u.r.c.y = attrp[ai].u.r.o.y + 1;
            WCU->wrect = attrp[ai].u.r;
            Gadjustclip (widget);
            break;
        case G_ATTRWINDOWID:
            Gerr (POS, G_ERRCANNOTSETATTR2, "windowid");
            return -1;
        case G_ATTREVENTCB:
            WCU->func = attrp[ai].u.func;
            break;
        case G_ATTRUSERDATA:
            widget->udata = attrp[ai].u.u;
            break;
        default:
            Gerr (POS, G_ERRBADATTRID, attrp[ai].id);
            return -1;
        }
    }
    return 0;
}
/* Read the attributes requested in attrp[0..attrn-1] back from the widget.
 * Returns 0 on success, -1 on error (with Gerr already reported).
 * Fix: the color-index bounds check now rejects color == G_MAXCOLORS;
 * colors[] has exactly G_MAXCOLORS entries, so the original `>` test
 * allowed an out-of-bounds read. */
int GCgetwidgetattr (Gwidget_t *widget, int attrn, Gwattr_t *attrp) {
    PALETTEENTRY *cp;
    RECT r;
    int color, ai;

    for (ai = 0; ai < attrn; ai++) {
        switch (attrp[ai].id) {
        case G_ATTRSIZE:
            GetWindowRect (widget->w, &r);
            attrp[ai].u.s.x = r.right - r.left;
            attrp[ai].u.s.y = r.bottom - r.top;
            break;
        case G_ATTRBORDERWIDTH:
            Gerr (POS, G_ERRCANNOTGETATTR, "borderwidth");
            return -1;
        case G_ATTRCURSOR:
            attrp[ai].u.t = (curcursori == -1) ? "default" : "watch";
            break;
        case G_ATTRCOLOR:
            color = attrp[ai].u.c.index;
            /* colors[] holds G_MAXCOLORS entries, so index G_MAXCOLORS
             * itself is out of range (the old `>` test let it through) */
            if (color < 0 || color >= G_MAXCOLORS) {
                Gerr (POS, G_ERRBADCOLORINDEX, color);
                return -1;
            }
            if (WCU->colors[color].inuse) {
                cp = &WCU->colors[color].color;
                attrp[ai].u.c.r = cp->peRed;
                attrp[ai].u.c.g = cp->peGreen;
                attrp[ai].u.c.b = cp->peBlue;
            } else {
                /* -1 components signal "slot not allocated" to the caller */
                attrp[ai].u.c.r = -1;
                attrp[ai].u.c.g = -1;
                attrp[ai].u.c.b = -1;
            }
            break;
        case G_ATTRVIEWPORT:
            attrp[ai].u.s = WCU->vsize;
            break;
        case G_ATTRWINDOW:
            attrp[ai].u.r = WCU->wrect;
            break;
        case G_ATTRWINDOWID:
            /* NOTE(review): formatting an HWND with %lx truncates on
             * 64-bit Windows (LLP64) -- confirm intended targets */
            sprintf (&Gbufp[0], "0x%lx", widget->w);
            attrp[ai].u.t = &Gbufp[0];
            break;
        case G_ATTREVENTCB:
            attrp[ai].u.func = WCU->func;
            break;
        case G_ATTRUSERDATA:
            attrp[ai].u.u = widget->udata;
            break;
        default:
            Gerr (POS, G_ERRBADATTRID, attrp[ai].id);
            return -1;
        }
    }
    return 0;
}
/* Destroy a canvas widget: detach it from an array-widget parent,
 * if it has one, then destroy the underlying window.  Always returns 0. */
int GCdestroywidget (Gwidget_t *widget) {
    if (widget->pwi != -1) {
        Gwidget_t *owner = &Gwidgets[widget->pwi];
        if (owner->type == G_ARRAYWIDGET)
            Gawdeletechild (owner, widget);
    }
    DestroyWindow (widget->w);
    return 0;
}
/* Clear the whole canvas: fill the client area with the background
 * color (palette index 0) and reset the pending-redraw flag. */
int GCcanvasclear (Gwidget_t *widget) {
    Ggattr_t attr;
    RECT r;
    HBRUSH brush, pbrush;
    attr.flags = 0;                    /* no flags set: reuse all defaults */
    setgattr (widget, &attr);
    brush = CreateSolidBrush (PALETTEINDEX (0));
    pbrush = SelectObject (GC, brush); /* remember old brush to restore */
    GetClientRect (widget->w, &r);
    Rectangle (GC, r.left, r.top, r.right, r.bottom);
    SelectObject (GC, pbrush);
    DeleteObject (brush);
    WCU->needredraw = FALSE;
    return 0;
}
/* Set the widget's default graphics attributes: apply ap to the live
 * state, then remember the result as the new defaults. */
int GCsetgfxattr (Gwidget_t *widget, Ggattr_t *ap) {
    setgattr (widget, ap);
    WCU->defgattr = WCU->gattr;
    return 0;
}
/* Copy the widget's current graphics attributes into ap; only the
 * fields whose bits are set in ap->flags are filled in. */
int GCgetgfxattr (Gwidget_t *widget, Ggattr_t *ap) {
    Ggattr_t *cur = &WCU->gattr;
    if (ap->flags & G_GATTRCOLOR)
        ap->color = cur->color;
    if (ap->flags & G_GATTRWIDTH)
        ap->width = cur->width;
    if (ap->flags & G_GATTRMODE)
        ap->mode = cur->mode;
    if (ap->flags & G_GATTRFILL)
        ap->fill = cur->fill;
    if (ap->flags & G_GATTRSTYLE)
        ap->style = cur->style;
    return 0;
}
/* Draw an arrow from gp1 to gp2 (world coordinates) with a two-stroke
 * head at gp2.  Returns 1 if fully clipped out, 0 otherwise. */
int GCarrow (Gwidget_t *widget, Gpoint_t gp1, Gpoint_t gp2, Ggattr_t *ap) {
    PIXpoint_t pp1, pp2, pa, pb, pd;
    Grect_t gr;
    double tangent, l;
    /* normalize the endpoints into a bounding rect for the clip test */
    if (gp1.x < gp2.x)
        gr.o.x = gp1.x, gr.c.x = gp2.x;
    else
        gr.o.x = gp2.x, gr.c.x = gp1.x;
    if (gp1.y < gp2.y)
        gr.o.y = gp1.y, gr.c.y = gp2.y;
    else
        gr.o.y = gp2.y, gr.c.y = gp1.y;
    if (!ISVISIBLE (gr))
        return 1;
    pp1 = pdrawtopix (widget, gp1), pp2 = pdrawtopix (widget, gp2);
    pd.x = pp1.x - pp2.x, pd.y = pp1.y - pp2.y;
    if (pd.x == 0 && pd.y == 0)
        return 0;                  /* zero-length arrow: nothing to draw */
    tangent = atan2 ((double) pd.y, (double) pd.x);
    /* head strokes are as long as the shaft, capped at 30 pixels */
    if ((l = sqrt ((double) (pd.x * pd.x + pd.y * pd.y))) > 30)
        l = 30;
    /* head strokes fan out +/- PI/7 radians from the shaft direction */
    pa.x = l * cos (tangent + M_PI / 7) + pp2.x;
    pa.y = l * sin (tangent + M_PI / 7) + pp2.y;
    pb.x = l * cos (tangent - M_PI / 7) + pp2.x;
    pb.y = l * sin (tangent - M_PI / 7) + pp2.y;
    setgattr (widget, ap);
    MoveToEx (GC, pp1.x, pp1.y, NULL), LineTo (GC, pp2.x, pp2.y);
    MoveToEx (GC, pa.x, pa.y, NULL), LineTo (GC, pp2.x, pp2.y);
    MoveToEx (GC, pb.x, pb.y, NULL), LineTo (GC, pp2.x, pp2.y);
    return 0;
}
/* Draw a line segment from gp1 to gp2 (world coordinates) with the
 * attributes in ap.  Returns 1 if fully clipped out, 0 otherwise. */
int GCline (Gwidget_t *widget, Gpoint_t gp1, Gpoint_t gp2, Ggattr_t *ap) {
    PIXpoint_t a, b;
    Grect_t bounds;
    /* normalized bounding rect of the segment, for the clip test */
    bounds.o.x = min (gp1.x, gp2.x), bounds.c.x = max (gp1.x, gp2.x);
    bounds.o.y = min (gp1.y, gp2.y), bounds.c.y = max (gp1.y, gp2.y);
    if (!ISVISIBLE (bounds))
        return 1;
    a = pdrawtopix (widget, gp1);
    b = pdrawtopix (widget, gp2);
    setgattr (widget, ap);
    MoveToEx (GC, a.x, a.y, NULL);
    LineTo (GC, b.x, b.y);
    return 0;
}
/* Draw a rectangle gr (world coordinates): filled via Rectangle when
 * ap->fill is set, else outlined with a closed 5-point polyline.
 * Returns 1 if fully clipped out, 0 otherwise. */
int GCbox (Gwidget_t *widget, Grect_t gr, Ggattr_t *ap) {
    PIXrect_t pr;
    Grect_t gr2;
    /* normalized copy of gr, used only for the visibility test */
    if (gr.o.x <= gr.c.x)
        gr2.o.x = gr.o.x, gr2.c.x = gr.c.x;
    else
        gr2.o.x = gr.c.x, gr2.c.x = gr.o.x;
    if (gr.o.y <= gr.c.y)
        gr2.o.y = gr.o.y, gr2.c.y = gr.c.y;
    else
        gr2.o.y = gr.c.y, gr2.c.y = gr.o.y;
    if (!ISVISIBLE (gr2))
        return 1;
    pr = rdrawtopix (widget, gr);
    setgattr (widget, ap);
    if (WCU->gattr.fill)
        Rectangle (GC, pr.o.x, pr.o.y, pr.c.x, pr.c.y);
    else {
        /* outline only: trace the four corners and close the loop */
        Gppp[0].x = pr.o.x, Gppp[0].y = pr.o.y;
        Gppp[1].x = pr.c.x, Gppp[1].y = pr.o.y;
        Gppp[2].x = pr.c.x, Gppp[2].y = pr.c.y;
        Gppp[3].x = pr.o.x, Gppp[3].y = pr.c.y;
        Gppp[4].x = pr.o.x, Gppp[4].y = pr.o.y;
        Polyline (GC, Gppp, 5);
    }
    return 0;
}
/* Draw a polygon through the gpn world-coordinate points in gpp:
 * filled (Polygon) when ap->fill is set, else open outline (Polyline).
 * Returns 1 if fully clipped out, 0 otherwise. */
int GCpolygon (Gwidget_t *widget, int gpn, Gpoint_t *gpp, Ggattr_t *ap) {
    Grect_t gr;
    int n, i;
    if (gpn == 0)
        return 0;
    /* bounding rect of all vertices, for the clip test */
    gr.o = gpp[0], gr.c = gpp[0];
    for (i = 1; i < gpn; i++) {
        gr.o.x = min (gr.o.x, gpp[i].x);
        gr.o.y = min (gr.o.y, gpp[i].y);
        gr.c.x = max (gr.c.x, gpp[i].x);
        gr.c.y = max (gr.c.y, gpp[i].y);
    }
    if (!ISVISIBLE (gr))
        return 1;
    /* grow the shared point buffer; +1 leaves room for a closing vertex */
    if (gpn + 1 > Gppn) {
        n = (((gpn + 1) + PPINCR - 1) / PPINCR) * PPINCR;
        Gppp = Marraygrow (Gppp, (long) n * PPSIZE);
        Gppn = n;
    }
    for (i = 0; i < gpn; i++)
        Gppp[i] = pdrawtopix (widget, gpp[i]);
    setgattr (widget, ap);
    if (WCU->gattr.fill) {
        /* close the ring explicitly if the caller did not */
        if (Gppp[gpn - 1].x != Gppp[0].x || Gppp[gpn - 1].y != Gppp[0].y)
            Gppp[gpn] = Gppp[0], gpn++;
        Polygon (GC, Gppp, (int) gpn);
    } else
        Polyline (GC, Gppp, (int) gpn);
    return 0;
}
/* Draw a piecewise cubic Bezier path: gpp[0] is the start point and
 * each following triple (control, control, endpoint) defines one
 * segment, flattened into the shared Gppp polyline buffer by bezier().
 * Filled via Polygon when ap->fill is set, else drawn as a Polyline.
 * Returns 1 if fully clipped out, 0 otherwise. */
int GCsplinegon (Gwidget_t *widget, int gpn, Gpoint_t *gpp, Ggattr_t *ap) {
    PIXpoint_t p0, p1, p2, p3;
    Grect_t gr;
    int n, i;
    if (gpn == 0)
        return 0;
    /* bounding rect of the control points, for the clip test */
    gr.o = gpp[0], gr.c = gpp[0];
    for (i = 1; i < gpn; i++) {
        gr.o.x = min (gr.o.x, gpp[i].x);
        gr.o.y = min (gr.o.y, gpp[i].y);
        gr.c.x = max (gr.c.x, gpp[i].x);
        gr.c.y = max (gr.c.y, gpp[i].y);
    }
    if (!ISVISIBLE (gr))
        return 1;
    /* Gppi is the running fill level of Gppp; bezier() appends to it */
    Gppi = 1;
    if (Gppi >= Gppn) {
        n = (((Gppi + 1) + PPINCR - 1) / PPINCR) * PPINCR;
        Gppp = Marraygrow (Gppp, (long) n * PPSIZE);
        Gppn = n;
    }
    Gppp[0] = p3 = pdrawtopix (widget, gpp[0]);
    for (i = 1; i < gpn; i += 3) {
        p0 = p3;                       /* each segment starts where the
                                        * previous one ended */
        p1 = pdrawtopix (widget, gpp[i]);
        p2 = pdrawtopix (widget, gpp[i + 1]);
        p3 = pdrawtopix (widget, gpp[i + 2]);
        bezier (p0, p1, p2, p3);
    }
    setgattr (widget, ap);
    if (WCU->gattr.fill) {
        /* close the ring explicitly if the flattened path is open */
        if (Gppp[Gppi - 1].x != Gppp[0].x || Gppp[Gppi - 1].y != Gppp[0].y)
            Gppp[Gppi] = Gppp[0], Gppi++;
        Polygon (GC, Gppp, (int) Gppi);
    } else
        Polyline (GC, Gppp, (int) Gppi);
    return 0;
}
/* Flatten one cubic Bezier segment (p0 p1 p2 p3, pixel coordinates) by
 * repeated linear interpolation (de Casteljau evaluation at `steps`+1
 * parameter values) and append the resulting points to the shared Gppp
 * buffer at Gppi, growing the buffer as needed. */
static void bezier (
    PIXpoint_t p0, PIXpoint_t p1, PIXpoint_t p2, PIXpoint_t p3
) {
    Gpoint_t gp0, gp1, gp2;
    Gsize_t s;
    PIXpoint_t p;
    double t;
    int n, i, steps;
    /* step count scales with the chord's larger axis extent: roughly
     * one sample every 5 pixels */
    if ((s.x = p3.x - p0.x) < 0)
        s.x = - s.x;
    if ((s.y = p3.y - p0.y) < 0)
        s.y = - s.y;
    if (s.x > s.y)
        steps = s.x / 5 + 1;
    else
        steps = s.y / 5 + 1;
    for (i = 0; i <= steps; i++) {
        t = i / (double) steps;
        /* first interpolation level: three lerps of the control polygon */
        gp0.x = p0.x + t * (p1.x - p0.x);
        gp0.y = p0.y + t * (p1.y - p0.y);
        gp1.x = p1.x + t * (p2.x - p1.x);
        gp1.y = p1.y + t * (p2.y - p1.y);
        gp2.x = p2.x + t * (p3.x - p2.x);
        gp2.y = p2.y + t * (p3.y - p2.y);
        /* second level */
        gp0.x = gp0.x + t * (gp1.x - gp0.x);
        gp0.y = gp0.y + t * (gp1.y - gp0.y);
        gp1.x = gp1.x + t * (gp2.x - gp1.x);
        gp1.y = gp1.y + t * (gp2.y - gp1.y);
        /* third level: the point on the curve, rounded to pixels */
        p.x = gp0.x + t * (gp1.x - gp0.x) + 0.5;
        p.y = gp0.y + t * (gp1.y - gp0.y) + 0.5;
        if (Gppi >= Gppn) {
            n = (((Gppi + 1) + PPINCR - 1) / PPINCR) * PPINCR;
            Gppp = Marraygrow (Gppp, (long) n * PPSIZE);
            Gppn = n;
        }
        Gppp[Gppi++] = p;
    }
}
/* Draw an elliptic arc centered at gc with radii gs, from angle ang1 to
 * ang2 (degrees); filled as a Chord when ap->fill is set, else an Arc.
 * Returns 1 if fully clipped out, 0 otherwise. */
int GCarc (
    Gwidget_t *widget, Gpoint_t gc, Gsize_t gs, double ang1,
    double ang2, Ggattr_t *ap
) {
    PIXpoint_t pc;
    PIXsize_t ps;
    Grect_t gr;
    double a1, a2;
    gr.o.x = gc.x - gs.x, gr.o.y = gc.y - gs.y;
    gr.c.x = gc.x + gs.x, gr.c.y = gc.y + gs.y;
    if (!ISVISIBLE (gr))
        return 1;
    pc = pdrawtopix (widget, gc), ps = sdrawtopix (widget, gs);
    setgattr (widget, ap);
    a1 = ang1 * M_PI / 180, a2 = ang2 * M_PI / 180;
    /* NOTE(review): the radial endpoints below are computed relative to
     * the origin (cos(a)*ps.x) rather than offset by pc, and use ps.x
     * for both axes; GDI Arc/Chord take absolute radial-line endpoints,
     * so the swept angles look wrong for off-origin centers -- confirm
     * against the X11 backend before changing. */
    if (WCU->gattr.fill)
        Chord (
            GC, pc.x - ps.x, pc.y - ps.y, pc.x + ps.x, pc.y + ps.y,
            (int) (cos (a1) * ps.x), (int) (sin (a1) * ps.x),
            (int) (cos (a2) * ps.x), (int) (sin (a2) * ps.x)
        );
    else
        Arc (
            GC, pc.x - ps.x, pc.y - ps.y, pc.x + ps.x, pc.y + ps.y,
            (int) (cos (a1) * ps.x), (int) (sin (a1) * ps.x),
            (int) (cos (a2) * ps.x), (int) (sin (a2) * ps.x)
        );
    return 0;
}
#define YSCALE ((WCU->vsize.y) / (WCU->wrect.c.y - WCU->wrect.o.y))
/* Draw n lines of text (tlp[0..n-1]) anchored at world point go, in font
 * fn at size fs.  justs[0] ('l'/'r'/other) gives horizontal and justs[1]
 * ('d'/'b'/'c'/other) vertical justification of the whole block; each
 * line additionally carries its own justification code tlp[i].j.
 * Returns 1 if the text block is fully clipped out, 0 otherwise.
 * Fix: `x` was used uninitialized when tlp[i].j was none of 'l'/'n'/'r'
 * (undefined behavior); a default case now centers the line ('n'). */
int GCtext (
    Gwidget_t *widget, Gtextline_t *tlp, int n, Gpoint_t go,
    char *fn, double fs, char *justs, Ggattr_t *ap
) {
    Gsize_t gs;
    PIXpoint_t po;
    PIXsize_t ps;
    PIXrect_t pr;
    Grect_t gr;
    HFONT font;
    TEXTMETRIC tm;
    SIZE size;
    RECT r;
    int x, y, w, h, i;

    po = pdrawtopix (widget, go);
    gs.x = 0, gs.y = fs;
    ps = sdrawtopix (widget, gs);
    if (!(font = findfont (fn, ps.y))) {
        /* no usable font: draw a single pixel as a placeholder */
        Rectangle (GC, po.x, po.y, po.x + 1, po.y + 1);
        return 0;
    }
    setgattr (widget, ap);
    SETFONT (font);
    GetTextMetrics (GC, &tm);
    /* measure every line; w/h accumulate the block's extent ("M" stands
     * in for empty lines so they still take vertical space) */
    for (w = h = 0, i = 0; i < n; i++) {
        if (tlp[i].n)
            GetTextExtentPoint32 (GC, tlp[i].p, (int) tlp[i].n, &size);
        else
            GetTextExtentPoint32 (GC, "M", (int) 1, &size);
        tlp[i].w = size.cx, tlp[i].h = size.cy;
        w = max (w, size.cx), h += size.cy;
    }
    /* shift the anchor so po becomes the block's center-top reference */
    switch (justs[0]) {
    case 'l': po.x += w / 2; break;
    case 'r': po.x -= w / 2; break;
    }
    switch (justs[1]) {
    case 'd': po.y -= h; break;
    case 'b': po.y -= (h - tm.tmDescent); break;
    case 'c': po.y -= h / 2; break;
    }
    pr.o.x = po.x - w / 2, pr.o.y = po.y;
    pr.c.x = po.x + w / 2, pr.c.y = po.y + h;
    gr = rpixtodraw (widget, pr);
    if (!ISVISIBLE (gr))
        return 1;
    for (i = 0; i < n; i++) {
        switch (tlp[i].j) {
        case 'l': x = po.x - w / 2; break;
        case 'r': x = po.x - (tlp[i].w - w / 2); break;
        case 'n':
        default:  x = po.x - tlp[i].w / 2; break;  /* center by default;
                                                    * x was previously left
                                                    * uninitialized here */
        }
        y = po.y + i * tlp[i].h;
        r.left = x, r.top = y;
        r.right = x + tlp[i].w, r.bottom = y + tlp[i].h;
        DrawText (GC, tlp[i].p, (int) tlp[i].n, &r, DT_LEFT | DT_TOP);
    }
    return 0;
}
/* Measure the n text lines in tlp for font fn at size fs and return the
 * bounding size (max line width, summed line heights) in world
 * coordinates through gsp.  Falls back to a 1x1 size if no font. */
int GCgettextsize (
    Gwidget_t *widget, Gtextline_t *tlp, int n, char *fn,
    double fs, Gsize_t *gsp
) {
    Gsize_t gs;
    PIXsize_t ps;
    HFONT font;
    int i;
    SIZE size;
    gs.x = 0, gs.y = fs;
    ps = sdrawtopix (widget, gs);
    if (!(font = findfont (fn, ps.y))) {
        gsp->x = 1, gsp->y = 1;
        return 0;
    }
    SETFONT (font);
    /* ps is reused as an accumulator: max width, total height */
    for (ps.x = ps.y = 0, i = 0; i < n; i++) {
        GetTextExtentPoint32 (GC, tlp[i].p, (int) tlp[i].n, &size);
        ps.x = max (ps.x, size.cx), ps.y += size.cy;
    }
    *gsp = spixtodraw (widget, ps);
    return 0;
}
/* Look up (or create and cache) an HFONT for the printf-style name
 * pattern `name` at pixel height `size`.  An empty name or a failed
 * CreateFont falls back to the default font Gfontp[0].font. */
static HFONT findfont (char *name, int size) {
    HFONT font;
    int fi;
    if (name[0] == '\000')
        return Gfontp[0].font;
    /* name may embed a %d for the size, e.g. "Arial-%d" */
    sprintf (&Gbufp[0], name, size);
    /* cache hit: same expanded name and size */
    for (fi = 0; fi < Gfontn; fi++)
        if (strcmp (&Gbufp[0], Gfontp[fi].name) == 0 && Gfontp[fi].size == size)
            return Gfontp[fi].font;
    font = CreateFont (
        (int) size, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, &Gbufp[0]
    );
    if (!font)
        font = Gfontp[0].font;
    /* cache the result (even the fallback, so we do not retry) */
    Gfontp = Marraygrow (Gfontp, (long) (Gfontn + 1) * FONTSIZE);
    Gfontp[Gfontn].name = strdup (&Gbufp[0]);
    Gfontp[Gfontn].size = size;
    Gfontp[Gfontn].font = font;
    Gfontn++;
    return font;
}
/* Allocate an s.x by s.y off-screen bitmap bound to the given canvas
 * widget.  Returns 0 on success, -1 on error. */
int GCcreatebitmap (Gwidget_t *widget, Gbitmap_t *bitmap, Gsize_t s) {
    if (!widget) {
        Gerr (POS, G_ERRNOPARENTWIDGET);
        return -1;
    }
    if (!bitmap) {
        Gerr (POS, G_ERRNOBITMAP);
        return -1;
    }
    /* one plane, Gdepth bits per pixel, uninitialized contents */
    if (!(bitmap->u.bmap.orig = CreateBitmap (
        (int) s.x, (int) s.y, 1, Gdepth, NULL
    ))) {
        Gerr (POS, G_ERRCANNOTCREATEBITMAP);
        return -1;
    }
    bitmap->u.bmap.scaled = 0;       /* scaled copy is built lazily */
    bitmap->scale.x = bitmap->scale.y = 1;
    bitmap->ctype = widget->type;
    bitmap->canvas = widget - &Gwidgets[0];
    bitmap->size = s;
    return 0;
}
/* Release a bitmap's GDI objects (the original and, if present, the
 * cached scaled copy).  Returns 0 on success, -1 if bitmap is NULL. */
int GCdestroybitmap (Gbitmap_t *bitmap) {
    if (!bitmap) {
        Gerr (POS, G_ERRNOBITMAP);
        return -1;
    }
    DeleteObject (bitmap->u.bmap.orig);
    if (bitmap->u.bmap.scaled)
        DeleteObject (bitmap->u.bmap.scaled);
    return 0;
}
/* Load a binary PPM ("P6") image from fp into a new bitmap bound to the
 * canvas widget.  The goto-based header scanner tolerates '#' comment
 * lines and splits the header into three steps: magic, width, height.
 * NOTE(review): the "255" maxval token appears to be consumed as part of
 * the height line rather than parsed -- matches the writer below, but
 * verify against arbitrary PPM producers.  Returns 0 / -1 on error. */
int GCreadbitmap (Gwidget_t *widget, Gbitmap_t *bitmap, FILE *fp) {
    Gsize_t s;
    HDC gc;
    char bufp[2048];
    unsigned int rgb[3];
    char *s1, *s2;
    char c;
    int bufn, bufi, step, x, y, k;
    if (!widget) {
        Gerr (POS, G_ERRNOPARENTWIDGET);
        return -1;
    }
    if (!bitmap) {
        Gerr (POS, G_ERRNOBITMAP);
        return -1;
    }
    step = 0;
    /* header scan: l1 reads a fresh line, l2 resumes within a line */
    while (step < 3) {
l1:
        if (!fgets (bufp, 2048, fp)) {
            Gerr (POS, G_ERRCANNOTREADBITMAP);
            return -1;
        }
        s1 = &bufp[0];
l2:
        for (; *s1 && isspace (*s1); s1++)
            ;
        if (!*s1 || *s1 == '#')
            goto l1;                 /* blank or comment line: next line */
        switch (step) {
        case 0:                      /* magic number */
            if (strncmp (s1, "P6", 2) != 0) {
                Gerr (POS, G_ERRCANNOTREADBITMAP);
                return -1;
            }
            step++, s1 += 2;
            goto l2;
        case 1:                      /* width: parse a run of digits */
            for (s2 = s1; *s2 && *s2 >= '0' && *s2 <= '9'; s2++)
                ;
            c = *s2, *s2 = 0;        /* temporarily NUL-terminate for atoi */
            if (s2 == s1 || (s.x = atoi (s1)) <= 0) {
                *s2 = c, Gerr (POS, G_ERRCANNOTREADBITMAP);
                return -1;
            }
            *s2 = c, step++, s1 = s2;
            goto l2;
        case 2:                      /* height: same technique */
            for (s2 = s1; *s2 && *s2 >= '0' && *s2 <= '9'; s2++)
                ;
            c = *s2, *s2 = 0;
            if (s2 == s1 || (s.y = atoi (s1)) <= 0) {
                *s2 = c, Gerr (POS, G_ERRCANNOTREADBITMAP);
                return -1;
            }
            *s2 = c, step++, s1 = s2;
            goto l2;
        }
    }
    if (!(bitmap->u.bmap.orig = CreateBitmap (
        (int) s.x, (int) s.y, 1, Gdepth, NULL
    ))) {
        Gerr (POS, G_ERRCANNOTCREATEBITMAP);
        return -1;
    }
    gc = CreateCompatibleDC (GC);
    SelectObject (gc, bitmap->u.bmap.orig);
    bitmap->u.bmap.scaled = 0;
    bitmap->scale.x = bitmap->scale.y = 1;
    bitmap->ctype = widget->type;
    bitmap->canvas = widget - &Gwidgets[0];
    bitmap->size = s;
    /* pixel data: buffered reads of raw RGB byte triples */
    bufi = bufn = 0;
    bufp[bufi] = 0;
    for (y = 0; y < s.y; y++) {
        for (x = 0; x < s.x; x++) {
            for (k = 0; k < 3; k++) {
                if (bufi == bufn) {  /* refill the read buffer */
                    if ((bufn = fread (bufp, 1, 2047, fp)) == 0) {
                        if (ferror (fp))
                            bufn = -1;
                        DeleteDC (gc);
                        DeleteObject (bitmap->u.bmap.orig);
                        Gerr (POS, G_ERRCANNOTCREATEBITMAP);
                        return -1;
                    }
                    bufi = 0;
                }
                rgb[k] = (unsigned char) bufp[bufi++];
            }
            SetPixel (gc, x, y, RGB (rgb[0], rgb[1], rgb[2]));
        }
    }
    DeleteDC (gc);
    return 0;
}
/* Write a bitmap to fp as a binary PPM ("P6", maxval 255), reading
 * pixels back one at a time through a compatible DC and buffering the
 * RGB triples.  Returns 0 on success, -1 on error. */
int GCwritebitmap (Gbitmap_t *bitmap, FILE *fp) {
    Gwidget_t *widget;
    HDC gc;
    COLORREF color;
    char bufp[2048];
    int bufi, x, y, w, h;
    if (!bitmap) {
        Gerr (POS, G_ERRNOBITMAP);
        return -1;
    }
    /* validate the widget index the bitmap was created against */
    if (
        bitmap->canvas < 0 || bitmap->canvas >= Gwidgetn ||
        !Gwidgets[bitmap->canvas].inuse
    ) {
        Gerr (POS, G_ERRBADWIDGETID, bitmap->canvas);
        return -1;
    }
    widget = &Gwidgets[bitmap->canvas];
    if (widget->type != G_CANVASWIDGET && widget->type != G_PCANVASWIDGET) {
        Gerr (POS, G_ERRNOTACANVAS, bitmap->canvas);
        return -1;
    }
    gc = CreateCompatibleDC (GC);
    SelectObject (gc, bitmap->u.bmap.orig);
    fprintf (fp, "P6\n%d %d 255\n", (int) bitmap->size.x, (int) bitmap->size.y);
    bufi = 0;
    w = bitmap->size.x;
    h = bitmap->size.y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            color = GetPixel (gc, x, y);
            bufp[bufi++] = GetRValue (color);
            bufp[bufi++] = GetGValue (color);
            bufp[bufi++] = GetBValue (color);
            /* flush before the next triple could overflow the buffer */
            if (bufi + 3 >= 2048) {
                fwrite (bufp, 1, bufi, fp);
                bufi = 0;
            }
        }
    }
    if (bufi > 0)
        fwrite (bufp, 1, bufi, fp);
    DeleteDC (gc);
    return 0;
}
/* Blit between a bitmap and the canvas.  mode "b2c" copies bitmap
 * region -> canvas rect gr anchored at bitmap point gp; "c2b" copies
 * canvas -> bitmap.  When the window-to-viewport scale is not 1:1 a
 * scaled copy of the bitmap is (re)built via scalebitmap and used
 * instead.  Returns 1 if the canvas rect is clipped out, 0 otherwise. */
int GCbitblt (
    Gwidget_t *widget, Gpoint_t gp, Grect_t gr, Gbitmap_t *bitmap,
    char *mode, Ggattr_t *ap
) {
    PIXrect_t pr, r;
    PIXpoint_t pp;
    PIXsize_t s;
    Gsize_t scale;
    Gxy_t p;
    HBITMAP pix;
    HDC gc;
    double tvx, tvy, twx, twy;
    /* normalize the rect so o is the min corner */
    if (gr.o.x > gr.c.x)
        p.x = gr.o.x, gr.o.x = gr.c.x, gr.c.x = p.x;
    if (gr.o.y > gr.c.y)
        p.y = gr.o.y, gr.o.y = gr.c.y, gr.c.y = p.y;
    if (strcmp (mode, "b2c") == 0) {
        if (!ISVISIBLE (gr))
            return 1;
        /* current viewport/window scale factors */
        tvx = WCU->vsize.x, tvy = WCU->vsize.y;
        twx = WCU->wrect.c.x - WCU->wrect.o.x;
        twy = WCU->wrect.c.y - WCU->wrect.o.y;
        scale.x = tvx / twx, scale.y = tvy / twy;
        if (scale.x == 1 && scale.y == 1)
            pix = bitmap->u.bmap.orig;
        else {
            /* rebuild the cached scaled copy only when the scale changed */
            if (scale.x != bitmap->scale.x || scale.y != bitmap->scale.y)
                scalebitmap (widget, bitmap, scale, TRUE, 1);
            pix = bitmap->u.bmap.scaled;
        }
        pr = rdrawtopix (widget, gr);
        pp = pdrawtobpix (bitmap, gp);
        s.x = pr.c.x - pr.o.x + 1, s.y = pr.c.y - pr.o.y + 1;
        /* r is the source rect in bitmap pixels, anchored at pp */
        r.o.x = pp.x, r.o.y = pp.y - s.y + 1;
        r.c.x = r.o.x + s.x - 1, r.c.y = r.o.y + s.y - 1;
        /* clamp both rects to the bitmap and canvas bounds in lockstep */
        if (r.o.x < 0)
            pr.o.x -= r.o.x, r.o.x = 0;
        if (r.o.y < 0)
            pr.o.y -= r.o.y, r.o.y = 0;
        if (r.c.x >= bitmap->size.x * scale.x) {
            pr.c.x -= (r.c.x + 1 - bitmap->size.x * scale.x);
            r.c.x = bitmap->size.x * scale.x - 1;
        }
        if (r.c.y >= bitmap->size.y * scale.y) {
            pr.c.y -= (r.c.y + 1 - bitmap->size.y * scale.y);
            r.c.y = bitmap->size.y * scale.y - 1;
        }
        if (pr.o.x < 0)
            r.o.x -= pr.o.x, pr.o.x = 0;
        if (pr.o.y < 0)
            r.o.y -= pr.o.y, pr.o.y = 0;
        setgattr (widget, ap);
        gc = CreateCompatibleDC (GC);
        SelectObject (gc, pix);
        BitBlt (
            GC, pr.o.x, pr.o.y, r.c.x - r.o.x + 1, r.c.y - r.o.y + 1,
            gc, r.o.x, r.o.y, (WCU->gattr.mode == G_SRC) ? SRCCOPY : SRCINVERT
        );
        DeleteDC (gc);
    } else if (strcmp (mode, "c2b") == 0) {
        tvx = WCU->vsize.x, tvy = WCU->vsize.y;
        twx = WCU->wrect.c.x - WCU->wrect.o.x;
        twy = WCU->wrect.c.y - WCU->wrect.o.y;
        scale.x = tvx / twx, scale.y = tvy / twy;
        if (scale.x == 1 && scale.y == 1)
            pix = bitmap->u.bmap.orig;
        else {
            if (scale.x != bitmap->scale.x || scale.y != bitmap->scale.y)
                scalebitmap (widget, bitmap, scale, FALSE, 1);
            pix = bitmap->u.bmap.scaled;
        }
        pr = rdrawtobpix (bitmap, gr);
        pp = pdrawtopix (widget, gp);
        s.x = pr.c.x - pr.o.x + 1, s.y = pr.c.y - pr.o.y + 1;
        r.o.x = pp.x, r.o.y = pp.y - s.y + 1;
        r.c.x = r.o.x + s.x - 1, r.c.y = r.o.y + s.y - 1;
        if (pr.o.x < 0)
            r.o.x -= pr.o.x, pr.o.x = 0;
        if (pr.o.y < 0)
            r.o.y -= pr.o.y, pr.o.y = 0;
        if (pr.c.x >= bitmap->size.x * scale.x) {
            r.c.x -= (pr.c.x + 1 - bitmap->size.x * scale.x);
            pr.c.x = bitmap->size.x * scale.x - 1;
        }
        if (pr.c.y >= bitmap->size.y * scale.y) {
            r.c.y -= (pr.c.y + 1 - bitmap->size.y * scale.y);
            pr.c.y = bitmap->size.y * scale.y - 1;
        }
        if (r.o.x < 0)
            pr.o.x -= r.o.x, r.o.x = 0;
        if (r.o.y < 0)
            pr.o.y -= r.o.y, r.o.y = 0;
        setgattr (widget, ap);
        gc = CreateCompatibleDC (GC);
        SelectObject (gc, pix);
        BitBlt (
            gc, pr.o.x, pr.o.y, r.c.x - r.o.x + 1, r.c.y - r.o.y + 1,
            GC, r.o.x, r.o.y, (WCU->gattr.mode == G_SRC) ? SRCCOPY : SRCINVERT
        );
        /* propagate the canvas pixels back down into the original bitmap */
        if (pix != bitmap->u.bmap.orig)
            scalebitmap (widget, bitmap, scale, TRUE, -1);
        DeleteDC (gc);
    }
    return 0;
}
/* Resample a bitmap by the given scale.  dir == 1 builds/refreshes the
 * cached scaled copy from the original; dir == -1 resamples the scaled
 * copy back into the original.  When copybits is FALSE only the scaled
 * bitmap object is (re)allocated, without transferring pixels.  The
 * pixel transfer is an area-average: each destination pixel accumulates
 * source pixels weighted by their fractional overlap (xf2*yf2/prod).
 * Returns 0 on success, -1 if a bitmap cannot be allocated. */
static int scalebitmap (
    Gwidget_t *widget, Gbitmap_t *bitmap, Gsize_t scale,
    int copybits, int dir
) {
    Gsize_t nsize, o2n;
    HBITMAP opix, spix;
    COLORREF color;
    HDC gc1, gc2;
    int x, y, x2, y2, xp, yp;
    double prod, rgb[3], xr2, yr2, xl2, yl2, xf2, yf2, xr, yr, xl, yl;
    if (!copybits) {
        /* allocate-only path: used before a c2b blit overwrites pixels */
        if (dir == 1) {
            nsize.x = (int) (bitmap->size.x * scale.x);
            nsize.y = (int) (bitmap->size.y * scale.y);
            if (!(spix = CreateBitmap (
                (int) nsize.x, (int) nsize.y, 1, Gdepth, NULL
            ))) {
                Gerr (POS, G_ERRCANNOTCREATEBITMAP);
                return -1;
            }
            if (bitmap->u.bmap.scaled)
                DeleteObject (bitmap->u.bmap.scaled);
            bitmap->u.bmap.scaled = spix;
            bitmap->scale = scale;
        }
        return 0;
    }
    if (dir == 1) {
        /* original -> scaled; o2n is the source step per dest pixel */
        nsize.x = (int) (bitmap->size.x * scale.x);
        nsize.y = (int) (bitmap->size.y * scale.y);
        o2n.x = 1 / scale.x, o2n.y = 1 / scale.y;
        if (!(spix = CreateBitmap (
            (int) nsize.x, (int) nsize.y, 1, Gdepth, NULL
        ))) {
            Gerr (POS, G_ERRCANNOTCREATEBITMAP);
            return -1;
        }
        opix = bitmap->u.bmap.orig;
    } else {
        /* scaled -> original */
        nsize.x = (int) bitmap->size.x;
        nsize.y = (int) bitmap->size.y;
        o2n.x = scale.x, o2n.y = scale.y;
        spix = bitmap->u.bmap.orig;
        opix = bitmap->u.bmap.scaled;
    }
    gc1 = CreateCompatibleDC (GC);
    SelectObject (gc1, opix);
    gc2 = CreateCompatibleDC (GC);
    SelectObject (gc2, spix);
    prod = o2n.x * o2n.y;            /* total source area per dest pixel */
    /* (x,y) track the source pixel; (xl,yl) the fraction of it already
     * consumed; (xr,yr) the source extent still owed to this dest pixel */
    y = 0;
    yr = o2n.y;
    yl = 0;
    for (yp = 0; yp < nsize.y; yp++) {
        x = 0;
        xr = o2n.x;
        xl = 0;
        for (xp = 0; xp < nsize.x; xp++) {
            y2 = y;
            yr2 = yr;
            yl2 = yl;
            rgb[0] = rgb[1] = rgb[2] = 0;
            do {
                x2 = x;
                xr2 = xr;
                xl2 = xl;
                yf2 = (yl2 + yr2 > 1) ? 1 - yl2 : yr2, yr2 -= yf2;
                do {
                    xf2 = (xl2 + xr2 > 1) ? 1 - xl2 : xr2, xr2 -= xf2;
                    color = GetPixel (gc1, x2, y2);
                    rgb[0] += (GetRValue (color) * xf2 * yf2 / prod);
                    rgb[1] += (GetGValue (color) * xf2 * yf2 / prod);
                    rgb[2] += (GetBValue (color) * xf2 * yf2 / prod);
                    xl2 += xf2;
                    if (xl2 >= 1)
                        x2++, xl2 -= 1;
                } while (xr2 > 0);
                xr2 = o2n.x;
                yl2 += yf2;
                if (yl2 >= 1)
                    y2++, yl2 -= 1;
            } while (yr2 > 0);
            yr2 = o2n.y;
            SetPixel (gc2, xp, yp, RGB (rgb[0], rgb[1], rgb[2]));
            x = x2;
            xr = xr2;
            xl = xl2;
        }
        y = y2;
        yr = yr2;
        yl = yl2;
    }
    DeleteDC (gc1);
    DeleteDC (gc2);
    if (dir == 1) {
        if (bitmap->u.bmap.scaled)
            DeleteObject (bitmap->u.bmap.scaled);
        bitmap->u.bmap.scaled = spix;
        bitmap->scale = scale;
    }
    return 0;
}
/* Report the current mouse position in world coordinates through gpp
 * and the number of mouse buttons currently held down through count. */
int GCgetmousecoords (Gwidget_t *widget, Gpoint_t *gpp, int *count) {
    POINT screen;
    PIXpoint_t pix;
    int pressed = 0;
    GetCursorPos (&screen);
    ScreenToClient (widget->w, &screen);   /* screen -> canvas pixels */
    pix.x = screen.x, pix.y = screen.y;
    *gpp = ppixtodraw (widget, pix);
    /* GetAsyncKeyState is negative while the button is down */
    if (GetAsyncKeyState (VK_LBUTTON) < 0)
        pressed++;
    if (GetAsyncKeyState (VK_MBUTTON) < 0)
        pressed++;
    if (GetAsyncKeyState (VK_RBUTTON) < 0)
        pressed++;
    *count = pressed;
    return 0;
}
/* Install the graphics attributes in ap into the widget's DC.  Fields
 * whose flag bit is unset fall back to the stored defaults.  Pen, brush
 * and text color are only re-created when color/width/style actually
 * changed since the last call. */
static void setgattr (Gwidget_t *widget, Ggattr_t *ap) {
    HBRUSH brush, pbrush;
    HPEN pen, ppen;
    PALETTEENTRY *colorp;
    long color, mode, style, width, flag, pati;
    double intens;
    if (!(ap->flags & G_GATTRCOLOR))
        ap->color = WCU->defgattr.color;
    if (!(ap->flags & G_GATTRWIDTH))
        ap->width = WCU->defgattr.width;
    if (!(ap->flags & G_GATTRMODE))
        ap->mode = WCU->defgattr.mode;
    if (!(ap->flags & G_GATTRFILL))
        ap->fill = WCU->defgattr.fill;
    if (!(ap->flags & G_GATTRSTYLE))
        ap->style = WCU->defgattr.style;
    flag = FALSE;                    /* set when pen/brush must be rebuilt */
    mode = ap->mode;
    if (mode != WCU->gattr.mode) {
        WCU->gattr.mode = mode;
        SetROP2 (GC, (int) mode);
    }
    WCU->gattr.fill = ap->fill;
    color = ap->color;
    /* unknown or unused color indices fall back to 1 (black) */
    if (color >= G_MAXCOLORS || !(WCU->colors[color].inuse))
        color = 1;
    if (color != WCU->gattr.color)
        WCU->gattr.color = color, flag = TRUE;
    width = ap->width;
    if (width != WCU->gattr.width)
        WCU->gattr.width = width, flag = TRUE;
    style = ap->style;
    if (style != WCU->gattr.style)
        WCU->gattr.style = style, flag = TRUE;
    if (!flag)
        return;                      /* DC objects already match */
    WCU->gattr.color = color;
    if (Gdepth == 1) {
        /* monochrome: map the color's luminance onto one of the 17
         * cached dither-pattern brushes (log scale; 2.7725887222 is
         * presumably -ln(1/16) -- confirm against the X11 backend) */
        colorp = &WCU->colors[color].color;
        intens = (
            0.3 * colorp->peBlue + 0.59 * colorp->peRed +
            0.11 * colorp->peGreen
        ) / 255.0;
        pati = (intens <= 0.0625) ? 16 : -16.0 * (log (intens) / 2.7725887222);
        brush = WCU->grays[pati];
    } else
        brush = CreateSolidBrush (PALETTEINDEX (WCU->gattr.color));
    pbrush = SelectObject (GC, brush);
    /* pattern brushes are cached in WCU->grays, so only delete the
     * replaced brush on color displays */
    if (Gdepth != 1)
        DeleteObject (pbrush);
    pen = CreatePen (
        (int) gstyles[WCU->gattr.style], WCU->gattr.width,
        PALETTEINDEX (WCU->gattr.color)
    );
    ppen = SelectObject (GC, pen);
    DeleteObject (ppen);
    SetTextColor (GC, PALETTEINDEX (WCU->gattr.color));
}
/* Map a world-coordinate rect onto viewport pixels.  The y axis is
 * flipped (world y grows up, pixels grow down), so the rect's corners
 * swap roles vertically. */
static PIXrect_t rdrawtopix (Gwidget_t *widget, Grect_t gr) {
    PIXrect_t out;
    double vx, vy, wx, wy;
    vx = WCU->vsize.x - 1, vy = WCU->vsize.y - 1;
    wx = WCU->wrect.c.x - WCU->wrect.o.x;
    wy = WCU->wrect.c.y - WCU->wrect.o.y;
    out.o.x = vx * (gr.o.x - WCU->wrect.o.x) / wx + 0.5;
    out.o.y = vy * (1.0 - (gr.c.y - WCU->wrect.o.y) / wy) + 0.5;
    out.c.x = vx * (gr.c.x - WCU->wrect.o.x) / wx + 0.5;
    out.c.y = vy * (1.0 - (gr.o.y - WCU->wrect.o.y) / wy) + 0.5;
    return out;
}
/* Map a world-coordinate point onto viewport pixels (y axis flipped). */
static PIXpoint_t pdrawtopix (Gwidget_t *widget, Gpoint_t gp) {
    PIXpoint_t pp;
    double tvx, tvy, twx, twy;
    tvx = WCU->vsize.x - 1, tvy = WCU->vsize.y - 1;
    twx = WCU->wrect.c.x - WCU->wrect.o.x;
    twy = WCU->wrect.c.y - WCU->wrect.o.y;
    pp.x = tvx * (gp.x - WCU->wrect.o.x) / twx + 0.5;
    pp.y = tvy * (1.0 - (gp.y - WCU->wrect.o.y) / twy) + 0.5;
    return pp;
}
/* Map a world-coordinate size to a pixel size; the -1/+1 adjustment
 * keeps a size of 1 world unit mapping to at least 1 pixel. */
static PIXsize_t sdrawtopix (Gwidget_t *widget, Gsize_t gs) {
    PIXsize_t ps;
    double tvx, tvy, twx, twy;
    tvx = WCU->vsize.x - 1, tvy = WCU->vsize.y - 1;
    twx = WCU->wrect.c.x - WCU->wrect.o.x;
    twy = WCU->wrect.c.y - WCU->wrect.o.y;
    ps.x = tvx * (gs.x - 1) / twx + 1.5;
    ps.y = tvy * (gs.y - 1) / twy + 1.5;
    return ps;
}
/* Inverse of pdrawtopix: map a pixel position back to world
 * coordinates (y axis flipped). */
Gpoint_t ppixtodraw (Gwidget_t *widget, PIXpoint_t pp) {
    Gpoint_t gp;
    double tvx, tvy, twx, twy;
    tvx = WCU->vsize.x - 1, tvy = WCU->vsize.y - 1;
    twx = WCU->wrect.c.x - WCU->wrect.o.x;
    twy = WCU->wrect.c.y - WCU->wrect.o.y;
    gp.x = (pp.x / tvx) * twx + WCU->wrect.o.x;
    gp.y = (1.0 - pp.y / tvy) * twy + WCU->wrect.o.y;
    return gp;
}
/* Inverse of sdrawtopix: map a pixel size back to a world size. */
static Gsize_t spixtodraw (Gwidget_t *widget, PIXsize_t ps) {
    Gsize_t gs;
    double tvx, tvy, twx, twy;
    tvx = WCU->vsize.x - 1, tvy = WCU->vsize.y - 1;
    twx = WCU->wrect.c.x - WCU->wrect.o.x;
    twy = WCU->wrect.c.y - WCU->wrect.o.y;
    gs.x = ((ps.x - 1) / tvx) * twx + 1;
    gs.y = ((ps.y - 1) / tvy) * twy + 1;
    return gs;
}
/* Inverse of rdrawtopix: map a pixel rect back to a normalized world
 * rect (o holds the min corner after the final swap fix-up). */
static Grect_t rpixtodraw (Gwidget_t *widget, PIXrect_t pr) {
    Grect_t gr;
    double tvx, tvy, twx, twy, n;
    tvx = WCU->vsize.x - 1, tvy = WCU->vsize.y - 1;
    twx = WCU->wrect.c.x - WCU->wrect.o.x;
    twy = WCU->wrect.c.y - WCU->wrect.o.y;
    /* y flip: the pixel rect's bottom maps to the world rect's origin */
    gr.o.x = (pr.o.x / tvx) * twx + WCU->wrect.o.x;
    gr.o.y = (1.0 - pr.c.y / tvy) * twy + WCU->wrect.o.y;
    gr.c.x = (pr.c.x / tvx) * twx + WCU->wrect.o.x;
    gr.c.y = (1.0 - pr.o.y / tvy) * twy + WCU->wrect.o.y;
    if (gr.o.x > gr.c.x)
        n = gr.o.x, gr.o.x = gr.c.x, gr.c.x = n;
    if (gr.o.y > gr.c.y)
        n = gr.o.y, gr.o.y = gr.c.y, gr.c.y = n;
    return gr;
}
/* map a world rectangle into bitmap pixel coordinates; bitmaps have their
   own scaled height, so only the y axis needs flipping */
static PIXrect_t rdrawtobpix (Gbitmap_t *bitmap, Grect_t gr) {
    PIXrect_t pr;
    double boty;

    /* bottom scanline of the scaled bitmap (truncated, as the original) */
    boty = (int) ((bitmap->size.y - 1) * bitmap->scale.y);
    pr.o.x = gr.o.x + 0.5;
    pr.o.y = boty - gr.c.y + 0.5;
    pr.c.x = gr.c.x + 0.5;
    pr.c.y = boty - gr.o.y + 0.5;
    return pr;
}
/* map a world point into bitmap pixel coordinates (y flipped) */
static PIXpoint_t pdrawtobpix (Gbitmap_t *bitmap, Gpoint_t gp) {
    PIXpoint_t pp;
    double boty;

    /* bottom scanline of the scaled bitmap (truncated, as the original) */
    boty = (int) ((bitmap->size.y - 1) * bitmap->scale.y);
    pp.x = gp.x + 0.5;
    pp.y = boty - gp.y + 0.5;
    return pp;
}
/* recompute the widget's clip rectangle (in world coordinates) as the
   intersection of the widget's window with its parent's client area */
void Gadjustclip (Gwidget_t *widget) {
    Gwidget_t *pwidget;
    PIXrect_t pr;
    RECT wrect, pclient, prect;

    pwidget = &Gwidgets[widget->pwi];
    GetWindowRect (widget->w, &wrect);
    GetClientRect (pwidget->w, &pclient);
    GetWindowRect (pwidget->w, &prect);
    /* origin: how far the widget extends above/left of the parent */
    pr.o.x = max (0, -(wrect.left - prect.left));
    pr.o.y = max (0, -(wrect.top - prect.top));
    /* corner: limited by both the widget size and the parent client area */
    pr.c.x = min (wrect.right - wrect.left, pr.o.x + pclient.right - pclient.left);
    pr.c.y = min (wrect.bottom - wrect.top, pr.o.y + pclient.bottom - pclient.top);
    /* degenerate overlap: never let the corner precede the origin */
    pr.c.x = max (pr.o.x, pr.c.x);
    pr.c.y = max (pr.o.y, pr.c.y);
    WCU->clip = rpixtodraw (widget, pr);
}
| epl-1.0 |
dupuisa/i-CodeCNES | fortran77-rules/src/test/resources/f77_1/ctprfb.f | 24 | 26285 | *> \brief \b CTPRFB applies a real or complex "triangular-pentagonal" blocked reflector to a real or complex matrix, which is composed of two blocks.
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download CTPRFB + dependencies
*> <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/ctprfb.f">
*> [TGZ]</a>
*> <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/ctprfb.f">
*> [ZIP]</a>
*> <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/ctprfb.f">
*> [TXT]</a>
*> \endhtmlonly
*
* Definition:
* ===========
*
* SUBROUTINE CTPRFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, L,
* V, LDV, T, LDT, A, LDA, B, LDB, WORK, LDWORK )
*
* .. Scalar Arguments ..
* CHARACTER DIRECT, SIDE, STOREV, TRANS
* INTEGER K, L, LDA, LDB, LDT, LDV, LDWORK, M, N
* ..
* .. Array Arguments ..
* COMPLEX A( LDA, * ), B( LDB, * ), T( LDT, * ),
* $ V( LDV, * ), WORK( LDWORK, * )
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> CTPRFB applies a complex "triangular-pentagonal" block reflector H or its
*> conjugate transpose H**H to a complex matrix C, which is composed of two
*> blocks A and B, either from the left or right.
*>
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] SIDE
*> \verbatim
*> SIDE is CHARACTER*1
*> = 'L': apply H or H**H from the Left
*> = 'R': apply H or H**H from the Right
*> \endverbatim
*>
*> \param[in] TRANS
*> \verbatim
*> TRANS is CHARACTER*1
*> = 'N': apply H (No transpose)
*> = 'C': apply H**H (Conjugate transpose)
*> \endverbatim
*>
*> \param[in] DIRECT
*> \verbatim
*> DIRECT is CHARACTER*1
*> Indicates how H is formed from a product of elementary
*> reflectors
*> = 'F': H = H(1) H(2) . . . H(k) (Forward)
*> = 'B': H = H(k) . . . H(2) H(1) (Backward)
*> \endverbatim
*>
*> \param[in] STOREV
*> \verbatim
*> STOREV is CHARACTER*1
*> Indicates how the vectors which define the elementary
*> reflectors are stored:
*> = 'C': Columns
*> = 'R': Rows
*> \endverbatim
*>
*> \param[in] M
*> \verbatim
*> M is INTEGER
*> The number of rows of the matrix B.
*> M >= 0.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> The number of columns of the matrix B.
*> N >= 0.
*> \endverbatim
*>
*> \param[in] K
*> \verbatim
*> K is INTEGER
*> The order of the matrix T, i.e. the number of elementary
*> reflectors whose product defines the block reflector.
*> K >= 0.
*> \endverbatim
*>
*> \param[in] L
*> \verbatim
*> L is INTEGER
*> The order of the trapezoidal part of V.
*> K >= L >= 0. See Further Details.
*> \endverbatim
*>
*> \param[in] V
*> \verbatim
*> V is COMPLEX array, dimension
*> (LDV,K) if STOREV = 'C'
*> (LDV,M) if STOREV = 'R' and SIDE = 'L'
*> (LDV,N) if STOREV = 'R' and SIDE = 'R'
*> The pentagonal matrix V, which contains the elementary reflectors
*> H(1), H(2), ..., H(K). See Further Details.
*> \endverbatim
*>
*> \param[in] LDV
*> \verbatim
*> LDV is INTEGER
*> The leading dimension of the array V.
*> If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
*> if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
*> if STOREV = 'R', LDV >= K.
*> \endverbatim
*>
*> \param[in] T
*> \verbatim
*> T is COMPLEX array, dimension (LDT,K)
*> The triangular K-by-K matrix T in the representation of the
*> block reflector.
*> \endverbatim
*>
*> \param[in] LDT
*> \verbatim
*> LDT is INTEGER
*> The leading dimension of the array T.
*> LDT >= K.
*> \endverbatim
*>
*> \param[in,out] A
*> \verbatim
*> A is COMPLEX array, dimension
*> (LDA,N) if SIDE = 'L' or (LDA,K) if SIDE = 'R'
*> On entry, the K-by-N or M-by-K matrix A.
*> On exit, A is overwritten by the corresponding block of
*> H*C or H**H*C or C*H or C*H**H. See Further Details.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*> LDA is INTEGER
*> The leading dimension of the array A.
*> If SIDE = 'L', LDC >= max(1,K);
*> If SIDE = 'R', LDC >= max(1,M).
*> \endverbatim
*>
*> \param[in,out] B
*> \verbatim
*> B is COMPLEX array, dimension (LDB,N)
*> On entry, the M-by-N matrix B.
*> On exit, B is overwritten by the corresponding block of
*> H*C or H**H*C or C*H or C*H**H. See Further Details.
*> \endverbatim
*>
*> \param[in] LDB
*> \verbatim
*> LDB is INTEGER
*> The leading dimension of the array B.
*> LDB >= max(1,M).
*> \endverbatim
*>
*> \param[out] WORK
*> \verbatim
*> WORK is COMPLEX array, dimension
*> (LDWORK,N) if SIDE = 'L',
*> (LDWORK,K) if SIDE = 'R'.
*> \endverbatim
*>
*> \param[in] LDWORK
*> \verbatim
*> LDWORK is INTEGER
*> The leading dimension of the array WORK.
*> If SIDE = 'L', LDWORK >= K;
*> if SIDE = 'R', LDWORK >= M.
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date September 2012
*
*> \ingroup complexOTHERauxiliary
*
*> \par Further Details:
* =====================
*>
*> \verbatim
*>
*> The matrix C is a composite matrix formed from blocks A and B.
*> The block B is of size M-by-N; if SIDE = 'R', A is of size M-by-K,
*> and if SIDE = 'L', A is of size K-by-N.
*>
*> If SIDE = 'R' and DIRECT = 'F', C = [A B].
*>
*> If SIDE = 'L' and DIRECT = 'F', C = [A]
*> [B].
*>
*> If SIDE = 'R' and DIRECT = 'B', C = [B A].
*>
*> If SIDE = 'L' and DIRECT = 'B', C = [B]
*> [A].
*>
*> The pentagonal matrix V is composed of a rectangular block V1 and a
*> trapezoidal block V2. The size of the trapezoidal block is determined by
*> the parameter L, where 0<=L<=K. If L=K, the V2 block of V is triangular;
*> if L=0, there is no trapezoidal block, thus V = V1 is rectangular.
*>
*> If DIRECT = 'F' and STOREV = 'C': V = [V1]
*> [V2]
*> - V2 is upper trapezoidal (first L rows of K-by-K upper triangular)
*>
*> If DIRECT = 'F' and STOREV = 'R': V = [V1 V2]
*>
*> - V2 is lower trapezoidal (first L columns of K-by-K lower triangular)
*>
*> If DIRECT = 'B' and STOREV = 'C': V = [V2]
*> [V1]
*> - V2 is lower trapezoidal (last L rows of K-by-K lower triangular)
*>
*> If DIRECT = 'B' and STOREV = 'R': V = [V2 V1]
*>
*> - V2 is upper trapezoidal (last L columns of K-by-K upper triangular)
*>
*> If STOREV = 'C' and SIDE = 'L', V is M-by-K with V2 L-by-K.
*>
*> If STOREV = 'C' and SIDE = 'R', V is N-by-K with V2 L-by-K.
*>
*> If STOREV = 'R' and SIDE = 'L', V is K-by-M with V2 K-by-L.
*>
*> If STOREV = 'R' and SIDE = 'R', V is K-by-N with V2 K-by-L.
*> \endverbatim
*>
* =====================================================================
SUBROUTINE CTPRFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, L,
$ V, LDV, T, LDT, A, LDA, B, LDB, WORK, LDWORK )
*
* -- LAPACK auxiliary routine (version 3.4.2) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
* September 2012
*
* .. Scalar Arguments ..
CHARACTER DIRECT, SIDE, STOREV, TRANS
INTEGER K, L, LDA, LDB, LDT, LDV, LDWORK, M, N
* ..
* .. Array Arguments ..
COMPLEX A( LDA, * ), B( LDB, * ), T( LDT, * ),
$ V( LDV, * ), WORK( LDWORK, * )
* ..
*
* ==========================================================================
*
* .. Parameters ..
COMPLEX ONE, ZERO
PARAMETER ( ONE = (1.0,0.0), ZERO = (0.0,0.0) )
* ..
* .. Local Scalars ..
INTEGER I, J, MP, NP, KP
LOGICAL LEFT, FORWARD, COLUMN, RIGHT, BACKWARD, ROW
* ..
* .. External Functions ..
LOGICAL LSAME
EXTERNAL LSAME
* ..
* .. External Subroutines ..
EXTERNAL CGEMM, CTRMM
* ..
* .. Intrinsic Functions ..
INTRINSIC CONJG
* ..
* .. Executable Statements ..
*
* Quick return if possible
*
IF( M.LE.0 .OR. N.LE.0 .OR. K.LE.0 .OR. L.LT.0 ) RETURN
*
IF( LSAME( STOREV, 'C' ) ) THEN
COLUMN = .TRUE.
ROW = .FALSE.
ELSE IF ( LSAME( STOREV, 'R' ) ) THEN
COLUMN = .FALSE.
ROW = .TRUE.
ELSE
COLUMN = .FALSE.
ROW = .FALSE.
END IF
*
IF( LSAME( SIDE, 'L' ) ) THEN
LEFT = .TRUE.
RIGHT = .FALSE.
ELSE IF( LSAME( SIDE, 'R' ) ) THEN
LEFT = .FALSE.
RIGHT = .TRUE.
ELSE
LEFT = .FALSE.
RIGHT = .FALSE.
END IF
*
IF( LSAME( DIRECT, 'F' ) ) THEN
FORWARD = .TRUE.
BACKWARD = .FALSE.
ELSE IF( LSAME( DIRECT, 'B' ) ) THEN
FORWARD = .FALSE.
BACKWARD = .TRUE.
ELSE
FORWARD = .FALSE.
BACKWARD = .FALSE.
END IF
*
* ---------------------------------------------------------------------------
*
IF( COLUMN .AND. FORWARD .AND. LEFT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ I ] (K-by-K)
* [ V ] (M-by-K)
*
* Form H C or H**H C where C = [ A ] (K-by-N)
* [ B ] (M-by-N)
*
* H = I - W T W**H or H**H = I - W T**H W**H
*
* A = A - T (A + V**H B) or A = A - T**H (A + V**H B)
* B = B - V T (A + V**H B) or B = B - V T**H (A + V**H B)
*
* ---------------------------------------------------------------------------
*
MP = MIN( M-L+1, M )
KP = MIN( L+1, K )
*
DO J = 1, N
DO I = 1, L
WORK( I, J ) = B( M-L+I, J )
END DO
END DO
CALL CTRMM( 'L', 'U', 'C', 'N', L, N, ONE, V( MP, 1 ), LDV,
$ WORK, LDWORK )
CALL CGEMM( 'C', 'N', L, N, M-L, ONE, V, LDV, B, LDB,
$ ONE, WORK, LDWORK )
CALL CGEMM( 'C', 'N', K-L, N, M, ONE, V( 1, KP ), LDV,
$ B, LDB, ZERO, WORK( KP, 1 ), LDWORK )
*
DO J = 1, N
DO I = 1, K
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'L', 'U', TRANS, 'N', K, N, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, N
DO I = 1, K
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'N', 'N', M-L, N, K, -ONE, V, LDV, WORK, LDWORK,
$ ONE, B, LDB )
CALL CGEMM( 'N', 'N', L, N, K-L, -ONE, V( MP, KP ), LDV,
$ WORK( KP, 1 ), LDWORK, ONE, B( MP, 1 ), LDB )
CALL CTRMM( 'L', 'U', 'N', 'N', L, N, ONE, V( MP, 1 ), LDV,
$ WORK, LDWORK )
DO J = 1, N
DO I = 1, L
B( M-L+I, J ) = B( M-L+I, J ) - WORK( I, J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( COLUMN .AND. FORWARD .AND. RIGHT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ I ] (K-by-K)
* [ V ] (N-by-K)
*
* Form C H or C H**H where C = [ A B ] (A is M-by-K, B is M-by-N)
*
* H = I - W T W**H or H**H = I - W T**H W**H
*
* A = A - (A + B V) T or A = A - (A + B V) T**H
* B = B - (A + B V) T V**H or B = B - (A + B V) T**H V**H
*
* ---------------------------------------------------------------------------
*
NP = MIN( N-L+1, N )
KP = MIN( L+1, K )
*
DO J = 1, L
DO I = 1, M
WORK( I, J ) = B( I, N-L+J )
END DO
END DO
CALL CTRMM( 'R', 'U', 'N', 'N', M, L, ONE, V( NP, 1 ), LDV,
$ WORK, LDWORK )
CALL CGEMM( 'N', 'N', M, L, N-L, ONE, B, LDB,
$ V, LDV, ONE, WORK, LDWORK )
CALL CGEMM( 'N', 'N', M, K-L, N, ONE, B, LDB,
$ V( 1, KP ), LDV, ZERO, WORK( 1, KP ), LDWORK )
*
DO J = 1, K
DO I = 1, M
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'R', 'U', TRANS, 'N', M, K, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, K
DO I = 1, M
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'N', 'C', M, N-L, K, -ONE, WORK, LDWORK,
$ V, LDV, ONE, B, LDB )
CALL CGEMM( 'N', 'C', M, L, K-L, -ONE, WORK( 1, KP ), LDWORK,
$ V( NP, KP ), LDV, ONE, B( 1, NP ), LDB )
CALL CTRMM( 'R', 'U', 'C', 'N', M, L, ONE, V( NP, 1 ), LDV,
$ WORK, LDWORK )
DO J = 1, L
DO I = 1, M
B( I, N-L+J ) = B( I, N-L+J ) - WORK( I, J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( COLUMN .AND. BACKWARD .AND. LEFT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ V ] (M-by-K)
* [ I ] (K-by-K)
*
* Form H C or H**H C where C = [ B ] (M-by-N)
* [ A ] (K-by-N)
*
* H = I - W T W**H or H**H = I - W T**H W**H
*
* A = A - T (A + V**H B) or A = A - T**H (A + V**H B)
* B = B - V T (A + V**H B) or B = B - V T**H (A + V**H B)
*
* ---------------------------------------------------------------------------
*
MP = MIN( L+1, M )
KP = MIN( K-L+1, K )
*
DO J = 1, N
DO I = 1, L
WORK( K-L+I, J ) = B( I, J )
END DO
END DO
*
CALL CTRMM( 'L', 'L', 'C', 'N', L, N, ONE, V( 1, KP ), LDV,
$ WORK( KP, 1 ), LDWORK )
CALL CGEMM( 'C', 'N', L, N, M-L, ONE, V( MP, KP ), LDV,
$ B( MP, 1 ), LDB, ONE, WORK( KP, 1 ), LDWORK )
CALL CGEMM( 'C', 'N', K-L, N, M, ONE, V, LDV,
$ B, LDB, ZERO, WORK, LDWORK )
*
DO J = 1, N
DO I = 1, K
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'L', 'L', TRANS, 'N', K, N, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, N
DO I = 1, K
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'N', 'N', M-L, N, K, -ONE, V( MP, 1 ), LDV,
$ WORK, LDWORK, ONE, B( MP, 1 ), LDB )
CALL CGEMM( 'N', 'N', L, N, K-L, -ONE, V, LDV,
$ WORK, LDWORK, ONE, B, LDB )
CALL CTRMM( 'L', 'L', 'N', 'N', L, N, ONE, V( 1, KP ), LDV,
$ WORK( KP, 1 ), LDWORK )
DO J = 1, N
DO I = 1, L
B( I, J ) = B( I, J ) - WORK( K-L+I, J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( COLUMN .AND. BACKWARD .AND. RIGHT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ V ] (N-by-K)
* [ I ] (K-by-K)
*
* Form C H or C H**H where C = [ B A ] (B is M-by-N, A is M-by-K)
*
* H = I - W T W**H or H**H = I - W T**H W**H
*
* A = A - (A + B V) T or A = A - (A + B V) T**H
* B = B - (A + B V) T V**H or B = B - (A + B V) T**H V**H
*
* ---------------------------------------------------------------------------
*
NP = MIN( L+1, N )
KP = MIN( K-L+1, K )
*
DO J = 1, L
DO I = 1, M
WORK( I, K-L+J ) = B( I, J )
END DO
END DO
CALL CTRMM( 'R', 'L', 'N', 'N', M, L, ONE, V( 1, KP ), LDV,
$ WORK( 1, KP ), LDWORK )
CALL CGEMM( 'N', 'N', M, L, N-L, ONE, B( 1, NP ), LDB,
$ V( NP, KP ), LDV, ONE, WORK( 1, KP ), LDWORK )
CALL CGEMM( 'N', 'N', M, K-L, N, ONE, B, LDB,
$ V, LDV, ZERO, WORK, LDWORK )
*
DO J = 1, K
DO I = 1, M
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'R', 'L', TRANS, 'N', M, K, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, K
DO I = 1, M
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'N', 'C', M, N-L, K, -ONE, WORK, LDWORK,
$ V( NP, 1 ), LDV, ONE, B( 1, NP ), LDB )
CALL CGEMM( 'N', 'C', M, L, K-L, -ONE, WORK, LDWORK,
$ V, LDV, ONE, B, LDB )
CALL CTRMM( 'R', 'L', 'C', 'N', M, L, ONE, V( 1, KP ), LDV,
$ WORK( 1, KP ), LDWORK )
DO J = 1, L
DO I = 1, M
B( I, J ) = B( I, J ) - WORK( I, K-L+J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( ROW .AND. FORWARD .AND. LEFT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ I V ] ( I is K-by-K, V is K-by-M )
*
* Form H C or H**H C where C = [ A ] (K-by-N)
* [ B ] (M-by-N)
*
* H = I - W**H T W or H**H = I - W**H T**H W
*
* A = A - T (A + V B) or A = A - T**H (A + V B)
* B = B - V**H T (A + V B) or B = B - V**H T**H (A + V B)
*
* ---------------------------------------------------------------------------
*
MP = MIN( M-L+1, M )
KP = MIN( L+1, K )
*
DO J = 1, N
DO I = 1, L
WORK( I, J ) = B( M-L+I, J )
END DO
END DO
CALL CTRMM( 'L', 'L', 'N', 'N', L, N, ONE, V( 1, MP ), LDV,
$ WORK, LDB )
CALL CGEMM( 'N', 'N', L, N, M-L, ONE, V, LDV,B, LDB,
$ ONE, WORK, LDWORK )
CALL CGEMM( 'N', 'N', K-L, N, M, ONE, V( KP, 1 ), LDV,
$ B, LDB, ZERO, WORK( KP, 1 ), LDWORK )
*
DO J = 1, N
DO I = 1, K
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'L', 'U', TRANS, 'N', K, N, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, N
DO I = 1, K
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'C', 'N', M-L, N, K, -ONE, V, LDV, WORK, LDWORK,
$ ONE, B, LDB )
CALL CGEMM( 'C', 'N', L, N, K-L, -ONE, V( KP, MP ), LDV,
$ WORK( KP, 1 ), LDWORK, ONE, B( MP, 1 ), LDB )
CALL CTRMM( 'L', 'L', 'C', 'N', L, N, ONE, V( 1, MP ), LDV,
$ WORK, LDWORK )
DO J = 1, N
DO I = 1, L
B( M-L+I, J ) = B( M-L+I, J ) - WORK( I, J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( ROW .AND. FORWARD .AND. RIGHT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ I V ] ( I is K-by-K, V is K-by-N )
*
* Form C H or C H**H where C = [ A B ] (A is M-by-K, B is M-by-N)
*
* H = I - W**H T W or H**H = I - W**H T**H W
*
* A = A - (A + B V**H) T or A = A - (A + B V**H) T**H
* B = B - (A + B V**H) T V or B = B - (A + B V**H) T**H V
*
* ---------------------------------------------------------------------------
*
NP = MIN( N-L+1, N )
KP = MIN( L+1, K )
*
DO J = 1, L
DO I = 1, M
WORK( I, J ) = B( I, N-L+J )
END DO
END DO
CALL CTRMM( 'R', 'L', 'C', 'N', M, L, ONE, V( 1, NP ), LDV,
$ WORK, LDWORK )
CALL CGEMM( 'N', 'C', M, L, N-L, ONE, B, LDB, V, LDV,
$ ONE, WORK, LDWORK )
CALL CGEMM( 'N', 'C', M, K-L, N, ONE, B, LDB,
$ V( KP, 1 ), LDV, ZERO, WORK( 1, KP ), LDWORK )
*
DO J = 1, K
DO I = 1, M
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'R', 'U', TRANS, 'N', M, K, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, K
DO I = 1, M
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'N', 'N', M, N-L, K, -ONE, WORK, LDWORK,
$ V, LDV, ONE, B, LDB )
CALL CGEMM( 'N', 'N', M, L, K-L, -ONE, WORK( 1, KP ), LDWORK,
$ V( KP, NP ), LDV, ONE, B( 1, NP ), LDB )
CALL CTRMM( 'R', 'L', 'N', 'N', M, L, ONE, V( 1, NP ), LDV,
$ WORK, LDWORK )
DO J = 1, L
DO I = 1, M
B( I, N-L+J ) = B( I, N-L+J ) - WORK( I, J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( ROW .AND. BACKWARD .AND. LEFT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ V I ] ( I is K-by-K, V is K-by-M )
*
* Form H C or H**H C where C = [ B ] (M-by-N)
* [ A ] (K-by-N)
*
* H = I - W**H T W or H**H = I - W**H T**H W
*
* A = A - T (A + V B) or A = A - T**H (A + V B)
* B = B - V**H T (A + V B) or B = B - V**H T**H (A + V B)
*
* ---------------------------------------------------------------------------
*
MP = MIN( L+1, M )
KP = MIN( K-L+1, K )
*
DO J = 1, N
DO I = 1, L
WORK( K-L+I, J ) = B( I, J )
END DO
END DO
CALL CTRMM( 'L', 'U', 'N', 'N', L, N, ONE, V( KP, 1 ), LDV,
$ WORK( KP, 1 ), LDWORK )
CALL CGEMM( 'N', 'N', L, N, M-L, ONE, V( KP, MP ), LDV,
$ B( MP, 1 ), LDB, ONE, WORK( KP, 1 ), LDWORK )
CALL CGEMM( 'N', 'N', K-L, N, M, ONE, V, LDV, B, LDB,
$ ZERO, WORK, LDWORK )
*
DO J = 1, N
DO I = 1, K
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'L', 'L ', TRANS, 'N', K, N, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, N
DO I = 1, K
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'C', 'N', M-L, N, K, -ONE, V( 1, MP ), LDV,
$ WORK, LDWORK, ONE, B( MP, 1 ), LDB )
CALL CGEMM( 'C', 'N', L, N, K-L, -ONE, V, LDV,
$ WORK, LDWORK, ONE, B, LDB )
CALL CTRMM( 'L', 'U', 'C', 'N', L, N, ONE, V( KP, 1 ), LDV,
$ WORK( KP, 1 ), LDWORK )
DO J = 1, N
DO I = 1, L
B( I, J ) = B( I, J ) - WORK( K-L+I, J )
END DO
END DO
*
* ---------------------------------------------------------------------------
*
ELSE IF( ROW .AND. BACKWARD .AND. RIGHT ) THEN
*
* ---------------------------------------------------------------------------
*
* Let W = [ V I ] ( I is K-by-K, V is K-by-N )
*
* Form C H or C H**H where C = [ B A ] (A is M-by-K, B is M-by-N)
*
* H = I - W**H T W or H**H = I - W**H T**H W
*
* A = A - (A + B V**H) T or A = A - (A + B V**H) T**H
* B = B - (A + B V**H) T V or B = B - (A + B V**H) T**H V
*
* ---------------------------------------------------------------------------
*
NP = MIN( L+1, N )
KP = MIN( K-L+1, K )
*
DO J = 1, L
DO I = 1, M
WORK( I, K-L+J ) = B( I, J )
END DO
END DO
CALL CTRMM( 'R', 'U', 'C', 'N', M, L, ONE, V( KP, 1 ), LDV,
$ WORK( 1, KP ), LDWORK )
CALL CGEMM( 'N', 'C', M, L, N-L, ONE, B( 1, NP ), LDB,
$ V( KP, NP ), LDV, ONE, WORK( 1, KP ), LDWORK )
CALL CGEMM( 'N', 'C', M, K-L, N, ONE, B, LDB, V, LDV,
$ ZERO, WORK, LDWORK )
*
DO J = 1, K
DO I = 1, M
WORK( I, J ) = WORK( I, J ) + A( I, J )
END DO
END DO
*
CALL CTRMM( 'R', 'L', TRANS, 'N', M, K, ONE, T, LDT,
$ WORK, LDWORK )
*
DO J = 1, K
DO I = 1, M
A( I, J ) = A( I, J ) - WORK( I, J )
END DO
END DO
*
CALL CGEMM( 'N', 'N', M, N-L, K, -ONE, WORK, LDWORK,
$ V( 1, NP ), LDV, ONE, B( 1, NP ), LDB )
CALL CGEMM( 'N', 'N', M, L, K-L , -ONE, WORK, LDWORK,
$ V, LDV, ONE, B, LDB )
CALL CTRMM( 'R', 'U', 'N', 'N', M, L, ONE, V( KP, 1 ), LDV,
$ WORK( 1, KP ), LDWORK )
DO J = 1, L
DO I = 1, M
B( I, J ) = B( I, J ) - WORK( I, K-L+J )
END DO
END DO
*
END IF
*
RETURN
*
* End of CTPRFB
*
END
| epl-1.0 |
modulexcite/msiext | externals/cppunit/examples/msvc6/CppUnitTestApp/CppUnitTestApp.cpp | 28 | 2148 | // CppUnitTestApp.cpp : Defines the class behaviors for the application.
//
#include "stdafx.h"
#include "CppUnitTestApp.h"
#include "CppUnitTestAppDlg.h"
#include <cppunit/ui/mfc/TestRunner.h>
#include <cppunit/extensions/TestFactoryRegistry.h>
#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
/////////////////////////////////////////////////////////////////////////////
// CppUnitTestApp
// Message map for the application object. Only the framework-provided
// ID_HELP handler is wired up; the block between the AFX_MSG_MAP markers
// is maintained by ClassWizard and must not be edited by hand.
BEGIN_MESSAGE_MAP(CppUnitTestApp, CWinApp)
	//{{AFX_MSG_MAP(CppUnitTestApp)
		// NOTE - the ClassWizard will add and remove mapping macros here.
		//    DO NOT EDIT what you see in these blocks of generated code!
	//}}AFX_MSG
	ON_COMMAND(ID_HELP, CWinApp::OnHelp)
END_MESSAGE_MAP()
/////////////////////////////////////////////////////////////////////////////
// CppUnitTestApp construction
CppUnitTestApp::CppUnitTestApp()
{
	// Intentionally empty: all significant initialization is deferred to
	// InitInstance(), per the standard MFC application pattern.
}
/////////////////////////////////////////////////////////////////////////////
// The one and only CppUnitTestApp object
// Global application instance required by MFC; the framework invokes its
// InitInstance() after startup.
CppUnitTestApp theApp;
/////////////////////////////////////////////////////////////////////////////
// CppUnitTestApp initialization
// Application entry point called by the MFC framework after startup.
// Runs the CppUnit test suite in a modal dialog and then exits without
// entering a message loop (hence the FALSE return).
BOOL
CppUnitTestApp::InitInstance()
{
	AfxEnableControlContainer();
	// Standard initialization
	// If you are not using these features and wish to reduce the size
	//  of your final executable, you should remove from the following
	//  the specific initialization routines you do not need.
#ifdef _AFXDLL
# if _MSC_VER < 1300 // vc6
	// Only needed on VC6; later toolchains enable 3D controls by default.
	Enable3dControls();			// Call this when using MFC in a shared DLL
# endif
#else
	Enable3dControlsStatic();	// Call this when linking to MFC statically
#endif
	SetRegistryKey(_T("Local AppWizard-Generated Applications"));
	RunTests();
	// Since the dialog has been closed, return FALSE so that we exit the
	//  application, rather than start the application's message pump.
	return FALSE;
}
void
CppUnitTestApp::RunTests()
{
CPPUNIT_NS::MfcUi::TestRunner runner;
runner.addTest( CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest() );
runner.run();
}
| epl-1.0 |
dupuisa/i-CodeCNES | fortran77-rules/src/test/resources/f77_4/zlahilb.f | 32 | 8002 | *> \brief \b ZLAHILB
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* SUBROUTINE ZLAHILB(N, NRHS, A, LDA, X, LDX, B, LDB, WORK,
* INFO, PATH)
*
* .. Scalar Arguments ..
* INTEGER N, NRHS, LDA, LDX, LDB, INFO
* .. Array Arguments ..
* DOUBLE PRECISION WORK(N)
* COMPLEX*16 A(LDA,N), X(LDX, NRHS), B(LDB, NRHS)
* CHARACTER*3 PATH
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> ZLAHILB generates an N by N scaled Hilbert matrix in A along with
*> NRHS right-hand sides in B and solutions in X such that A*X=B.
*>
*> The Hilbert matrix is scaled by M = LCM(1, 2, ..., 2*N-1) so that all
*> entries are integers. The right-hand sides are the first NRHS
*> columns of M * the identity matrix, and the solutions are the
*> first NRHS columns of the inverse Hilbert matrix.
*>
*> The condition number of the Hilbert matrix grows exponentially with
*> its size, roughly as O(e ** (3.5*N)). Additionally, the inverse
*> Hilbert matrices beyond a relatively small dimension cannot be
*> generated exactly without extra precision. Precision is exhausted
*> when the largest entry in the inverse Hilbert matrix is greater than
*> 2 to the power of the number of bits in the fraction of the data type
*> used plus one, which is 24 for single precision.
*>
*> In single, the generated solution is exact for N <= 6 and has
*> small componentwise error for 7 <= N <= 11.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> The dimension of the matrix A.
*> \endverbatim
*>
*> \param[in] NRHS
*> \verbatim
*> NRHS is INTEGER
*> The requested number of right-hand sides.
*> \endverbatim
*>
*> \param[out] A
*> \verbatim
*> A is COMPLEX array, dimension (LDA, N)
*> The generated scaled Hilbert matrix.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*> LDA is INTEGER
*> The leading dimension of the array A. LDA >= N.
*> \endverbatim
*>
*> \param[out] X
*> \verbatim
*> X is COMPLEX array, dimension (LDX, NRHS)
*> The generated exact solutions. Currently, the first NRHS
*> columns of the inverse Hilbert matrix.
*> \endverbatim
*>
*> \param[in] LDX
*> \verbatim
*> LDX is INTEGER
*> The leading dimension of the array X. LDX >= N.
*> \endverbatim
*>
*> \param[out] B
*> \verbatim
*> B is REAL array, dimension (LDB, NRHS)
*> The generated right-hand sides. Currently, the first NRHS
*> columns of LCM(1, 2, ..., 2*N-1) * the identity matrix.
*> \endverbatim
*>
*> \param[in] LDB
*> \verbatim
*> LDB is INTEGER
*> The leading dimension of the array B. LDB >= N.
*> \endverbatim
*>
*> \param[out] WORK
*> \verbatim
*> WORK is REAL array, dimension (N)
*> \endverbatim
*>
*> \param[out] INFO
*> \verbatim
*> INFO is INTEGER
*> = 0: successful exit
*> = 1: N is too large; the data is still generated but may not
*> be not exact.
*> < 0: if INFO = -i, the i-th argument had an illegal value
*> \endverbatim
*>
*> \param[in] PATH
*> \verbatim
*> PATH is CHARACTER*3
*> The LAPACK path name.
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2011
*
*> \ingroup complex16_lin
*
* =====================================================================
      SUBROUTINE ZLAHILB(N, NRHS, A, LDA, X, LDX, B, LDB, WORK,
     $                   INFO, PATH)
*
*  -- LAPACK test routine (version 3.4.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     November 2011
*
*     Generates the N-by-N scaled Hilbert matrix A (scaled by
*     M = LCM(1,...,2*N-1) so entries are integers), NRHS right-hand
*     sides B = M * I(:,1:NRHS), and the exact solutions X (columns of
*     the inverse Hilbert matrix), with symmetric or Hermitian complex
*     scaling selected by PATH.
*
*     .. Scalar Arguments ..
      INTEGER N, NRHS, LDA, LDX, LDB, INFO
*     .. Array Arguments ..
      DOUBLE PRECISION WORK(N)
      COMPLEX*16 A(LDA,N), X(LDX, NRHS), B(LDB, NRHS)
      CHARACTER*3 PATH
*     ..
*
*  =====================================================================
*     .. Local Scalars ..
*     TM, TI, R: scratch values for the Euclidean GCD loop below.
      INTEGER TM, TI, R
*     M: the scale factor LCM(1, 2, ..., 2*N-1).
      INTEGER M
      INTEGER I, J
      COMPLEX*16 TMP
      CHARACTER*2 C2
*     ..
*     .. Parameters ..
*     NMAX_EXACT   the largest dimension where the generated data is
*                  exact.
*     NMAX_APPROX  the largest dimension where the generated data has
*                  a small componentwise relative error.
*     ??? complex uses how many bits ???
      INTEGER NMAX_EXACT, NMAX_APPROX, SIZE_D
      PARAMETER (NMAX_EXACT = 6, NMAX_APPROX = 11, SIZE_D = 8)
*
*     d's are generated from a random permutation of those eight elements.
*     D2 = conjugate of D1; INVD1/INVD2 are their elementwise inverses.
      COMPLEX*16 d1(8), d2(8), invd1(8), invd2(8)
      DATA D1 /(-1,0),(0,1),(-1,-1),(0,-1),(1,0),(-1,1),(1,1),(1,-1)/
      DATA D2 /(-1,0),(0,-1),(-1,1),(0,1),(1,0),(-1,-1),(1,-1),(1,1)/
      DATA INVD1 /(-1,0),(0,-1),(-.5,.5),(0,1),(1,0),
     $     (-.5,-.5),(.5,-.5),(.5,.5)/
      DATA INVD2 /(-1,0),(0,1),(-.5,-.5),(0,-1),(1,0),
     $     (-.5,.5),(.5,.5),(.5,-.5)/
*     ..
*     .. External Functions
      EXTERNAL ZLASET, LSAMEN
      INTRINSIC DBLE
      LOGICAL LSAMEN
*     ..
*     .. Executable Statements ..
      C2 = PATH( 2: 3 )
*
*     Test the input arguments
*
      INFO = 0
      IF (N .LT. 0 .OR. N .GT. NMAX_APPROX) THEN
         INFO = -1
      ELSE IF (NRHS .LT. 0) THEN
         INFO = -2
      ELSE IF (LDA .LT. N) THEN
         INFO = -4
      ELSE IF (LDX .LT. N) THEN
         INFO = -6
      ELSE IF (LDB .LT. N) THEN
         INFO = -8
      END IF
      IF (INFO .LT. 0) THEN
         CALL XERBLA('ZLAHILB', -INFO)
         RETURN
      END IF
*     Beyond NMAX_EXACT the data is still generated but only approximate;
*     signal this with the (non-fatal) INFO = 1.
      IF (N .GT. NMAX_EXACT) THEN
         INFO = 1
      END IF
*
*     Compute M = the LCM of the integers [1, 2*N-1].  The largest
*     reasonable N is small enough that integers suffice (up to N = 11).
*     The inner WHILE loop computes GCD(M, I) by the Euclidean
*     algorithm, so M = (M / GCD(M,I)) * I = LCM(M, I).
      M = 1
      DO I = 2, (2*N-1)
         TM = M
         TI = I
         R = MOD(TM, TI)
         DO WHILE (R .NE. 0)
            TM = TI
            TI = R
            R = MOD(TM, TI)
         END DO
         M = (M / TI) * I
      END DO
*
*     Generate the scaled Hilbert matrix in A:
*     A(I,J) = D1(J) * (M / (I+J-1)) * D(I), where D = D1 for the
*     symmetric (SY) paths and D = D2 = conj(D1) for Hermitian paths.
*     If we are testing SY routines, take D1_i = D2_i, else, D1_i = D2_i*
      IF ( LSAMEN( 2, C2, 'SY' ) ) THEN
         DO J = 1, N
            DO I = 1, N
               A(I, J) = D1(MOD(J,SIZE_D)+1) * (DBLE(M) / (I + J - 1))
     $              * D1(MOD(I,SIZE_D)+1)
            END DO
         END DO
      ELSE
         DO J = 1, N
            DO I = 1, N
               A(I, J) = D1(MOD(J,SIZE_D)+1) * (DBLE(M) / (I + J - 1))
     $              * D2(MOD(I,SIZE_D)+1)
            END DO
         END DO
      END IF
*
*     Generate matrix B as simply the first NRHS columns of M * the
*     identity.
      TMP = DBLE(M)
      CALL ZLASET('Full', N, NRHS, (0.0D+0,0.0D+0), TMP, B, LDB)
*
*     Generate the true solutions in X.  Because B = the first NRHS
*     columns of M*I, the true solutions are just the first NRHS columns
*     of the inverse Hilbert matrix.
*     WORK(J) accumulates the per-index factor so that
*     invHilbert(I,J) = WORK(I)*WORK(J) / (I+J-1).
      WORK(1) = N
      DO J = 2, N
         WORK(J) = (   ( (WORK(J-1)/(J-1)) * (J-1 - N) ) /(J-1) )
     $        * (N +J -1)
      END DO
*
*     Undo the D scaling applied to A: multiply by the inverse factors.
*     If we are testing SY routines, take D1_i = D2_i, else, D1_i = D2_i*
      IF ( LSAMEN( 2, C2, 'SY' ) ) THEN
         DO J = 1, NRHS
            DO I = 1, N
               X(I, J) = INVD1(MOD(J,SIZE_D)+1) *
     $              ((WORK(I)*WORK(J)) / (I + J - 1))
     $              * INVD1(MOD(I,SIZE_D)+1)
            END DO
         END DO
      ELSE
         DO J = 1, NRHS
            DO I = 1, N
               X(I, J) = INVD2(MOD(J,SIZE_D)+1) *
     $              ((WORK(I)*WORK(J)) / (I + J - 1))
     $              * INVD1(MOD(I,SIZE_D)+1)
            END DO
         END DO
      END IF
      END
| epl-1.0 |
wojwal/msiext | externals/cryptopp/randpool.cpp | 73 | 1473 | // randpool.cpp - written and placed in the public domain by Wei Dai
// RandomPool used to follow the design of randpool in PGP 2.6.x,
// but as of version 5.5 it has been redesigned to reduce the risk
// of reusing random numbers after state rollback (which may occur
// when running in a virtual machine like VMware).
#include "pch.h"
#ifndef CRYPTOPP_IMPORTS
#include "randpool.h"
#include "aes.h"
#include "sha.h"
#include "hrtimer.h"
#include <time.h>
NAMESPACE_BEGIN(CryptoPP)
RandomPool::RandomPool()
	: m_pCipher(new AES::Encryption), m_keySet(false)
{
	// Start from an all-zero seed block and key; real entropy is mixed in
	// later via IncorporateEntropy().
	memset(m_seed, 0, m_seed.SizeInBytes());
	memset(m_key, 0, m_key.SizeInBytes());
}
void RandomPool::IncorporateEntropy(const byte *input, size_t length)
{
SHA256 hash;
hash.Update(m_key, 32);
hash.Update(input, length);
hash.Final(m_key);
m_keySet = false;
}
// Produce `size` bytes of pseudo-random output on `channel` of `target`.
//
// Per the file header, the design reduces the risk of repeating output
// after a state rollback (e.g. a VM snapshot restore): before generating,
// the seed block is perturbed with the current high-resolution timer and
// wall-clock time, so two runs resumed from an identical saved state still
// diverge.  Each emitted 16-byte chunk is the seed block re-encrypted in
// place with AES under m_key.
//
// NOTE(review): m_keySet is never set to true here, so the cipher is
// re-keyed on every call even when m_key has not changed -- verify against
// upstream Crypto++ whether that is intentional.
void RandomPool::GenerateIntoBufferedTransformation(BufferedTransformation &target, const std::string &channel, lword size)
{
	if (size > 0)
	{
		if (!m_keySet)
			m_pCipher->SetKey(m_key, 32);

		// Add the timer at offset 0 and time() at offset 8 of the seed;
		// the compile-time asserts bound the widths so both writes stay
		// inside the 16-byte seed block (they may overlap in the middle).
		Timer timer;
		TimerWord tw = timer.GetCurrentTimerValue();

		CRYPTOPP_COMPILE_ASSERT(sizeof(tw) <= 16);
		*(TimerWord *)m_seed.data() += tw;

		time_t t = time(NULL);
		CRYPTOPP_COMPILE_ASSERT(sizeof(t) <= 8);
		*(time_t *)(m_seed.data()+8) += t;

		do
		{
			// seed = AES_key(seed); emit up to 16 bytes of it per round.
			m_pCipher->ProcessBlock(m_seed);
			size_t len = UnsignedMin(16, size);
			target.ChannelPut(channel, m_seed, len);
			size -= len;
		} while (size > 0);
	}
}
NAMESPACE_END
#endif
| epl-1.0 |
mantera/WX_435_Kernel-CM7 | drivers/mtd/maps/omap_nor.c | 512 | 5008 | /*
* Flash memory support for various TI OMAP boards
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
* Copyright (C) 2003-2004 Texas Instruments
* Copyright (C) 2004 Nokia Corporation
*
* Assembled using driver code copyright the companies above
* and written by David Brownell, Jian Zhang <jzhang@ti.com>,
* Tony Lindgren <tony@atomide.com> and others.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <mach/hardware.h>
#include <asm/mach/flash.h>
#include <mach/tc.h>
#ifdef CONFIG_MTD_PARTITIONS
/* Partition parsers tried in order by parse_mtd_partitions(); the RedBoot
 * parser is deliberately commented out. */
static const char *part_probes[] = { /* "RedBoot", */ "cmdlinepart", NULL };
#endif
/* Per-device state, allocated in probe() and stored as platform drvdata. */
struct omapflash_info {
	struct mtd_partition *parts;	/* parsed partitions, if any */
	struct mtd_info *mtd;		/* chip found by do_map_probe() */
	struct map_info map;		/* physical flash window mapping */
};
/*
 * MTD set_vpp hook: toggle the EMIFS_CONFIG WP bit on OMAP1-class chips.
 * A static reference count lets nested enable/disable pairs from the MTD
 * layer overlap safely -- the register is only touched on the first
 * enable and the last disable.  Non-OMAP1 parts need no action.
 */
static void omap_set_vpp(struct map_info *map, int enable)
{
	static int use_count;
	u32 reg;

	if (!cpu_class_is_omap1())
		return;

	if (enable) {
		if (use_count++ == 0) {
			reg = omap_readl(EMIFS_CONFIG);
			reg |= OMAP_EMIFS_CONFIG_WP;
			omap_writel(reg, EMIFS_CONFIG);
		}
	} else if (use_count && --use_count == 0) {
		reg = omap_readl(EMIFS_CONFIG);
		reg &= ~OMAP_EMIFS_CONFIG_WP;
		omap_writel(reg, EMIFS_CONFIG);
	}
}
/*
 * omapflash_probe() - map a NOR flash window and register it with MTD
 *
 * Maps the flash resource, probes for a chip using the platform-supplied
 * map type and bus width, then registers command-line-parsed partitions,
 * board-supplied partitions, or the whole chip, in that order.
 *
 * Returns 0 on success or a negative errno; everything acquired before a
 * failure is released via the goto unwind ladder.
 */
static int __init omapflash_probe(struct platform_device *pdev)
{
	int err;
	struct omapflash_info *info;
	struct flash_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res = pdev->resource;
	unsigned long size;

	/* Bus width and map type come from platform data; without it we
	 * would dereference NULL below, so fail the probe cleanly. */
	if (!pdata)
		return -EINVAL;

	size = resource_size(res);

	info = kzalloc(sizeof(struct omapflash_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (!request_mem_region(res->start, size, "flash")) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->map.virt = ioremap(res->start, size);
	if (!info->map.virt) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}
	info->map.name = dev_name(&pdev->dev);
	info->map.phys = res->start;
	info->map.size = size;
	info->map.bankwidth = pdata->width;
	info->map.set_vpp = omap_set_vpp;

	simple_map_init(&info->map);
	info->mtd = do_map_probe(pdata->map_name, &info->map);
	if (!info->mtd) {
		err = -EIO;
		goto out_iounmap;
	}
	info->mtd->owner = THIS_MODULE;
	info->mtd->dev.parent = &pdev->dev;

#ifdef CONFIG_MTD_PARTITIONS
	/* Prefer command-line partitions; fall back to the board table;
	 * otherwise register the raw unpartitioned device. */
	err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(info->mtd, info->parts, err);
	else if (err <= 0 && pdata->parts)
		add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(info->mtd);

	platform_set_drvdata(pdev, info);

	return 0;

out_iounmap:
	iounmap(info->map.virt);
out_release_mem_region:
	release_mem_region(res->start, size);
out_free_info:
	kfree(info);

	return err;
}
/* Undo probe(): unregister the MTD objects and release the mapping. */
static int __exit omapflash_remove(struct platform_device *pdev)
{
	struct omapflash_info *info = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (!info)
		return 0;

	/* Tear down whichever registration path probe() took. */
	if (info->parts) {
		del_mtd_partitions(info->mtd);
		kfree(info->parts);
	} else {
		del_mtd_device(info->mtd);
	}
	map_destroy(info->mtd);
	release_mem_region(info->map.phys, info->map.size);
	iounmap((void __iomem *) info->map.virt);
	kfree(info);

	return 0;
}
/* No .probe member: omapflash_probe() is __init, so it is handed to
 * platform_driver_probe() below and its memory is discarded after boot. */
static struct platform_driver omapflash_driver = {
	.remove	= __exit_p(omapflash_remove),
	.driver = {
		.name	= "omapflash",
		.owner	= THIS_MODULE,
	},
};

/* Register the driver; probe runs immediately for any matching device. */
static int __init omapflash_init(void)
{
	return platform_driver_probe(&omapflash_driver, omapflash_probe);
}

static void __exit omapflash_exit(void)
{
	platform_driver_unregister(&omapflash_driver);
}

module_init(omapflash_init);
module_exit(omapflash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards");
MODULE_ALIAS("platform:omapflash");
| gpl-2.0 |
GameTheory-/android_kernel_d505 | arch/arm/mach-msm/acpuclock-8930ab.c | 768 | 12431 | /*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/rpm-regulator.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include "acpuclock.h"
#include "acpuclock-krait.h"
/* Corner type vreg VDD values */
#define LVL_NONE	RPM_VREG_CORNER_NONE
#define LVL_LOW		RPM_VREG_CORNER_LOW
#define LVL_NOM		RPM_VREG_CORNER_NOMINAL
#define LVL_HIGH	RPM_VREG_CORNER_HIGH

/*
 * HFPLL register layout/configuration shared by all scalables.  The
 * *_vdd_l_max fields select the PLL VDD corner from the L-value being
 * programmed (<=37 -> LOW, <=74 -> NOM, else HIGH per the names --
 * TODO confirm against acpuclock-krait.c).
 */
static struct hfpll_data hfpll_data __initdata = {
	.mode_offset = 0x00,
	.l_offset = 0x08,
	.m_offset = 0x0C,
	.n_offset = 0x10,
	.config_offset = 0x04,
	.config_val = 0x7845C665,
	.has_droop_ctl = true,
	.droop_offset = 0x14,
	.droop_val = 0x0108C000,
	.low_vdd_l_max = 37,
	.nom_vdd_l_max = 74,
	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
	.vdd[HFPLL_VDD_LOW] = LVL_LOW,
	.vdd[HFPLL_VDD_NOM] = LVL_NOM,
	.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
};
/*
 * Clock-domain description used when the board carries a PM8917 PMIC
 * (selected in acpuclk_8930ab_probe()).  Differs from the default
 * 'scalable' table below only in the HFPLL supply regulators: two rails
 * (*_s8 at 2.05 V and *_l23 at 1.8 V) instead of a single *_hfpll rail.
 */
static struct scalable scalable_pm8917[] __initdata = {
	[CPU0] = {
		.hfpll_phys_base = 0x00903200,
		.aux_clk_sel_phys = 0x02088014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x4501,
		.vreg[VREG_CORE] = { "krait0", 1300000 },
		.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait0_s8", 2050000 },
		.vreg[VREG_HFPLL_B] = { "krait0_l23", 1800000 },
	},
	[CPU1] = {
		.hfpll_phys_base = 0x00903300,
		.aux_clk_sel_phys = 0x02098014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x5501,
		.vreg[VREG_CORE] = { "krait1", 1300000 },
		.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait1_s8", 2050000 },
		.vreg[VREG_HFPLL_B] = { "krait1_l23", 1800000 },
	},
	[L2] = {
		.hfpll_phys_base = 0x00903400,
		.aux_clk_sel_phys = 0x02011028,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x0500,
		.vreg[VREG_HFPLL_A] = { "l2_s8", 2050000 },
		.vreg[VREG_HFPLL_B] = { "l2_l23", 1800000 },
	},
};
/*
 * Default per-CPU/L2 clock-domain description (non-PM8917 boards): each
 * scalable's HFPLL is fed from a single 1.8 V *_hfpll regulator.
 */
static struct scalable scalable[] __initdata = {
	[CPU0] = {
		.hfpll_phys_base = 0x00903200,
		.aux_clk_sel_phys = 0x02088014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x4501,
		.vreg[VREG_CORE] = { "krait0", 1300000 },
		.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
	},
	[CPU1] = {
		.hfpll_phys_base = 0x00903300,
		.aux_clk_sel_phys = 0x02098014,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x5501,
		.vreg[VREG_CORE] = { "krait1", 1300000 },
		.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
		.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
	},
	[L2] = {
		.hfpll_phys_base = 0x00903400,
		.aux_clk_sel_phys = 0x02011028,
		.aux_clk_sel = 3,
		.sec_clk_sel = 2,
		.l2cpmr_iaddr = 0x0500,
		.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
	},
};
/* Bus bandwidth vote levels; l2_freq_tbl entries index into this table. */
static struct msm_bus_paths bw_level_tbl[] __initdata = {
	[0] = BW_MBPS(640), /* At least 80 MHz on bus. */
	[1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
	[2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
	[3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
	[4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
	[5] = BW_MBPS(4800), /* At least 600 MHz on bus. */
};

/* Bus-scaling client registration data handed to the krait core driver. */
static struct msm_bus_scale_pdata bus_scale_data __initdata = {
	.usecase = bw_level_tbl,
	.num_usecases = ARRAY_SIZE(bw_level_tbl),
	.active_only = 1,
	.name = "acpuclk-8930ab",
};
/*
 * L2 cache frequency table.  Each row appears to be
 * { {kHz, clk source, divider, PLL L-value}, dig corner, mem uV, bw level }
 * where the last column indexes bw_level_tbl above -- field names per
 * acpuclock-krait.h, TODO confirm.  Zero-terminated.
 */
static struct l2_level l2_freq_tbl[] __initdata = {
	[0] = { { 384000, PLL_8, 0, 0x00 }, LVL_LOW, 1050000, 1 },
	[1] = { { 432000, HFPLL, 2, 0x20 }, LVL_NOM, 1050000, 2 },
	[2] = { { 486000, HFPLL, 2, 0x24 }, LVL_NOM, 1050000, 2 },
	[3] = { { 540000, HFPLL, 2, 0x28 }, LVL_NOM, 1050000, 2 },
	[4] = { { 594000, HFPLL, 1, 0x16 }, LVL_NOM, 1050000, 2 },
	[5] = { { 648000, HFPLL, 1, 0x18 }, LVL_NOM, 1050000, 4 },
	[6] = { { 702000, HFPLL, 1, 0x1A }, LVL_NOM, 1050000, 4 },
	[7] = { { 756000, HFPLL, 1, 0x1C }, LVL_HIGH, 1150000, 4 },
	[8] = { { 810000, HFPLL, 1, 0x1E }, LVL_HIGH, 1150000, 4 },
	[9] = { { 864000, HFPLL, 1, 0x20 }, LVL_HIGH, 1150000, 4 },
	[10] = { { 918000, HFPLL, 1, 0x22 }, LVL_HIGH, 1150000, 5 },
	[11] = { { 972000, HFPLL, 1, 0x24 }, LVL_HIGH, 1150000, 5 },
	[12] = { { 1026000, HFPLL, 1, 0x26 }, LVL_HIGH, 1150000, 5 },
	[13] = { { 1080000, HFPLL, 1, 0x28 }, LVL_HIGH, 1150000, 5 },
	[14] = { { 1134000, HFPLL, 1, 0x2A }, LVL_HIGH, 1150000, 5 },
	[15] = { { 1188000, HFPLL, 1, 0x2C }, LVL_HIGH, 1150000, 5 },
	{ }
};
/*
 * Per-PVS-bin CPU frequency tables for the single 1.7 GHz speed bin.
 * Row layout: { use_for_scaling, {kHz, src, div, L-value}, L2 index,
 * core uV }.  Faster silicon (higher PVS number) runs each frequency at
 * progressively lower voltage; the 25000 in pvs_tables[] below appears to
 * be the per-bin boost/offset step in uV -- TODO confirm against
 * acpuclock-krait.h.  Each table is zero-terminated.
 */
static struct acpu_level tbl_PVS0_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 1000000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 1000000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 1000000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1025000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1050000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1075000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1100000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1125000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1150000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1175000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1200000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1225000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1250000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1275000 },
	{ 0, { 0 } }
};

static struct acpu_level tbl_PVS1_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 975000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 975000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 1000000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 1000000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1025000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1050000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1075000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1100000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1125000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1150000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1175000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1200000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1225000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1250000 },
	{ 0, { 0 } }
};

static struct acpu_level tbl_PVS2_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 950000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 950000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 975000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 1000000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1025000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1050000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1075000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1100000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1125000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1150000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1175000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1200000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1225000 },
	{ 0, { 0 } }
};

static struct acpu_level tbl_PVS3_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 925000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 925000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 950000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 975000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 1000000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1025000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1050000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1075000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1100000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1125000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1150000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1175000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1200000 },
	{ 0, { 0 } }
};

static struct acpu_level tbl_PVS4_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 925000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 925000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 925000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 950000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 975000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 1000000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1025000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1050000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1075000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1100000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1125000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1150000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1175000 },
	{ 0, { 0 } }
};

static struct acpu_level tbl_PVS5_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 900000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 900000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 900000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 925000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 950000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 975000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1000000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1025000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1050000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1075000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1100000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1125000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1150000 },
	{ 0, { 0 } }
};

static struct acpu_level tbl_PVS6_1700MHz[] __initdata = {
	{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
	{ 1, { 486000, HFPLL, 2, 0x24 }, L2(5), 875000 },
	{ 1, { 594000, HFPLL, 1, 0x16 }, L2(5), 875000 },
	{ 1, { 702000, HFPLL, 1, 0x1A }, L2(5), 875000 },
	{ 1, { 810000, HFPLL, 1, 0x1E }, L2(10), 900000 },
	{ 1, { 918000, HFPLL, 1, 0x22 }, L2(10), 925000 },
	{ 1, { 1026000, HFPLL, 1, 0x26 }, L2(10), 950000 },
	{ 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 975000 },
	{ 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1000000 },
	{ 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1025000 },
	{ 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1050000 },
	{ 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1075000 },
	{ 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1100000 },
	{ 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1125000 },
	{ 0, { 0 } }
};

/* [speed bin][PVS bin] -> frequency table; only bin 0 is populated. */
static struct pvs_table pvs_tables[NUM_SPEED_BINS][NUM_PVS] __initdata = {
	[0][0] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
	[0][1] = { tbl_PVS1_1700MHz, sizeof(tbl_PVS1_1700MHz), 25000 },
	[0][2] = { tbl_PVS2_1700MHz, sizeof(tbl_PVS2_1700MHz), 25000 },
	[0][3] = { tbl_PVS3_1700MHz, sizeof(tbl_PVS3_1700MHz), 25000 },
	[0][4] = { tbl_PVS4_1700MHz, sizeof(tbl_PVS4_1700MHz), 25000 },
	[0][5] = { tbl_PVS5_1700MHz, sizeof(tbl_PVS5_1700MHz), 25000 },
	[0][6] = { tbl_PVS6_1700MHz, sizeof(tbl_PVS6_1700MHz), 25000 },
};
/* Everything the generic Krait clock driver needs for this SoC, bundled
 * for acpuclk_krait_init(); .scalable may be swapped in probe(). */
static struct acpuclk_krait_params acpuclk_8930ab_params __initdata = {
	.scalable = scalable,
	.scalable_size = sizeof(scalable),
	.hfpll_data = &hfpll_data,
	.pvs_tables = pvs_tables,
	.l2_freq_tbl = l2_freq_tbl,
	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
	.bus_scale = &bus_scale_data,
	.pte_efuse_phys = 0x007000C0,	/* fuse block holding speed/PVS bins */
	.stby_khz = 384000,
};
/*
 * Select the scalable table matching the board's PMIC (PM8917 boards use
 * different HFPLL supplies) and hand the full description over to the
 * common Krait clock driver.
 */
static int __init acpuclk_8930ab_probe(struct platform_device *pdev)
{
	const struct acpuclk_platform_data *pd = pdev->dev.platform_data;

	if (pd && pd->uses_pm8917)
		acpuclk_8930ab_params.scalable = scalable_pm8917;

	return acpuclk_krait_init(&pdev->dev, &acpuclk_8930ab_params);
}
/* No .probe member: acpuclk_8930ab_probe() is __init and is passed to
 * platform_driver_probe() below instead. */
static struct platform_driver acpuclk_8930ab_driver = {
	.driver = {
		.name = "acpuclk-8930ab",
		.owner = THIS_MODULE,
	},
};

static int __init acpuclk_8930ab_init(void)
{
	return platform_driver_probe(&acpuclk_8930ab_driver,
				     acpuclk_8930ab_probe);
}
device_initcall(acpuclk_8930ab_init);
| gpl-2.0 |
CyanogenMod/android_kernel_motorola_msm8610 | drivers/staging/speakup/main.c | 768 | 58434 | /* speakup.c
* review functions for the speakup screen review package.
* originally written by: Kirk Reiser and Andy Berdan.
*
* extensively modified by David Borowski.
*
** Copyright (C) 1998 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/vt.h>
#include <linux/tty.h>
#include <linux/mm.h> /* __get_free_page() and friends */
#include <linux/vt_kern.h>
#include <linux/ctype.h>
#include <linux/selection.h>
#include <linux/unistd.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/keyboard.h> /* for KT_SHIFT */
#include <linux/kbd_kern.h> /* for vc_kbd_* and friends */
#include <linux/input.h>
#include <linux/kmod.h>
#include <linux/bootmem.h> /* for alloc_bootmem */
/* speakup_*_selection */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/consolemap.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/uaccess.h> /* copy_from|to|user() and others */
#include "spk_priv.h"
#include "speakup.h"
#define MAX_DELAY msecs_to_jiffies(500)
#define MINECHOCHAR SPACE

MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("Daniel Drake <dsd@gentoo.org>");
MODULE_DESCRIPTION("Speakup console speech");
MODULE_LICENSE("GPL");
MODULE_VERSION(SPEAKUP_VERSION);

/* Name of the synthesizer driver to load when built in ("synth=" param). */
char *synth_name;
module_param_named(synth, synth_name, charp, S_IRUGO);
module_param_named(quiet, spk_quiet_boot, bool, S_IRUGO);

MODULE_PARM_DESC(synth, "Synth to start if speakup is built in.");
MODULE_PARM_DESC(quiet, "Do not announce when the synthesizer is found.");

special_func spk_special_handler;

/* Runtime review settings, mostly user-tunable via /sys or /proc. */
short spk_pitch_shift, synth_flags;
static char buf[256];	/* scratch buffer shared by the word/line readers */
int spk_attrib_bleep, spk_bleeps, spk_bleep_time = 10;
int spk_no_intr, spk_spell_delay;
int spk_key_echo, spk_say_word_ctl;
int spk_say_ctrl, spk_bell_pos;
short spk_punc_mask;
int spk_punc_level, spk_reading_punc;
char spk_str_caps_start[MAXVARLEN + 1] = "\0", spk_str_caps_stop[MAXVARLEN + 1] = "\0";

/* Punctuation verbosity presets: name, punctuation set, flag bit. */
const struct st_bits_data spk_punc_info[] = {
	{"none", "", 0},
	{"some", "/$%&@", SOME},
	{"most", "$%&#()=+*/@^<>|\\", MOST},
	{"all", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", PUNC},
	{"delimiters", "", B_WDLM},
	{"repeats", "()", CH_RPT},
	{"extended numeric", "", B_EXNUM},
	{"symbols", "", B_SYM},
	{0, 0}
};

/* Nonzero between the first and second press of the cut command. */
static char mark_cut_flag;
#define MAX_KEY 160
u_char *spk_our_keys[MAX_KEY], *spk_shift_table;
u_char spk_key_buf[600];
/* Default keymap, generated into speakupmap.h at build time. */
const u_char spk_key_defaults[] = {
#include "speakupmap.h"
};
/* Speakup Cursor Track Variables */
static int cursor_track = 1, prev_cursor_track = 1;
/* cursor track modes, must be ordered same as cursor_msgs */
enum {
CT_Off = 0,
CT_On,
CT_Highlight,
CT_Window,
CT_Max
};
#define read_all_mode CT_Max
static struct tty_struct *tty;
static void spkup_write(const char *in_buf, int count);
static char *phonetic[] = {
"alfa", "bravo", "charlie", "delta", "echo", "foxtrot", "golf", "hotel",
"india", "juliett", "keelo", "leema", "mike", "november", "oscar",
"papa",
"keh beck", "romeo", "sierra", "tango", "uniform", "victer", "whiskey",
"x ray", "yankee", "zulu"
};
/* array of 256 char pointers (one for each character description)
* initialized to default_chars and user selectable via
* /proc/speakup/characters */
char *spk_characters[256];
/* Default spoken descriptions, one per 8-bit character code.  Entries
 * beginning with '^' are spoken as "control <letter>" by speak_char(). */
char *spk_default_chars[256] = {
/*000*/ "null", "^a", "^b", "^c", "^d", "^e", "^f", "^g",
/*008*/ "^h", "^i", "^j", "^k", "^l", "^m", "^n", "^o",
/*016*/ "^p", "^q", "^r", "^s", "^t", "^u", "^v", "^w",
/*024*/ "^x", "^y", "^z", "control", "control", "control", "control",
	"control",
/*032*/ "space", "bang!", "quote", "number", "dollar", "percent", "and",
	"tick",
/*040*/ "left paren", "right paren", "star", "plus", "comma", "dash",
	"dot",
	"slash",
/*048*/ "zero", "one", "two", "three", "four", "five", "six", "seven",
	"eight", "nine",
/*058*/ "colon", "semmy", "less", "equals", "greater", "question", "at",
/*065*/ "EIGH", "B", "C", "D", "E", "F", "G",
/*072*/ "H", "I", "J", "K", "L", "M", "N", "O",
/*080*/ "P", "Q", "R", "S", "T", "U", "V", "W", "X",
/*089*/ "Y", "ZED", "left bracket", "backslash", "right bracket",
	"caret",
	"line",
/*096*/ "accent", "a", "b", "c", "d", "e", "f", "g",
/*104*/ "h", "i", "j", "k", "l", "m", "n", "o",
/*112*/ "p", "q", "r", "s", "t", "u", "v", "w",
/*120*/ "x", "y", "zed", "left brace", "bar", "right brace", "tihlduh",
/*127*/ "del", "control", "control", "control", "control", "control",
	"control", "control", "control", "control", "control",
/*138*/ "control", "control", "control", "control", "control",
	"control", "control", "control", "control", "control",
	"control", "control",
/*150*/ "control", "control", "control", "control", "control",
	"control", "control", "control", "control", "control",
/*160*/ "nbsp", "inverted bang",
/*162*/ "cents", "pounds", "currency", "yen", "broken bar", "section",
/*168*/ "diaeresis", "copyright", "female ordinal", "double left angle",
/*172*/ "not", "soft hyphen", "registered", "macron",
/*176*/ "degrees", "plus or minus", "super two", "super three",
/*180*/ "acute accent", "micro", "pilcrow", "middle dot",
/*184*/ "cedilla", "super one", "male ordinal", "double right angle",
/*188*/ "one quarter", "one half", "three quarters",
	"inverted question",
/*192*/ "A GRAVE", "A ACUTE", "A CIRCUMFLEX", "A TILDE", "A OOMLAUT",
	"A RING",
/*198*/ "AE", "C CIDELLA", "E GRAVE", "E ACUTE", "E CIRCUMFLEX",
	"E OOMLAUT",
/*204*/ "I GRAVE", "I ACUTE", "I CIRCUMFLEX", "I OOMLAUT", "ETH",
	"N TILDE",
/*210*/ "O GRAVE", "O ACUTE", "O CIRCUMFLEX", "O TILDE", "O OOMLAUT",
/*215*/ "multiplied by", "O STROKE", "U GRAVE", "U ACUTE",
	"U CIRCUMFLEX",
/*220*/ "U OOMLAUT", "Y ACUTE", "THORN", "sharp s", "a grave",
/*225*/ "a acute", "a circumflex", "a tilde", "a oomlaut", "a ring",
/*230*/ "ae", "c cidella", "e grave", "e acute",
/*234*/ "e circumflex", "e oomlaut", "i grave", "i acute",
	"i circumflex",
/*239*/ "i oomlaut", "eth", "n tilde", "o grave", "o acute",
	"o circumflex",
/*245*/ "o tilde", "o oomlaut", "divided by", "o stroke", "u grave",
	"u acute",
/* 251 */ "u circumflex", "u oomlaut", "y acute", "thorn", "y oomlaut"
};
/* array of 256 u_short (one for each character)
 * initialized to default_chartab and user selectable via
 * /sys/module/speakup/parameters/chartab */
u_short spk_chartab[256];

/* Classification flag per character (ALPHA, NUM, PUNC, B_CTL, ...);
 * the flag constants come from the speakup headers and drive IS_CHAR()/
 * IS_WDLM() tests in the review commands. */
static u_short default_chartab[256] = {
	B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL,	/* 0-7 */
	B_CTL, B_CTL, A_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL,	/* 8-15 */
	B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL,	/*16-23 */
	B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL,	/* 24-31 */
	WDLM, A_PUNC, PUNC, PUNC, PUNC, PUNC, PUNC, A_PUNC,	/*  !"#$%&' */
	PUNC, PUNC, PUNC, PUNC, A_PUNC, A_PUNC, A_PUNC, PUNC,	/* ()*+, -./ */
	NUM, NUM, NUM, NUM, NUM, NUM, NUM, NUM,	/* 01234567 */
	NUM, NUM, A_PUNC, PUNC, PUNC, PUNC, PUNC, A_PUNC,	/* 89:;<=>? */
	PUNC, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP,	/* @ABCDEFG */
	A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP,	/* HIJKLMNO */
	A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP,	/* PQRSTUVW */
	A_CAP, A_CAP, A_CAP, PUNC, PUNC, PUNC, PUNC, PUNC,	/* XYZ[\]^_ */
	PUNC, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA,	/* `abcdefg */
	ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA,	/* hijklmno */
	ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA,	/* pqrstuvw */
	ALPHA, ALPHA, ALPHA, PUNC, PUNC, PUNC, PUNC, 0,	/* xyz{|}~ */
	B_CAPSYM, B_CAPSYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM,	/* 128-134 */
	B_SYM,	/* 135 */
	B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM,	/* 136-142 */
	B_CAPSYM,	/* 143 */
	B_CAPSYM, B_CAPSYM, B_SYM, B_CAPSYM, B_SYM, B_SYM, B_SYM,	/* 144-150 */
	B_SYM,	/* 151 */
	B_SYM, B_SYM, B_CAPSYM, B_CAPSYM, B_SYM, B_SYM, B_SYM,	/*152-158 */
	B_SYM,	/* 159 */
	WDLM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_CAPSYM,	/* 160-166 */
	B_SYM,	/* 167 */
	B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM,	/* 168-175 */
	B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM,	/* 176-183 */
	B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM,	/* 184-191 */
	A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP,	/* 192-199 */
	A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP,	/* 200-207 */
	A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, B_SYM,	/* 208-215 */
	A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, ALPHA,	/* 216-223 */
	ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA,	/* 224-231 */
	ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA,	/* 232-239 */
	ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, B_SYM,	/* 240-247 */
	ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA	/* 248-255 */
};
/* Handle of the speakup worker thread -- presumably the kthread that
 * drains the synth buffer; confirm where kthread_run() is called. */
struct task_struct *speakup_task;
/* Single pending beep, produced by bleep() and consumed elsewhere. */
struct bleep spk_unprocessed_sound;

/* Keyboard-event bookkeeping shared with the notifier callbacks. */
static int spk_keydown;
static u_char spk_lastkey, spk_close_press, keymap_flags;
static u_char last_keycode, this_speakup_key;
static u_long last_spk_jiffy;
/* Per-console review state. */
struct st_spk_t *speakup_console[MAX_NR_CONSOLES];
DEFINE_MUTEX(spk_mutex);

/* Hooks into the keyboard and VT notifier chains. */
static int keyboard_notifier_call(struct notifier_block *,
				  unsigned long code, void *param);

struct notifier_block keyboard_notifier_block = {
	.notifier_call = keyboard_notifier_call,
};

static int vt_notifier_call(struct notifier_block *,
			    unsigned long code, void *param);

struct notifier_block vt_notifier_block = {
	.notifier_call = vt_notifier_call,
};
/* Return the attribute byte (high half) of the VGA cell at @pos. */
static unsigned char get_attributes(u16 *pos)
{
	u16 cell = scr_readw(pos);

	return (u_char)(cell >> 8);
}
/*
 * Re-synchronize the review cursor with the console cursor: copy the
 * console position into both the reading (spk_x/y/pos) and console
 * (spk_cx/cy/cp) trackers and latch the attribute byte under it.
 */
static void speakup_date(struct vc_data *vc)
{
	spk_x = spk_cx = vc->vc_x;
	spk_y = spk_cy = vc->vc_y;
	spk_pos = spk_cp = vc->vc_pos;
	spk_old_attr = spk_attr;
	spk_attr = get_attributes((u_short *) spk_pos);
}
/*
 * Queue a beep whose pitch follows a 12-tone scale: @val selects a note
 * from vals[] and each full octave (12 steps) doubles the frequency.
 * The request is left in spk_unprocessed_sound for later playback.
 */
static void bleep(u_short val)
{
	static const short vals[] = {
		350, 370, 392, 414, 440, 466, 491, 523, 554, 587, 619, 659
	};
	short freq = vals[val % 12];

	if (val > 11)
		freq *= (1 << (val / 12));

	/* We can only have 1 active sound at a time. */
	spk_unprocessed_sound.freq = freq;
	spk_unprocessed_sound.jiffies = msecs_to_jiffies(spk_bleep_time);
	spk_unprocessed_sound.active = 1;
}
/*
 * Silence speech for this console immediately: set the shut-up flag,
 * un-park the reading cursor, re-sync it with the console cursor and
 * flush whatever the synthesizer still has queued.  No-op while the
 * synth is in the "killed" state.
 */
static void speakup_shut_up(struct vc_data *vc)
{
	if (spk_killed)
		return;
	spk_shut_up |= 0x01;
	spk_parked &= 0xfe;
	speakup_date(vc);
	if (synth != NULL)
		spk_do_flush();
}
/*
 * Toggle the synth "killed" state (bit 0x40 of spk_shut_up) and announce
 * the transition.  A return of 0 from is_alive() means the synth is
 * really gone, so nothing is toggled; per the comment below, the call
 * itself re-enables a merely-disabled synth (val == 2) -- verify against
 * the synth driver's is_alive implementation.
 */
static void speech_kill(struct vc_data *vc)
{
	char val = synth->is_alive(synth);

	if (val == 0)
		return;

	/* re-enables synth, if disabled */
	if (val == 2 || spk_killed) {
		/* dead */
		spk_shut_up &= ~0x40;
		synth_printf("%s\n", spk_msg_get(MSG_IAM_ALIVE));
	} else {
		synth_printf("%s\n", spk_msg_get(MSG_YOU_KILLED_SPEAKUP));
		spk_shut_up |= 0x40;
	}
}
/*
 * Toggle the "speakup off" state (bit 0x80 of spk_shut_up), announce
 * the new state and re-sync the review cursor with the console.
 */
static void speakup_off(struct vc_data *vc)
{
	if (!(spk_shut_up & 0x80)) {
		spk_shut_up |= 0x80;
		synth_printf("%s\n", spk_msg_get(MSG_YOU_TURNED_ME_OFF));
	} else {
		spk_shut_up &= 0x7f;
		synth_printf("%s\n", spk_msg_get(MSG_HEY_THATS_BETTER));
	}
	speakup_date(vc);
}
/* Toggle parking of the review cursor (bit 0x80 of spk_parked) and say
 * which state we ended up in. */
static void speakup_parked(struct vc_data *vc)
{
	if (!(spk_parked & 0x80)) {
		spk_parked |= 0x80;
		synth_printf("%s\n", spk_msg_get(MSG_PARKED));
	} else {
		spk_parked = 0;
		synth_printf("%s\n", spk_msg_get(MSG_UNPARKED));
	}
}
/*
 * Two-press screen-copy command.  The first press marks the start corner
 * of the selection at the review cursor; the second press records the end
 * corner, hands the rectangle to the console selection code and reports
 * any failure from speakup_set_selection().
 */
static void speakup_cut(struct vc_data *vc)
{
	static const char err_buf[] = "set selection failed";
	int ret;

	if (!mark_cut_flag) {
		/* First press: remember the start corner and console. */
		mark_cut_flag = 1;
		spk_xs = (u_short) spk_x;
		spk_ys = (u_short) spk_y;
		spk_sel_cons = vc;
		synth_printf("%s\n", spk_msg_get(MSG_MARK));
		return;
	}
	/* Second press: complete the rectangle and set the selection. */
	spk_xe = (u_short) spk_x;
	spk_ye = (u_short) spk_y;
	mark_cut_flag = 0;
	synth_printf("%s\n", spk_msg_get(MSG_CUT));

	speakup_clear_selection();
	ret = speakup_set_selection(tty);

	switch (ret) {
	case 0:
		break;		/* no error */
	case -EFAULT:
		pr_warn("%sEFAULT\n", err_buf);
		break;
	case -EINVAL:
		pr_warn("%sEINVAL\n", err_buf);
		break;
	case -ENOMEM:
		pr_warn("%sENOMEM\n", err_buf);
		break;
	}
}
/* Paste the current selection into the console tty; if a cut mark is
 * pending, cancel it instead of pasting. */
static void speakup_paste(struct vc_data *vc)
{
	if (!mark_cut_flag) {
		synth_printf("%s\n", spk_msg_get(MSG_PASTE));
		speakup_paste_selection(tty);
		return;
	}
	mark_cut_flag = 0;
	synth_printf("%s\n", spk_msg_get(MSG_MARK_CLEARED));
}
/*
 * Speak the colors of the last latched attribute byte (spk_attr):
 * low nibble = foreground, high nibble = background; the top bits of
 * each nibble mean bright and blinking respectively.
 */
static void say_attributes(struct vc_data *vc)
{
	int fg = spk_attr & 0x0f;
	int bg = spk_attr >> 4;

	/* NOTE(review): foreground tests "> 8" while background tests
	 * "> 7", so fg == 8 is never reported as bright -- verify this
	 * asymmetry against the color message table. */
	if (fg > 8) {
		synth_printf("%s ", spk_msg_get(MSG_BRIGHT));
		fg -= 8;
	}
	synth_printf("%s", spk_msg_get(MSG_COLORS_START + fg));
	if (bg > 7) {
		synth_printf(" %s ", spk_msg_get(MSG_ON_BLINKING));
		bg -= 8;
	} else
		synth_printf(" %s ", spk_msg_get(MSG_ON));
	synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg));
}
/* Screen edges reported by announce_edge(); edge_quiet suppresses the
 * spoken message, leaving only the beep. */
enum {
	edge_top = 1,
	edge_bottom,
	edge_left,
	edge_right,
	edge_quiet
};
/* Signal that the review cursor hit a screen edge: beep and/or speak the
 * edge message according to the spk_bleeps policy bits. */
static void announce_edge(struct vc_data *vc, int msg_id)
{
	const int want_beep = spk_bleeps & 1;
	const int want_speech = (spk_bleeps & 2) && msg_id < edge_quiet;

	if (want_beep)
		bleep(spk_y);
	if (want_speech)
		synth_printf("%s\n",
			     spk_msg_get(MSG_EDGE_MSGS_START + msg_id - 1));
}
/*
 * Speak one character.  When the DIRECT variable is set, the raw
 * character is passed straight to the synthesizer; otherwise its spoken
 * description from spk_characters[] is used.  Capitals (B_CAP) are
 * wrapped in the configurable caps start/stop strings and bump
 * spk_pitch_shift.
 */
static void speak_char(u_char ch)
{
	char *cp = spk_characters[ch];
	struct var_t *direct = spk_get_var(DIRECT);

	if (direct && direct->u.n.value) {
		if (IS_CHAR(ch, B_CAP)) {
			spk_pitch_shift++;
			synth_printf("%s", spk_str_caps_start);
		}
		synth_printf("%c", ch);
		if (IS_CHAR(ch, B_CAP))
			synth_printf("%s", spk_str_caps_stop);
		return;
	}

	if (cp == NULL) {
		pr_info("speak_char: cp == NULL!\n");
		return;
	}
	synth_buffer_add(SPACE);
	if (IS_CHAR(ch, B_CAP)) {
		spk_pitch_shift++;
		synth_printf("%s", spk_str_caps_start);
		synth_printf("%s", cp);
		synth_printf("%s", spk_str_caps_stop);
	} else {
		/* descriptions beginning with '^' denote control chars */
		if (*cp == '^') {
			synth_printf("%s", spk_msg_get(MSG_CTRL));
			cp++;
		}
		synth_printf("%s", cp);
	}
	synth_buffer_add(SPACE);
}
/*
 * Read the console cell at @pos: return the translated character and
 * store its attribute byte through @attribs.  Falls back to a space when
 * either pointer is missing.
 */
static u16 get_char(struct vc_data *vc, u16 * pos, u_char * attribs)
{
	u16 w, c;

	if (!vc || !pos)
		return ' ';

	w = scr_readw(pos);
	c = w & 0xff;
	if (w & vc->vc_hi_font_mask)
		c |= 0x100;	/* character sits in the high font half */
	*attribs = (w & 0xff00) >> 8;

	return inverse_translate(vc, c, 0);
}
/*
 * Speak the character under the review cursor, first signalling any
 * attribute change with a beep and/or a spoken color description as
 * selected by the spk_attrib_bleep policy bits.
 */
static void say_char(struct vc_data *vc)
{
	u_short ch;

	spk_old_attr = spk_attr;
	ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
	if (spk_attr != spk_old_attr) {
		if (spk_attrib_bleep & 1)
			bleep(spk_y);
		if (spk_attrib_bleep & 2)
			say_attributes(vc);
	}
	speak_char(ch & 0xff);
}
/* Speak the character under the cursor phonetically: letters map to
 * the phonetic[] alphabet, digits get a "number" prefix, everything
 * else falls back to speak_char(). */
static void say_phonetic_char(struct vc_data *vc)
{
	u_short ch;

	spk_old_attr = spk_attr;
	ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
	if (isascii(ch) && isalpha(ch)) {
		/* fold case: 'a'/'A' -> 1 ... 'z'/'Z' -> 26, then 0-base */
		synth_printf("%s\n", phonetic[(ch & 0x1f) - 1]);
	} else {
		if (IS_CHAR(ch, B_NUM))
			synth_printf("%s ", spk_msg_get(MSG_NUMBER));
		speak_char(ch);
	}
}
/* Move the reading cursor one column left and speak that character;
 * announce the left edge instead when already at column 0. */
static void say_prev_char(struct vc_data *vc)
{
	spk_parked |= 0x01;
	if (spk_x == 0) {
		announce_edge(vc, edge_left);
		return;
	}
	spk_x--;
	spk_pos -= 2;	/* one cell = 2 bytes of video memory */
	say_char(vc);
}
/* Move the reading cursor one column right and speak that character;
 * announce the right edge instead when already at the last column. */
static void say_next_char(struct vc_data *vc)
{
	spk_parked |= 0x01;
	if (spk_x == vc->vc_cols - 1) {
		announce_edge(vc, edge_right);
		return;
	}
	spk_x++;
	spk_pos += 2;	/* one cell = 2 bytes of video memory */
	say_char(vc);
}
/* get_word - will first check to see if the character under the
 * reading cursor is a space and if spk_say_word_ctl is true it will
 * return the word space. If spk_say_word_ctl is not set it will check to
 * see if there is a word starting on the next position to the right
 * and return that word if it exists. If it does not exist it will
 * move left to the beginning of any previous word on the line or the
 * beginning of the line, whichever comes first. */
/*
 * get_word - copy the word at (or next to) the reading cursor into buf.
 *
 * Returns the number of characters copied; 0 when the cursor sits on a
 * space and spk_say_word_ctl is set (in which case "space" is spoken
 * directly).  spk_attr is refreshed from the first character of the
 * word.  The reading cursor itself is not moved.
 */
static u_long get_word(struct vc_data *vc)
{
	u_long cnt = 0, tmpx = spk_x, tmp_pos = spk_pos;
	char ch;
	u_short attr_ch;
	u_char temp;

	spk_old_attr = spk_attr;
	ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);

	/* decided to take out the sayword if on a space (mis-information) */
	if (spk_say_word_ctl && ch == SPACE) {
		*buf = '\0';
		synth_printf("%s\n", spk_msg_get(MSG_SPACE));
		return 0;
	} else if ((tmpx < vc->vc_cols - 2)
		   && (ch == SPACE || ch == 0 || IS_WDLM(ch))
		   /*
		    * Peek at the next screen cell.  The original code cast
		    * "&tmp_pos" (the address of the local variable!) and so
		    * read stack memory instead of video memory; the cast
		    * must apply to the value, not its address.
		    */
		   && ((char)get_char(vc, (u_short *) tmp_pos + 1, &temp) >
		       SPACE)) {
		/* a word starts immediately to the right: start there */
		tmp_pos += 2;
		tmpx++;
	} else
		/* scan left to the start of the current/previous word */
		while (tmpx > 0) {
			ch = (char)get_char(vc, (u_short *) tmp_pos - 1,
					    &temp);
			if ((ch == SPACE || ch == 0 || IS_WDLM(ch))
			    && ((char)get_char(vc, (u_short *) tmp_pos,
					       &temp) > SPACE))
				break;
			tmp_pos -= 2;
			tmpx--;
		}
	attr_ch = get_char(vc, (u_short *) tmp_pos, &spk_attr);
	buf[cnt++] = attr_ch & 0xff;
	/* copy forward until a space/NUL, or past a run of delimiters */
	while (tmpx < vc->vc_cols - 1) {
		tmp_pos += 2;
		tmpx++;
		ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
		if ((ch == SPACE) || ch == 0
		    || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE)))
			break;
		buf[cnt++] = ch;
	}
	buf[cnt] = '\0';
	return cnt;
}
static void say_word(struct vc_data *vc)
{
u_long cnt = get_word(vc);
u_short saved_punc_mask = spk_punc_mask;
if (cnt == 0)
return;
spk_punc_mask = PUNC;
buf[cnt++] = SPACE;
spkup_write(buf, cnt);
spk_punc_mask = saved_punc_mask;
}
/*
 * say_prev_word - move the reading cursor to the previous word and
 * speak it.
 *
 * Scans backwards using a small state machine:
 *   state 0 = on space/NUL, 1 = on a word delimiter, 2 = in a word.
 * The scan stops when the state drops (we just stepped past the start
 * of a word) and backs up one cell.  Edge crossings are announced via
 * announce_edge(); edge_quiet marks a wrap that should stay silent.
 */
static void say_prev_word(struct vc_data *vc)
{
	u_char temp;
	char ch;
	u_short edge_said = 0, last_state = 0, state = 0;

	spk_parked |= 0x01;
	if (spk_x == 0) {
		if (spk_y == 0) {
			announce_edge(vc, edge_top);
			return;
		}
		/* wrap to the end of the previous line, silently */
		spk_y--;
		spk_x = vc->vc_cols;
		edge_said = edge_quiet;
	}
	while (1) {
		if (spk_x == 0) {
			if (spk_y == 0) {
				edge_said = edge_top;
				break;
			}
			if (edge_said != edge_quiet)
				edge_said = edge_left;
			if (state > 0)
				break;	/* word began exactly at column 0 */
			spk_y--;
			spk_x = vc->vc_cols - 1;
		} else
			spk_x--;
		spk_pos -= 2;
		ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
		if (ch == SPACE || ch == 0)
			state = 0;
		else if (IS_WDLM(ch))
			state = 1;
		else
			state = 2;
		if (state < last_state) {
			/* stepped past the start of the word: back up */
			spk_pos += 2;
			spk_x++;
			break;
		}
		last_state = state;
	}
	if (spk_x == 0 && edge_said == edge_quiet)
		edge_said = edge_left;
	if (edge_said > 0 && edge_said < edge_quiet)
		announce_edge(vc, edge_said);
	say_word(vc);
}
/*
 * say_next_word - move the reading cursor to the next word and speak it.
 *
 * Mirror image of say_prev_word(): scans forward with the same
 * space/delimiter/word state machine and stops when the state rises
 * (a new word has begun).  Wraps to the next line at the right edge
 * and announces bottom/right edges as appropriate.
 */
static void say_next_word(struct vc_data *vc)
{
	u_char temp;
	char ch;
	u_short edge_said = 0, last_state = 2, state = 0;

	spk_parked |= 0x01;
	if (spk_x == vc->vc_cols - 1 && spk_y == vc->vc_rows - 1) {
		announce_edge(vc, edge_bottom);
		return;
	}
	while (1) {
		ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
		if (ch == SPACE || ch == 0)
			state = 0;
		else if (IS_WDLM(ch))
			state = 1;
		else
			state = 2;
		if (state > last_state)
			break;	/* entered the next word */
		if (spk_x >= vc->vc_cols - 1) {
			if (spk_y == vc->vc_rows - 1) {
				edge_said = edge_bottom;
				break;
			}
			/* wrap to the start of the next line */
			state = 0;
			spk_y++;
			spk_x = 0;
			edge_said = edge_right;
		} else
			spk_x++;
		spk_pos += 2;
		last_state = state;
	}
	if (edge_said > 0)
		announce_edge(vc, edge_said);
	say_word(vc);
}
/*
 * spell_word - spell out the word under the reading cursor, one
 * character at a time, with a configurable pause between letters
 * (spk_spell_delay indexes delay_str).  In SPELL_PHONETIC mode letters
 * are spoken as their phonetic alphabet words.  Caps transitions emit
 * the caps start/stop strings only when the state actually changes.
 */
static void spell_word(struct vc_data *vc)
{
	static char *delay_str[] = { "", ",", ".", ". .", ". . ." };
	char *cp = buf, *str_cap = spk_str_caps_stop;
	char *cp1, *last_cap = spk_str_caps_stop;
	u_char ch;

	if (!get_word(vc))
		return;
	while ((ch = (u_char) *cp)) {
		if (cp != buf)
			synth_printf(" %s ", delay_str[spk_spell_delay]);
		if (IS_CHAR(ch, B_CAP)) {
			str_cap = spk_str_caps_start;
			if (*spk_str_caps_stop)
				spk_pitch_shift++;
			else /* synth has no pitch */
				/* NOTE(review): this resets last_cap so the
				 * caps-start string is re-sent for every
				 * capital; looks intentional for pitchless
				 * synths, but confirm. */
				last_cap = spk_str_caps_stop;
		} else
			str_cap = spk_str_caps_stop;
		if (str_cap != last_cap) {
			/* caps state changed: announce it once */
			synth_printf("%s", str_cap);
			last_cap = str_cap;
		}
		if (this_speakup_key == SPELL_PHONETIC
		    && (isascii(ch) && isalpha(ch))) {
			/* fold case to 1..26, then 0-base into phonetic[] */
			ch &= 31;
			cp1 = phonetic[--ch];
		} else {
			cp1 = spk_characters[ch];
			/* '^' prefix marks a control character */
			if (*cp1 == '^') {
				synth_printf("%s", spk_msg_get(MSG_CTRL));
				cp1++;
			}
		}
		synth_printf("%s", cp1);
		cp++;
	}
	if (str_cap != spk_str_caps_stop)
		synth_printf("%s", spk_str_caps_stop);
}
/*
 * get_line - copy the reading-cursor line into buf.
 *
 * Returns the line length with trailing spaces stripped (0 for a blank
 * line).  Also refreshes spk_attr from the cell under the cursor.
 */
static int get_line(struct vc_data *vc)
{
	u_long cell = spk_pos - (spk_x * 2);	/* start of this row */
	int col;
	u_char discard;

	spk_old_attr = spk_attr;
	spk_attr = get_attributes((u_short *) spk_pos);
	for (col = 0; col < vc->vc_cols; col++) {
		buf[col] = (u_char) get_char(vc, (u_short *) cell, &discard);
		cell += 2;
	}
	/* trim trailing spaces */
	for (--col; col >= 0; col--)
		if (buf[col] != SPACE)
			break;
	return ++col;
}
/*
 * say_line - speak the line under the reading cursor using the
 * reading punctuation level.  For SAY_LINE_INDENT the 1-based column
 * of the first non-space character is announced first.
 */
static void say_line(struct vc_data *vc)
{
	int i = get_line(vc);
	char *cp;
	u_short saved_punc_mask = spk_punc_mask;

	if (i == 0) {
		synth_printf("%s\n", spk_msg_get(MSG_BLANK));
		return;
	}
	buf[i++] = '\n';
	if (this_speakup_key == SAY_LINE_INDENT) {
		cp = buf;
		while (*cp == SPACE)
			cp++;
		/*
		 * cp - buf is ptrdiff_t (64-bit on LP64); "%d" expects an
		 * int, so cast explicitly instead of passing the raw
		 * pointer difference through varargs.
		 */
		synth_printf("%d, ", (int)(cp - buf) + 1);
	}
	spk_punc_mask = spk_punc_masks[spk_reading_punc];
	spkup_write(buf, i);
	spk_punc_mask = saved_punc_mask;
}
/* Move the reading cursor up one row and speak that line; announce the
 * top edge instead when already on row 0. */
static void say_prev_line(struct vc_data *vc)
{
	spk_parked |= 0x01;
	if (spk_y == 0) {
		announce_edge(vc, edge_top);
		return;
	}
	spk_y--;
	spk_pos -= vc->vc_size_row;
	say_line(vc);
}
/* Move the reading cursor down one row and speak that line; announce
 * the bottom edge instead when already on the last row. */
static void say_next_line(struct vc_data *vc)
{
	spk_parked |= 0x01;
	if (spk_y == vc->vc_rows - 1) {
		announce_edge(vc, edge_bottom);
		return;
	}
	spk_y++;
	spk_pos += vc->vc_size_row;
	say_line(vc);
}
/*
 * say_from_to - speak the screen text between video-memory addresses
 * @from (inclusive) and @to (exclusive).
 *
 * Trailing spaces are trimmed; one space and a NUL terminator are
 * appended.  When @read_punc is set, the reading punctuation level is
 * applied for the duration of the call.  Returns the number of
 * characters spoken minus one (so <= 0 means "nothing but blanks").
 */
static int say_from_to(struct vc_data *vc, u_long from, u_long to,
		       int read_punc)
{
	int i = 0;
	u_char tmp;
	u_short saved_punc_mask = spk_punc_mask;

	spk_old_attr = spk_attr;
	spk_attr = get_attributes((u_short *) from);
	while (from < to) {
		buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
		from += 2;
		if (i >= vc->vc_size_row)
			break;	/* never overrun one row's worth of buf */
	}
	/* trim trailing spaces */
	for (--i; i >= 0; i--)
		if (buf[i] != SPACE)
			break;
	buf[++i] = SPACE;
	buf[++i] = '\0';
	if (i < 1)
		return i;
	if (read_punc)
		spk_punc_mask = spk_punc_info[spk_reading_punc].mask;
	spkup_write(buf, i);
	if (read_punc)
		spk_punc_mask = saved_punc_mask;
	return i - 1;
}
/* Speak columns [from, to) of the current reading-cursor row; say
 * "blank" when nothing printable is there (unless in read-all mode). */
static void say_line_from_to(struct vc_data *vc, u_long from, u_long to,
			     int read_punc)
{
	u_long row = vc->vc_origin + (spk_y * vc->vc_size_row);
	u_long first = row + from * 2;
	u_long last = row + to * 2;

	if (say_from_to(vc, first, last, read_punc) <= 0)
		if (cursor_track != read_all_mode)
			synth_printf("%s\n", spk_msg_get(MSG_BLANK));
}
/* Sentence Reading Commands */
/* State for read-all-document mode: screen lines are split into
 * sentences in one of two alternating buffers. */
static int currsentence;	/* 1 + index of the sentence being spoken */
static int numsentences[2];	/* sentences found in each buffer */
static char *sentbufend[2];	/* end of valid text in each buffer */
static char *sentmarks[2][10];	/* start of each sentence in a buffer */
static int currbuf;		/* buffer most recently filled */
static int bn;			/* buffer currently being spoken */
static char sentbuf[2][256];
/*
 * say_sentence_num - speak sentence @num from the current buffer, or
 * from the previously filled buffer when @prev is set.
 * Returns 0 when @num is past the last sentence found, 1 on success.
 */
static int say_sentence_num(int num, int prev)
{
	bn = currbuf;
	currsentence = num + 1;
	if (prev && --bn == -1)
		bn = 1;	/* wrap back to the other of the two buffers */
	if (num > numsentences[bn])
		return 0;
	spkup_write(sentmarks[bn][num], sentbufend[bn] - sentmarks[bn][num]);
	return 1;
}
/*
 * get_sentence_buf - copy the reading-cursor line into the next
 * sentence buffer and mark sentence boundaries (". " sequences, up to
 * 9 of them).  Trailing spaces are trimmed.
 * Returns the number of sentence boundaries found, or -1 when the
 * line is blank.
 */
static int get_sentence_buf(struct vc_data *vc, int read_punc)
{
	u_long start, end;
	int i, bn;
	u_char tmp;

	/* alternate between the two buffers */
	currbuf++;
	if (currbuf == 2)
		currbuf = 0;
	bn = currbuf;
	start = vc->vc_origin + ((spk_y) * vc->vc_size_row);
	end = vc->vc_origin + ((spk_y) * vc->vc_size_row) + vc->vc_cols * 2;
	numsentences[bn] = 0;
	sentmarks[bn][0] = &sentbuf[bn][0];
	i = 0;
	spk_old_attr = spk_attr;
	spk_attr = get_attributes((u_short *) start);
	while (start < end) {
		sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
		if (i > 0) {
			if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.'
			    && numsentences[bn] < 9) {
				/* Sentence Marker */
				numsentences[bn]++;
				sentmarks[bn][numsentences[bn]] =
				    &sentbuf[bn][i];
			}
		}
		i++;
		start += 2;
		if (i >= vc->vc_size_row)
			break;	/* never overrun sentbuf */
	}
	/* trim trailing spaces */
	for (--i; i >= 0; i--)
		if (sentbuf[bn][i] != SPACE)
			break;
	if (i < 1)
		return -1;	/* blank line */
	sentbuf[bn][++i] = SPACE;
	sentbuf[bn][++i] = '\0';
	sentbufend[bn] = &sentbuf[bn][i];
	return numsentences[bn];
}
/*
 * say_screen_from_to - speak screen rows [from, to).
 * @to is clamped to the row count.  Note the loop deliberately reuses
 * @from and @to as video-memory addresses once iteration begins.
 */
static void say_screen_from_to(struct vc_data *vc, u_long from, u_long to)
{
	u_long start = vc->vc_origin, end;

	if (from > 0)
		start += from * vc->vc_size_row;
	if (to > vc->vc_rows)
		to = vc->vc_rows;
	end = vc->vc_origin + (to * vc->vc_size_row);
	for (from = start; from < end; from = to) {
		to = from + vc->vc_size_row;
		say_from_to(vc, from, to, 1);
	}
}
/* Speak the entire screen, one row at a time. */
static void say_screen(struct vc_data *vc)
{
	say_screen_from_to(vc, 0, vc->vc_rows);
}
/* Speak the contents of the user-defined window; requires both window
 * corners to have been set (win_start >= 2). */
static void speakup_win_say(struct vc_data *vc)
{
	u_long row, last_row, from, to;

	if (win_start < 2) {
		synth_printf("%s\n", spk_msg_get(MSG_NO_WINDOW));
		return;
	}
	row = vc->vc_origin + (win_top * vc->vc_size_row);
	last_row = vc->vc_origin + (win_bottom * vc->vc_size_row);
	for (; row <= last_row; row += vc->vc_size_row) {
		from = row + (win_left * 2);
		to = row + (win_right * 2);
		say_from_to(vc, from, to, 1);
	}
}
/* Jump the reading cursor to row 0 (same column) and speak that line. */
static void top_edge(struct vc_data *vc)
{
	spk_parked |= 0x01;
	spk_pos = vc->vc_origin + 2 * spk_x;
	spk_y = 0;
	say_line(vc);
}
/* Jump the reading cursor to the last row (same column) and speak it. */
static void bottom_edge(struct vc_data *vc)
{
	spk_parked |= 0x01;
	spk_pos += (vc->vc_rows - spk_y - 1) * vc->vc_size_row;
	spk_y = vc->vc_rows - 1;
	say_line(vc);
}
/* Jump the reading cursor to column 0 and speak the character there. */
static void left_edge(struct vc_data *vc)
{
	spk_parked |= 0x01;
	spk_pos -= spk_x * 2;
	spk_x = 0;
	say_char(vc);
}
/* Jump the reading cursor to the last column and speak the character. */
static void right_edge(struct vc_data *vc)
{
	spk_parked |= 0x01;
	spk_pos += (vc->vc_cols - spk_x - 1) * 2;
	spk_x = vc->vc_cols - 1;
	say_char(vc);
}
/* Move the reading cursor to the first non-space character of the
 * current line, announce its 1-based column, and speak it. */
static void say_first_char(struct vc_data *vc)
{
	int col, len = get_line(vc);
	u_char ch;

	spk_parked |= 0x01;
	if (len == 0) {
		synth_printf("%s\n", spk_msg_get(MSG_BLANK));
		return;
	}
	for (col = 0; col < len; col++)
		if (buf[col] != SPACE)
			break;
	ch = buf[col];
	spk_pos -= (spk_x - col) * 2;
	spk_x = col;
	synth_printf("%d, ", col + 1);
	speak_char(ch);
}
/* Move the reading cursor to the last non-space character of the
 * current line, announce its 1-based column, and speak it. */
static void say_last_char(struct vc_data *vc)
{
	int col = get_line(vc);
	u_char ch;

	spk_parked |= 0x01;
	if (col == 0) {
		synth_printf("%s\n", spk_msg_get(MSG_BLANK));
		return;
	}
	col--;			/* index of last non-space character */
	ch = buf[col];
	spk_pos -= (spk_x - col) * 2;
	spk_x = col;
	synth_printf("%d, ", col + 1);
	speak_char(ch);
}
/* Announce the reading cursor's row, column and console number
 * (all reported 1-based). */
static void say_position(struct vc_data *vc)
{
	synth_printf(spk_msg_get(MSG_POS_INFO), spk_y + 1, spk_x + 1,
		     vc->vc_num + 1);
	synth_printf("\n");
}
/* Added by brianb */
/* Announce the numeric value of the character under the cursor. */
static void say_char_num(struct vc_data *vc)
{
	u_char tmp;
	u_short ch = get_char(vc, (u_short *) spk_pos, &tmp) & 0xff;

	synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
}
/* these are stub functions to keep keyboard.c happy. */
/* Speak everything above the reading-cursor row. */
static void say_from_top(struct vc_data *vc)
{
	say_screen_from_to(vc, 0, spk_y);
}
/* Speak from the reading-cursor row to the bottom of the screen. */
static void say_to_bottom(struct vc_data *vc)
{
	say_screen_from_to(vc, spk_y, vc->vc_rows);
}
/* Speak the current line left of the reading cursor. */
static void say_from_left(struct vc_data *vc)
{
	say_line_from_to(vc, 0, spk_x, 1);
}
/* Speak the current line from the reading cursor to the right edge. */
static void say_to_right(struct vc_data *vc)
{
	say_line_from_to(vc, spk_x, vc->vc_cols, 1);
}
/* end of stub functions. */
/*
 * spkup_write - speak a buffer of screen text, character by character.
 *
 * Character handling is driven by spk_chartab[]: letters are sent
 * literally, digits always spoken, punctuation spoken by name when in
 * spk_punc_mask, and SYNTH_OK characters passed through for the synth's
 * own expression.  Runs of the same non-numeric character are collapsed
 * after two occurrences, with a repeat-count announcement for CH_RPT
 * types.  Repeat/previous-character state is static and carries across
 * calls.  In read-all mode an index marker is inserted at each sentence
 * boundary so reading progress can be tracked.
 */
static void spkup_write(const char *in_buf, int count)
{
	static int rep_count;
	static u_char ch = '\0', old_ch = '\0';
	static u_short char_type, last_type;
	int in_count = count;

	spk_keydown = 0;
	while (count--) {
		if (cursor_track == read_all_mode) {
			/* Insert Sentence Index */
			if ((in_buf == sentmarks[bn][currsentence]) &&
			    (currsentence <= numsentences[bn]))
				synth_insert_next_index(currsentence++);
		}
		ch = (u_char) *in_buf++;
		char_type = spk_chartab[ch];
		if (ch == old_ch && !(char_type & B_NUM)) {
			/* swallow the third and later repeats */
			if (++rep_count > 2)
				continue;
		} else {
			/* repeat run ended: announce how long it was */
			if ((last_type & CH_RPT) && rep_count > 2) {
				synth_printf(" ");
				synth_printf(spk_msg_get(MSG_REPEAT_DESC),
					     ++rep_count);
				synth_printf(" ");
			}
			rep_count = 0;
		}
		if (ch == spk_lastkey) {
			rep_count = 0;
			if (spk_key_echo == 1 && ch >= MINECHOCHAR)
				speak_char(ch);
		} else if (char_type & B_ALPHA) {
			if ((synth_flags & SF_DEC) && (last_type & PUNC))
				synth_buffer_add(SPACE);
			synth_printf("%c", ch);
		} else if (char_type & B_NUM) {
			rep_count = 0;
			synth_printf("%c", ch);
		} else if (char_type & spk_punc_mask) {
			speak_char(ch);
			char_type &= ~PUNC;	/* for dec nospell processing */
		} else if (char_type & SYNTH_OK) {
			/* these are usually puncts like . and , which synth
			 * needs for expression.
			 * suppress multiple to get rid of long pauses and
			 * clear repeat count
			 * so if someone has
			 * repeats on you don't get nothing repeated count */
			if (ch != old_ch)
				synth_printf("%c", ch);
			else
				rep_count = 0;
		} else {
			/* send space and record position, if next is num
			 * overwrite space */
			if (old_ch != ch)
				synth_buffer_add(SPACE);
			else
				rep_count = 0;
		}
		old_ch = ch;
		last_type = char_type;
	}
	spk_lastkey = 0;
	/* buffer ended inside a repeat run: announce the count now */
	if (in_count > 2 && rep_count > 2) {
		if (last_type & CH_RPT) {
			synth_printf(" ");
			synth_printf(spk_msg_get(MSG_REPEAT_DESC2),
				     ++rep_count);
			synth_printf(" ");
		}
		rep_count = 0;
	}
}
/* Number of modifier-key name messages (MSG_CTL_START..MSG_CTL_END). */
static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);

static void read_all_doc(struct vc_data *vc);
static void cursor_done(u_long data);
/* Delays speech until cursor movement has settled (see do_handle_cursor). */
static DEFINE_TIMER(cursor_timer, cursor_done, 0, 0);
/*
 * do_handle_shift - react to a modifier key press.
 *
 * In read-all mode, Shift restarts document reading and Ctrl cancels
 * it; otherwise a modifier press just silences pending speech.  If
 * spk_say_ctrl is on, the modifier's name is spoken.
 */
static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
{
	unsigned long flags;

	if (synth == NULL || up_flag || spk_killed)
		return;
	spk_lock(flags);
	if (cursor_track == read_all_mode) {
		switch (value) {
		case KVAL(K_SHIFT):
			/* restart reading from the current position */
			del_timer(&cursor_timer);
			spk_shut_up &= 0xfe;
			spk_do_flush();
			read_all_doc(vc);
			break;
		case KVAL(K_CTRL):
			/* cancel read-all mode */
			del_timer(&cursor_timer);
			cursor_track = prev_cursor_track;
			spk_shut_up &= 0xfe;
			spk_do_flush();
			break;
		}
	} else {
		spk_shut_up &= 0xfe;
		spk_do_flush();
	}
	if (spk_say_ctrl && value < NUM_CTL_LABELS)
		synth_printf("%s", spk_msg_get(MSG_CTL_START + value));
	spk_unlock(flags);
}
/* Track ordinary (latin) key presses: remember the last key for echo
 * suppression in spkup_write(), and echo it directly when key_echo==2. */
static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
{
	unsigned long flags;

	spk_lock(flags);
	if (up_flag) {
		/* key release: just clear the pending-key state */
		spk_lastkey = 0;
		spk_keydown = 0;
		spk_unlock(flags);
		return;
	}
	if (synth == NULL || spk_killed) {
		spk_unlock(flags);
		return;
	}
	spk_shut_up &= 0xfe;
	spk_lastkey = value;
	spk_keydown++;
	spk_parked &= 0xfe;
	if (spk_key_echo == 2 && value >= MINECHOCHAR)
		speak_char(value);
	spk_unlock(flags);
}
/*
 * spk_set_key_info - install a new speakup key map.
 *
 * @key_info layout: version byte, number of keys, number of shift
 * states, then (states+1)*(num_keys+1) bytes of key data.  The map is
 * copied into @k_buffer after a SHIFT_TBL_SIZE shift-translation table,
 * and spk_our_keys[] is pointed at the per-key rows.
 *
 * Returns 0 on success; -1 bad version, -2 map too large for
 * spk_key_buf, -3 shift state out of range, -4 key code out of range.
 */
int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
{
	int i = 0, states, key_data_len;
	const u_char *cp = key_info;
	u_char *cp1 = k_buffer;
	u_char ch, version, num_keys;

	version = *cp++;
	if (version != KEY_MAP_VER)
		return -1;
	num_keys = *cp;
	states = (int)cp[1];
	key_data_len = (states + 1) * (num_keys + 1);
	if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf))
		return -2;
	memset(k_buffer, 0, SHIFT_TBL_SIZE);
	memset(spk_our_keys, 0, sizeof(spk_our_keys));
	spk_shift_table = k_buffer;
	spk_our_keys[0] = spk_shift_table;
	cp1 += SHIFT_TBL_SIZE;
	memcpy(cp1, cp, key_data_len + 3);
	/* get num_keys, states and data */
	cp1 += 2;		/* now pointing at shift states */
	/* build the shift-value -> state-index translation table */
	for (i = 1; i <= states; i++) {
		ch = *cp1++;
		if (ch >= SHIFT_TBL_SIZE)
			return -3;
		spk_shift_table[ch] = i;
	}
	keymap_flags = *cp1++;
	/* index each key's row of per-shift-state assignments */
	while ((ch = *cp1)) {
		if (ch >= MAX_KEY)
			return -4;
		spk_our_keys[ch] = cp1;
		cp1 += states + 1;
	}
	return 0;
}
/* Core speakup variables and their numeric ranges (see struct var_t;
 * the .u.n initializers carry default/low/high values — confirm field
 * order against spk_types.h). */
static struct var_t spk_vars[] = {
	/* bell must be first to set high limit */
	{BELL_POS, .u.n = {NULL, 0, 0, 0, 0, 0, NULL} },
	{SPELL_DELAY, .u.n = {NULL, 0, 0, 4, 0, 0, NULL} },
	{ATTRIB_BLEEP, .u.n = {NULL, 1, 0, 3, 0, 0, NULL} },
	{BLEEPS, .u.n = {NULL, 3, 0, 3, 0, 0, NULL} },
	{BLEEP_TIME, .u.n = {NULL, 30, 1, 200, 0, 0, NULL} },
	{PUNC_LEVEL, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
	{READING_PUNC, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
	{CURSOR_TIME, .u.n = {NULL, 120, 50, 600, 0, 0, NULL} },
	{SAY_CONTROL, TOGGLE_0},
	{SAY_WORD_CTL, TOGGLE_0},
	{NO_INTERRUPT, TOGGLE_0},
	{KEY_ECHO, .u.n = {NULL, 1, 0, 2, 0, 0, NULL} },
	V_LAST_VAR
};
/* Cycle to the next cursor-tracking mode and announce it. */
static void toggle_cursoring(struct vc_data *vc)
{
	if (cursor_track == read_all_mode)
		cursor_track = prev_cursor_track;
	cursor_track++;
	if (cursor_track >= CT_Max)
		cursor_track = 0;
	synth_printf("%s\n", spk_msg_get(MSG_CURSOR_MSGS_START + cursor_track));
}
/* Restore the default character-name table, freeing any entries the
 * user had overridden. */
void spk_reset_default_chars(void)
{
	int i;

	/* First, free any non-default */
	for (i = 0; i < 256; i++)
		if (spk_characters[i] != NULL &&
		    spk_characters[i] != spk_default_chars[i])
			kfree(spk_characters[i]);

	memcpy(spk_characters, spk_default_chars, sizeof(spk_default_chars));
}
/* Restore the default character-type table. */
void spk_reset_default_chartab(void)
{
	memcpy(spk_chartab, default_chartab, sizeof(default_chartab));
}
/* Punctuation-bits table currently being edited via edit_bits(). */
static const struct st_bits_data *pb_edit;
/*
 * edit_bits - special-key handler active while editing a punctuation
 * set (installed by speakup_bits()).
 *
 * A printable key toggles that character's membership in the set being
 * edited and announces on/off; SPACE finishes editing.  Returns 1 when
 * the key was consumed, -1 to let normal processing continue.
 */
static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
	short mask = pb_edit->mask, ch_type = spk_chartab[ch];

	if (type != KT_LATIN || (ch_type & B_NUM) || ch < SPACE)
		return -1;
	if (ch == SPACE) {
		/* space ends the editing session */
		synth_printf("%s\n", spk_msg_get(MSG_EDIT_DONE));
		spk_special_handler = NULL;
		return 1;
	}
	if (mask < PUNC && !(ch_type & PUNC))
		return -1;
	spk_chartab[ch] ^= mask;
	speak_char(ch);
	synth_printf(" %s\n",
		     (spk_chartab[ch] & mask) ? spk_msg_get(MSG_ON) :
		     spk_msg_get(MSG_OFF));
	return 1;
}
/* Allocation concurrency is protected by the console semaphore */
/* Allocate per-console speakup state on first use, and refresh the
 * reading cursor unless it is parked.  Returns 0 or -ENOMEM. */
int speakup_allocate(struct vc_data *vc)
{
	int vc_num = vc->vc_num;

	if (speakup_console[vc_num] == NULL) {
		speakup_console[vc_num] = kzalloc(sizeof(*speakup_console[0]),
						  GFP_ATOMIC);
		if (speakup_console[vc_num] == NULL)
			return -ENOMEM;
		speakup_date(vc);
	} else if (!spk_parked) {
		speakup_date(vc);
	}
	return 0;
}
/* Release per-console speakup state when the console goes away. */
void speakup_deallocate(struct vc_data *vc)
{
	int vc_num = vc->vc_num;

	kfree(speakup_console[vc_num]);
	speakup_console[vc_num] = NULL;
}
static u_char is_cursor;	/* nonzero while a cursor key is pending */
static u_long old_cursor_pos, old_cursor_x, old_cursor_y;
static int cursor_con;		/* console the cursor timer was armed for */

static void reset_highlight_buffers(struct vc_data *);

static int read_all_key;	/* command to run when the timer fires
				 * in read-all mode */

static void start_read_all_timer(struct vc_data *vc, int command);
/* Commands driving read-all-document mode; dispatched by
 * handle_cursor_read_all() when the cursor timer fires. */
enum {
	RA_NOTHING,
	RA_NEXT_SENT,		/* advance to the next sentence */
	RA_PREV_LINE,
	RA_NEXT_LINE,
	RA_PREV_SENT,
	RA_DOWN_ARROW,		/* move down a line via a faked key press */
	RA_TIMER,		/* periodic tick while speech is playing */
	RA_FIND_NEXT_SENT,
	RA_FIND_PREV_SENT,
};
/* Fake a down-arrow key press to scroll the reading position, then
 * re-arm the read-all timer with @command as the follow-up action. */
static void kbd_fakekey2(struct vc_data *vc, int command)
{
	del_timer(&cursor_timer);
	speakup_fake_down_arrow();
	start_read_all_timer(vc, command);
}
/*
 * read_all_doc - enter read-all-document mode on the foreground
 * console, starting from the current reading line.
 *
 * Requires a synth that supports index markers.  A blank starting line
 * triggers a fake down-arrow to find text; otherwise the first
 * sentence is spoken and the read-all timer armed.
 */
static void read_all_doc(struct vc_data *vc)
{
	if ((vc->vc_num != fg_console) || synth == NULL || spk_shut_up)
		return;
	if (!synth_supports_indexing())
		return;
	if (cursor_track != read_all_mode)
		prev_cursor_track = cursor_track;	/* restore on exit */
	cursor_track = read_all_mode;
	spk_reset_index_count(0);
	if (get_sentence_buf(vc, 0) == -1)
		kbd_fakekey2(vc, RA_DOWN_ARROW);	/* blank line: scroll */
	else {
		say_sentence_num(0, 0);
		synth_insert_next_index(0);
		start_read_all_timer(vc, RA_TIMER);
	}
}
/* Leave read-all mode: cancel the timer, restore the previous
 * cursor-tracking mode, unmute and flush pending speech. */
static void stop_read_all(struct vc_data *vc)
{
	del_timer(&cursor_timer);
	cursor_track = prev_cursor_track;
	spk_shut_up &= 0xfe;
	spk_do_flush();
}
static void start_read_all_timer(struct vc_data *vc, int command)
{
struct var_t *cursor_timeout;
cursor_con = vc->vc_num;
read_all_key = command;
cursor_timeout = spk_get_var(CURSOR_TIME);
mod_timer(&cursor_timer,
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
}
/*
 * handle_cursor_read_all - state machine driving read-all-document
 * mode; called from cursor_done() with the queued read_all_key command.
 *
 * Uses the synth's index-marker feedback (spk_get_index_count) to know
 * which sentence is currently being spoken, queues the next sentence
 * ahead of time, and fakes down-arrow presses to scroll when the
 * current screen line is exhausted.
 */
static void handle_cursor_read_all(struct vc_data *vc, int command)
{
	int indcount, sentcount, rv, sn;

	switch (command) {
	case RA_NEXT_SENT:
		/* Get Current Sentence */
		spk_get_index_count(&indcount, &sentcount);
		/*printk("%d %d ", indcount, sentcount); */
		spk_reset_index_count(sentcount + 1);
		if (indcount == 1) {
			/* only one marker pending: queue the next sentence */
			if (!say_sentence_num(sentcount + 1, 0)) {
				kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
				return;
			}
			synth_insert_next_index(0);
		} else {
			/* more than one marker: resync from the previous
			 * buffer, then speak the following sentence */
			sn = 0;
			if (!say_sentence_num(sentcount + 1, 1)) {
				sn = 1;
				spk_reset_index_count(sn);
			} else
				synth_insert_next_index(0);
			if (!say_sentence_num(sn, 0)) {
				kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
				return;
			}
			synth_insert_next_index(0);
		}
		start_read_all_timer(vc, RA_TIMER);
		break;
	case RA_PREV_SENT:
		break;
	case RA_NEXT_LINE:
		read_all_doc(vc);
		break;
	case RA_PREV_LINE:
		break;
	case RA_DOWN_ARROW:
		if (get_sentence_buf(vc, 0) == -1) {
			/* still blank: keep scrolling */
			kbd_fakekey2(vc, RA_DOWN_ARROW);
		} else {
			say_sentence_num(0, 0);
			synth_insert_next_index(0);
			start_read_all_timer(vc, RA_TIMER);
		}
		break;
	case RA_FIND_NEXT_SENT:
		rv = get_sentence_buf(vc, 0);
		if (rv == -1)
			read_all_doc(vc);	/* blank line: restart scan */
		if (rv == 0)
			kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
		else {
			say_sentence_num(1, 0);
			synth_insert_next_index(0);
			start_read_all_timer(vc, RA_TIMER);
		}
		break;
	case RA_FIND_PREV_SENT:
		break;
	case RA_TIMER:
		/* periodic tick: scroll when the queue is nearly drained */
		spk_get_index_count(&indcount, &sentcount);
		if (indcount < 2)
			kbd_fakekey2(vc, RA_DOWN_ARROW);
		else
			start_read_all_timer(vc, RA_TIMER);
		break;
	}
}
/*
 * pre_handle_cursor - keyboard-notifier hook run before a cursor key is
 * processed.  In read-all mode the key is swallowed (NOTIFY_STOP) and
 * converted into a read-all command (value + 1 matches the RA_* enum
 * ordering); otherwise normal processing continues (NOTIFY_OK).
 */
static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
{
	unsigned long flags;

	spk_lock(flags);
	if (cursor_track == read_all_mode) {
		spk_parked &= 0xfe;
		if (synth == NULL || up_flag || spk_shut_up) {
			spk_unlock(flags);
			return NOTIFY_STOP;
		}
		del_timer(&cursor_timer);
		spk_shut_up &= 0xfe;
		spk_do_flush();
		start_read_all_timer(vc, value + 1);
		spk_unlock(flags);
		return NOTIFY_STOP;
	}
	spk_unlock(flags);
	return NOTIFY_OK;
}
/*
 * do_handle_cursor - react to a cursor-movement key.
 *
 * Records where the cursor was before the move and arms cursor_timer;
 * when it fires, cursor_done() decides what to speak.  is_cursor
 * encodes which cursor key was pressed (value + 1).
 */
static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
{
	unsigned long flags;
	struct var_t *cursor_timeout;

	spk_lock(flags);
	spk_parked &= 0xfe;
	if (synth == NULL || up_flag || spk_shut_up || cursor_track == CT_Off) {
		spk_unlock(flags);
		return;
	}
	spk_shut_up &= 0xfe;
	if (spk_no_intr)
		spk_do_flush();
	/* the key press flushes if !no_inter but we want to flush on cursor
	 * moves regardless of no_inter state */
	is_cursor = value + 1;
	old_cursor_pos = vc->vc_pos;
	old_cursor_x = vc->vc_x;
	old_cursor_y = vc->vc_y;
	speakup_console[vc->vc_num]->ht.cy = vc->vc_y;
	cursor_con = vc->vc_num;
	if (cursor_track == CT_Highlight)
		reset_highlight_buffers(vc);
	cursor_timeout = spk_get_var(CURSOR_TIME);
	mod_timer(&cursor_timer,
		  jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
	spk_unlock(flags);
}
/*
 * update_color_buffer - accumulate console output into the highlight
 * buffer for its background colour.
 *
 * Text is bucketed by background colour (bits 4-6 of vc_attr) so
 * speak_highlight() can later pick out "highlighted" text.  The first
 * write into an empty bucket records the on-screen position where that
 * text started; runs of spaces are collapsed to one.
 */
static void update_color_buffer(struct vc_data *vc, const char *ic, int len)
{
	int i, bi, hi;
	int vc_num = vc->vc_num;

	bi = ((vc->vc_attr & 0x70) >> 4);	/* background colour bucket */
	hi = speakup_console[vc_num]->ht.highsize[bi];

	i = 0;
	if (speakup_console[vc_num]->ht.highsize[bi] == 0) {
		/* first text for this bucket: remember where it began */
		speakup_console[vc_num]->ht.rpos[bi] = vc->vc_pos;
		speakup_console[vc_num]->ht.rx[bi] = vc->vc_x;
		speakup_console[vc_num]->ht.ry[bi] = vc->vc_y;
	}
	while ((hi < COLOR_BUFFER_SIZE) && (i < len)) {
		if ((ic[i] > 32) && (ic[i] < 127)) {
			/* printable ASCII: keep it */
			speakup_console[vc_num]->ht.highbuf[bi][hi] = ic[i];
			hi++;
		} else if ((ic[i] == 32) && (hi != 0)) {
			/* keep at most one space in a row */
			if (speakup_console[vc_num]->ht.highbuf[bi][hi - 1] !=
			    32) {
				speakup_console[vc_num]->ht.highbuf[bi][hi] =
				    ic[i];
				hi++;
			}
		}
		i++;
	}
	speakup_console[vc_num]->ht.highsize[bi] = hi;
}
/* Empty all eight per-background-colour highlight buffers. */
static void reset_highlight_buffers(struct vc_data *vc)
{
	int bucket;
	int vc_num = vc->vc_num;

	for (bucket = 0; bucket < 8; bucket++)
		speakup_console[vc_num]->ht.highsize[bucket] = 0;
}
/*
 * count_highlight_color - count how many distinct background colours
 * appear on the screen.
 *
 * Each character cell is one u16 (glyph + attribute byte), so a row is
 * vc_cols cells wide and vc_size_row BYTES long.  The original code
 * computed the row end as "start + vc_cols * 2" in u16 units and
 * stepped "start += vc_size_row" in u16 units — both span TWO rows, so
 * the scan double-counted and read a full screen past vc_origin.
 */
static int count_highlight_color(struct vc_data *vc)
{
	int i, bg;
	int cc;
	int vc_num = vc->vc_num;
	u16 ch;
	u16 *start = (u16 *) vc->vc_origin;

	for (i = 0; i < 8; i++)
		speakup_console[vc_num]->ht.bgcount[i] = 0;

	for (i = 0; i < vc->vc_rows; i++) {
		u16 *end = start + vc->vc_cols;	/* one cell per column */
		u16 *ptr;

		for (ptr = start; ptr < end; ptr++) {
			ch = get_attributes(ptr);
			bg = (ch & 0x70) >> 4;
			speakup_console[vc_num]->ht.bgcount[bg]++;
		}
		/* vc_size_row is in bytes: advance exactly one row */
		start = (u16 *) ((char *)start + vc->vc_size_row);
	}

	cc = 0;
	for (i = 0; i < 8; i++)
		if (speakup_console[vc_num]->ht.bgcount[i] > 0)
			cc++;
	return cc;
}
/*
 * get_highlight_color - pick the background colour most likely to be a
 * "highlight": the least-used colour (by cell count) that still has
 * captured text in its highlight buffer.  Returns the colour index or
 * -1 when nothing qualifies.
 */
static int get_highlight_color(struct vc_data *vc)
{
	int i, j;
	unsigned int cptr[8], tmp;
	int vc_num = vc->vc_num;

	for (i = 0; i < 8; i++)
		cptr[i] = i;

	/* sort colour indices by ascending on-screen cell count */
	for (i = 0; i < 7; i++)
		for (j = i + 1; j < 8; j++)
			if (speakup_console[vc_num]->ht.bgcount[cptr[i]] >
			    speakup_console[vc_num]->ht.bgcount[cptr[j]]) {
				tmp = cptr[i];
				cptr[i] = cptr[j];
				cptr[j] = tmp;
			}

	/* rarest colour that actually has buffered text wins */
	for (i = 0; i < 8; i++)
		if (speakup_console[vc_num]->ht.bgcount[cptr[i]] != 0)
			if (speakup_console[vc_num]->ht.highsize[cptr[i]] > 0)
				return cptr[i];
	return -1;
}
/*
 * speak_highlight - speak the text captured for the highlight colour
 * and move the reading cursor to where that text started.
 *
 * Returns 1 when highlighted text was spoken, 0 otherwise (uniform
 * background, no candidate colour, or a plain one-line cursor move
 * that doesn't match the captured row).
 */
static int speak_highlight(struct vc_data *vc)
{
	int hc, d;
	int vc_num = vc->vc_num;

	if (count_highlight_color(vc) == 1)
		return 0;	/* single background: nothing highlighted */
	hc = get_highlight_color(vc);
	if (hc != -1) {
		d = vc->vc_y - speakup_console[vc_num]->ht.cy;
		if ((d == 1) || (d == -1))
			/* one-row cursor move: only speak if the captured
			 * text is on the row the cursor landed on */
			if (speakup_console[vc_num]->ht.ry[hc] != vc->vc_y)
				return 0;
		spk_parked |= 0x01;
		spk_do_flush();
		spkup_write(speakup_console[vc_num]->ht.highbuf[hc],
			    speakup_console[vc_num]->ht.highsize[hc]);
		spk_pos = spk_cp = speakup_console[vc_num]->ht.rpos[hc];
		spk_x = spk_cx = speakup_console[vc_num]->ht.rx[hc];
		spk_y = spk_cy = speakup_console[vc_num]->ht.ry[hc];
		return 1;
	}
	return 0;
}
/*
 * cursor_done - cursor_timer callback; runs once cursor movement has
 * settled and decides what to speak based on the tracking mode:
 * read-all commands, highlight tracking, window contents, the whole
 * line (for up/down moves, is_cursor 1 or 4) or a single character.
 */
static void cursor_done(u_long data)
{
	struct vc_data *vc = vc_cons[cursor_con].d;
	unsigned long flags;

	del_timer(&cursor_timer);
	spk_lock(flags);
	if (cursor_con != fg_console) {
		/* console switched since the timer was armed: drop it */
		is_cursor = 0;
		goto out;
	}
	speakup_date(vc);
	if (win_enabled) {
		/* cursor inside the silenced window: say nothing */
		if (vc->vc_x >= win_left && vc->vc_x <= win_right &&
		    vc->vc_y >= win_top && vc->vc_y <= win_bottom) {
			spk_keydown = is_cursor = 0;
			goto out;
		}
	}
	if (cursor_track == read_all_mode) {
		handle_cursor_read_all(vc, read_all_key);
		goto out;
	}
	if (cursor_track == CT_Highlight) {
		if (speak_highlight(vc)) {
			spk_keydown = is_cursor = 0;
			goto out;
		}
	}
	if (cursor_track == CT_Window)
		speakup_win_say(vc);
	else if (is_cursor == 1 || is_cursor == 4)
		say_line_from_to(vc, 0, vc->vc_cols, 0);
	else
		say_char(vc);
	spk_keydown = is_cursor = 0;
out:
	spk_unlock(flags);
}
/* called by: vt_notifier_call() */
/*
 * speakup_bs - handle a backspace on the console: re-sync the reading
 * cursor and speak the character now under it (unless a cursor key is
 * being handled).  Uses a trylock because it can be called while
 * speakup itself is writing output.
 */
static void speakup_bs(struct vc_data *vc)
{
	unsigned long flags;

	if (!speakup_console[vc->vc_num])
		return;
	if (!spk_trylock(flags))
		/* Speakup output, discard */
		return;
	if (!spk_parked)
		speakup_date(vc);
	if (spk_shut_up || synth == NULL) {
		spk_unlock(flags);
		return;
	}
	if (vc->vc_num == fg_console && spk_keydown) {
		spk_keydown = 0;
		if (!is_cursor)
			say_char(vc);
	}
	spk_unlock(flags);
}
/* called by: vt_notifier_call() */
/*
 * speakup_con_write - speak text being written to the foreground
 * console, unless a cursor move is pending (highlight tracking buffers
 * it instead) or the text falls inside the silenced window.  Uses a
 * trylock because speakup's own output also passes through here.
 */
static void speakup_con_write(struct vc_data *vc, const char *str, int len)
{
	unsigned long flags;

	if ((vc->vc_num != fg_console) || spk_shut_up || synth == NULL)
		return;
	if (!spk_trylock(flags))
		/* Speakup output, discard */
		return;
	if (spk_bell_pos && spk_keydown && (vc->vc_x == spk_bell_pos - 1))
		bleep(3);	/* margin bell */
	if ((is_cursor) || (cursor_track == read_all_mode)) {
		if (cursor_track == CT_Highlight)
			update_color_buffer(vc, str, len);
		spk_unlock(flags);
		return;
	}
	if (win_enabled) {
		if (vc->vc_x >= win_left && vc->vc_x <= win_right &&
		    vc->vc_y >= win_top && vc->vc_y <= win_bottom) {
			spk_unlock(flags);
			return;
		}
	}
	spkup_write(str, len);
	spk_unlock(flags);
}
/* Re-sync the reading cursor with the console cursor, unless parked or
 * the console has no speakup state yet. */
void speakup_con_update(struct vc_data *vc)
{
	unsigned long flags;

	if (speakup_console[vc->vc_num] == NULL || spk_parked)
		return;
	if (!spk_trylock(flags))
		return;	/* speakup output in progress: skip this update */
	speakup_date(vc);
	spk_unlock(flags);
}
/*
 * do_handle_spec - announce lock-key state changes (CapsLock, NumLock,
 * ScrollLock).  ScrollLock also records tty_stopped so output speaking
 * can be paused.  Non-lock special keys just unpark the cursor.
 */
static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
{
	unsigned long flags;
	int on_off = 2;	/* 2 = unknown, don't announce */
	char *label;

	if (synth == NULL || up_flag || spk_killed)
		return;
	spk_lock(flags);
	spk_shut_up &= 0xfe;
	if (spk_no_intr)
		spk_do_flush();
	switch (value) {
	case KVAL(K_CAPS):
		label = spk_msg_get(MSG_KEYNAME_CAPSLOCK);
		on_off = vt_get_leds(fg_console, VC_CAPSLOCK);
		break;
	case KVAL(K_NUM):
		label = spk_msg_get(MSG_KEYNAME_NUMLOCK);
		on_off = vt_get_leds(fg_console, VC_NUMLOCK);
		break;
	case KVAL(K_HOLD):
		label = spk_msg_get(MSG_KEYNAME_SCROLLLOCK);
		on_off = vt_get_leds(fg_console, VC_SCROLLOCK);
		if (speakup_console[vc->vc_num])
			speakup_console[vc->vc_num]->tty_stopped = on_off;
		break;
	default:
		spk_parked &= 0xfe;
		spk_unlock(flags);
		return;
	}
	if (on_off < 2)
		synth_printf("%s %s\n",
			     label, spk_msg_get(MSG_STATUS_START + on_off));
	spk_unlock(flags);
}
/*
 * inc_dec_var - increment or decrement the numeric speakup variable
 * bound to hot key @value, then speak "<name> <value>".  Odd key ids
 * increment, even ones decrement.  The variable name is spoken with
 * underscores replaced by spaces (skipped entirely on a repeated,
 * closely-spaced press).
 *
 * Returns 0 on success, -1 for an unknown or non-numeric variable.
 */
static int inc_dec_var(u_char value)
{
	struct st_var_header *p_header;
	struct var_t *var_data;
	char num_buf[32];
	char *cp = num_buf;
	char *pn;
	int var_id = (int)value - VAR_START;
	int how = (var_id & 1) ? E_INC : E_DEC;

	var_id = var_id / 2 + FIRST_SET_VAR;
	p_header = spk_get_var_header(var_id);
	if (p_header == NULL)
		return -1;
	if (p_header->var_type != VAR_NUM)
		return -1;
	var_data = p_header->data;
	if (spk_set_num_var(1, p_header, how) != 0)
		return -1;
	if (!spk_close_press) {
		for (pn = p_header->name; *pn; pn++) {
			if (*pn == '_')
				/* was "*cp = SPACE" (no increment), so the
				 * space was overwritten by the next char */
				*cp++ = SPACE;
			else
				*cp++ = *pn;
		}
	}
	snprintf(cp, sizeof(num_buf) - (cp - num_buf), " %d ",
		 var_data->u.n.value);
	synth_printf("%s", num_buf);
	return 0;
}
/*
 * speakup_win_set - define the silenced/spoken window corner by corner.
 *
 * First invocation sets the top-left corner, the second the
 * bottom-right (win_start counts corners set).  Pressing it twice at
 * the same position selects the whole line instead.  Rejects a second
 * corner above or left of the first.
 */
static void speakup_win_set(struct vc_data *vc)
{
	char info[40];

	if (win_start > 1) {
		synth_printf("%s\n", spk_msg_get(MSG_WINDOW_ALREADY_SET));
		return;
	}
	if (spk_x < win_left || spk_y < win_top) {
		synth_printf("%s\n", spk_msg_get(MSG_END_BEFORE_START));
		return;
	}
	if (win_start && spk_x == win_left && spk_y == win_top) {
		/* same position twice: window is this whole line */
		win_left = 0;
		win_right = vc->vc_cols - 1;
		win_bottom = spk_y;
		snprintf(info, sizeof(info), spk_msg_get(MSG_WINDOW_LINE),
			 (int)win_top + 1);
	} else {
		if (!win_start) {
			win_top = spk_y;
			win_left = spk_x;
		} else {
			win_bottom = spk_y;
			win_right = spk_x;
		}
		snprintf(info, sizeof(info), spk_msg_get(MSG_WINDOW_BOUNDARY),
			 (win_start) ? spk_msg_get(MSG_END) : spk_msg_get(MSG_START),
			 (int)spk_y + 1, (int)spk_x + 1);
	}
	synth_printf("%s\n", info);
	win_start++;
}
/* Forget the user-defined window and announce that it was cleared. */
static void speakup_win_clear(struct vc_data *vc)
{
	win_top = 0;
	win_bottom = 0;
	win_left = 0;
	win_right = 0;
	win_start = 0;
	synth_printf("%s\n", spk_msg_get(MSG_WINDOW_CLEARED));
}
/* Toggle silencing of the user-defined window; requires both corners
 * to have been set first. */
static void speakup_win_enable(struct vc_data *vc)
{
	if (win_start < 2) {
		synth_printf("%s\n", spk_msg_get(MSG_NO_WINDOW));
		return;
	}
	win_enabled ^= 1;
	synth_printf("%s\n",
		     spk_msg_get(win_enabled ? MSG_WINDOW_SILENCED :
				 MSG_WINDOW_SILENCE_DISABLED));
}
/* Start editing one of the punctuation sets: install edit_bits() as
 * the special key handler for the chosen spk_punc_info entry. */
static void speakup_bits(struct vc_data *vc)
{
	int idx = this_speakup_key - (FIRST_EDIT_BITS - 1);

	if (spk_special_handler != NULL || idx < 1 || idx > 6) {
		synth_printf("%s\n", spk_msg_get(MSG_ERROR));
		return;
	}
	pb_edit = &spk_punc_info[idx];
	synth_printf(spk_msg_get(MSG_EDIT_PROMPT), pb_edit->name);
	spk_special_handler = edit_bits;
}
/*
 * handle_goto - special key handler collecting a goto target such as
 * "20x" (column 20), "5y" (row 5) or "+3y" (3 rows down).  Installed
 * by speakup_goto(); on completion the reading cursor jumps there and
 * the word/line is spoken.
 *
 * goto_pos is a u_long, so the original "goto_pos < 0" clamps were
 * always false (dead code) and the unconditional decrement of an
 * absolute position 0 wrapped to ULONG_MAX; the decrement is now
 * guarded instead.
 */
static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
	static u_char goto_buf[8];
	static int num;
	int maxlen, go_pos;
	char *cp;

	if (type == KT_SPKUP && ch == SPEAKUP_GOTO)
		goto do_goto;
	if (type == KT_LATIN && ch == '\n')
		goto do_goto;
	if (type != 0)
		goto oops;
	if (ch == 8) {		/* backspace: drop the last digit */
		if (num == 0)
			return -1;
		ch = goto_buf[--num];
		goto_buf[num] = '\0';
		spkup_write(&ch, 1);
		return 1;
	}
	if (ch < '+' || ch > 'y')
		goto oops;
	goto_buf[num++] = ch;
	goto_buf[num] = '\0';
	spkup_write(&ch, 1);
	/* relative targets ('+'/'-' prefix) may be one digit longer */
	maxlen = (*goto_buf >= '0') ? 3 : 4;
	if ((ch == '+' || ch == '-') && num == 1)
		return 1;
	if (ch >= '0' && ch <= '9' && num < maxlen)
		return 1;
	if (num < maxlen - 1 || num > maxlen)
		goto oops;
	if (ch < 'x' || ch > 'y') {
oops:
		if (!spk_killed)
			synth_printf(" %s\n", spk_msg_get(MSG_GOTO_CANCELED));
		goto_buf[num = 0] = '\0';
		spk_special_handler = NULL;
		return 1;
	}
	cp = speakup_s2i(goto_buf, &go_pos);
	goto_pos = (u_long) go_pos;
	if (*cp == 'x') {
		if (*goto_buf < '0')
			goto_pos += spk_x;	/* relative move */
		else if (goto_pos > 0)
			goto_pos--;		/* 1-based -> 0-based */
		if (goto_pos >= vc->vc_cols)
			goto_pos = vc->vc_cols - 1;
		goto_x = 1;
	} else {
		if (*goto_buf < '0')
			goto_pos += spk_y;	/* relative move */
		else if (goto_pos > 0)
			goto_pos--;		/* 1-based -> 0-based */
		if (goto_pos >= vc->vc_rows)
			goto_pos = vc->vc_rows - 1;
		goto_x = 0;
	}
	goto_buf[num = 0] = '\0';
do_goto:
	spk_special_handler = NULL;
	spk_parked |= 0x01;
	if (goto_x) {
		spk_pos -= spk_x * 2;
		spk_x = goto_pos;
		spk_pos += goto_pos * 2;
		say_word(vc);
	} else {
		spk_y = goto_pos;
		spk_pos = vc->vc_origin + (goto_pos * vc->vc_size_row);
		say_line(vc);
	}
	return 1;
}
/* Prompt for a goto target and hand further keys to handle_goto(). */
static void speakup_goto(struct vc_data *vc)
{
	if (spk_special_handler != NULL) {
		synth_printf("%s\n", spk_msg_get(MSG_ERROR));
		return;
	}
	synth_printf("%s\n", spk_msg_get(MSG_GOTO));
	spk_special_handler = handle_goto;
}
/* Enter the interactive key-help mode. */
static void speakup_help(struct vc_data *vc)
{
	spk_handle_help(vc, KT_SPKUP, SPEAKUP_HELP, 0);
}
/* Placeholder handler for key slot 0 of spkup_handler[]. */
static void do_nothing(struct vc_data *vc)
{
	return;			/* flush done in do_spkup */
}
/* Speakup modifier state; speakup_lock() pins it (value 16) or releases it. */
static u_char key_speakup, spk_key_locked;
static void speakup_lock(struct vc_data *vc)
{
if (!spk_key_locked)
spk_key_locked = key_speakup = 16;
else
spk_key_locked = key_speakup = 0;
}
typedef void (*spkup_hand) (struct vc_data *);

/*
 * Dispatch table for Speakup function keys, indexed by the key's
 * function value.  Slot order MUST stay in sync with the defines in
 * speakup.h; the trailing NULL terminates the table.
 */
spkup_hand spkup_handler[] = {
	/* must be ordered same as defines in speakup.h */
	do_nothing, speakup_goto, speech_kill, speakup_shut_up,
	speakup_cut, speakup_paste, say_first_char, say_last_char,
	say_char, say_prev_char, say_next_char,
	say_word, say_prev_word, say_next_word,
	say_line, say_prev_line, say_next_line,
	top_edge, bottom_edge, left_edge, right_edge,
	spell_word, spell_word, say_screen,
	say_position, say_attributes,
	speakup_off, speakup_parked, say_line, /* this is for indent */
	say_from_top, say_to_bottom,
	say_from_left, say_to_right,
	say_char_num, speakup_bits, speakup_bits, say_phonetic_char,
	speakup_bits, speakup_bits, speakup_bits,
	speakup_win_set, speakup_win_clear, speakup_win_enable, speakup_win_say,
	speakup_lock, speakup_help, toggle_cursoring, read_all_doc, NULL
};
/*
 * Dispatch a decoded Speakup function value: either call the matching
 * handler from spkup_handler[] or treat the value as a variable
 * increment/decrement request.
 */
static void do_spkup(struct vc_data *vc, u_char value)
{
	/* While speech is killed, only the kill toggle itself gets through. */
	if (spk_killed && value != SPEECH_KILL)
		return;
	spk_keydown = 0;
	spk_lastkey = 0;
	spk_shut_up &= 0xfe;
	this_speakup_key = value;
	if (value >= SPKUP_MAX_FUNC || spkup_handler[value] == NULL) {
		/* Not a function key: try it as a variable inc/dec. */
		if (inc_dec_var(value) < 0)
			bleep(9);
	} else {
		spk_do_flush();
		(*spkup_handler[value]) (vc);
	}
}
/* Characters produced by the numeric keypad when NumLock is on,
 * indexed by keypad key value (\015 is carriage return). */
static const char *pad_chars = "0123456789+-*/\015,.?()";
/*
 * Keyboard hook: examines every key event before the console acts on
 * it.  Translates Speakup key chords into KT_SPKUP function values and
 * dispatches them, feeds an active special handler (e.g. goto mode),
 * and flushes ongoing speech on ordinary typing.
 *
 * Returns non-zero when Speakup consumed the key (the caller should
 * stop further processing), 0 to let the event through.
 */
int
speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
	    int up_flag)
{
	unsigned long flags;
	int kh;
	u_char *key_info;
	u_char type = KTYP(keysym), value = KVAL(keysym), new_key = 0;
	u_char shift_info, offset;
	int ret = 0;

	if (synth == NULL)
		return 0;	/* no synthesizer loaded: nothing to do */

	spk_lock(flags);
	tty = vc->port.tty;
	if (type >= 0xf0)
		type -= 0xf0;	/* strip the keysym type offset */
	if (type == KT_PAD
		&& (vt_get_leds(fg_console, VC_NUMLOCK))) {
		/* NumLock on: keypad keys speak their character directly. */
		if (up_flag) {
			spk_keydown = 0;
			goto out;
		}
		value = spk_lastkey = pad_chars[value];
		spk_keydown++;
		spk_parked &= 0xfe;
		goto no_map;
	}
	if (keycode >= MAX_KEY)
		goto no_map;
	key_info = spk_our_keys[keycode];
	if (key_info == 0)
		goto no_map;
	/* Check valid read all mode keys */
	if ((cursor_track == read_all_mode) && (!up_flag)) {
		switch (value) {
		case KVAL(K_DOWN):
		case KVAL(K_UP):
		case KVAL(K_LEFT):
		case KVAL(K_RIGHT):
		case KVAL(K_PGUP):
		case KVAL(K_PGDN):
			break;
		default:
			stop_read_all(vc);
			break;
		}
	}
	/* Combine shift state with the Speakup modifier (key_speakup). */
	shift_info = (shift_state & 0x0f) + key_speakup;
	offset = spk_shift_table[shift_info];
	if (offset) {
		new_key = key_info[offset];
		if (new_key) {
			ret = 1;
			if (new_key == SPK_KEY) {
				/* The Speakup modifier key itself. */
				if (!spk_key_locked)
					key_speakup = (up_flag) ? 0 : 16;
				if (up_flag || spk_killed)
					goto out;
				spk_shut_up &= 0xfe;
				spk_do_flush();
				goto out;
			}
			if (up_flag)
				goto out;
			if (last_keycode == keycode &&
			    last_spk_jiffy + MAX_DELAY > jiffies) {
				spk_close_press = 1;
				offset = spk_shift_table[shift_info + 32];
				/* double press? */
				if (offset && key_info[offset])
					new_key = key_info[offset];
			}
			last_keycode = keycode;
			last_spk_jiffy = jiffies;
			type = KT_SPKUP;
			value = new_key;
		}
	}
no_map:
	if (type == KT_SPKUP && spk_special_handler == NULL) {
		do_spkup(vc, new_key);
		spk_close_press = 0;
		ret = 1;
		goto out;
	}
	if (up_flag || spk_killed || type == KT_SHIFT)
		goto out;
	spk_shut_up &= 0xfe;
	/* Cursor keys in read-all mode must not interrupt speech. */
	kh = (value == KVAL(K_DOWN))
	    || (value == KVAL(K_UP))
	    || (value == KVAL(K_LEFT))
	    || (value == KVAL(K_RIGHT));
	if ((cursor_track != read_all_mode) || !kh)
		if (!spk_no_intr)
			spk_do_flush();
	if (spk_special_handler) {
		/* Normalise the event before feeding the special handler. */
		if (type == KT_SPEC && value == 1) {
			value = '\n';
			type = KT_LATIN;
		} else if (type == KT_LETTER)
			type = KT_LATIN;
		else if (value == 0x7f)
			value = 8;	/* make del = backspace */
		ret = (*spk_special_handler) (vc, type, value, keycode);
		spk_close_press = 0;
		if (ret < 0)
			bleep(9);
		goto out;
	}
	last_keycode = 0;
out:
	spk_unlock(flags);
	return ret;
}
/*
 * Keyboard notifier: receives keycode/keysym events from the input
 * layer and routes them through speakup_key() and the per-type
 * handlers.  Returns NOTIFY_STOP when Speakup consumed the event,
 * NOTIFY_OK otherwise.
 */
static int keyboard_notifier_call(struct notifier_block *nb,
				  unsigned long code, void *_param)
{
	struct keyboard_notifier_param *param = _param;
	struct vc_data *vc = param->vc;
	int up = !param->down;
	int ret = NOTIFY_OK;
	static int keycode;	/* to hold the current keycode */

	if (vc->vc_mode == KD_GRAPHICS)
		return ret;	/* never speak on graphics consoles */

	/*
	 * First, determine whether we are handling a fake keypress on
	 * the current processor. If we are, then return NOTIFY_OK,
	 * to pass the keystroke up the chain. This prevents us from
	 * trying to take the Speakup lock while it is held by the
	 * processor on which the simulated keystroke was generated.
	 * Also, the simulated keystrokes should be ignored by Speakup.
	 */
	if (speakup_fake_key_pressed())
		return ret;

	switch (code) {
	case KBD_KEYCODE:
		/* speakup requires keycode and keysym currently */
		keycode = param->value;
		break;
	case KBD_UNBOUND_KEYCODE:
		/* not used yet */
		break;
	case KBD_UNICODE:
		/* not used yet */
		break;
	case KBD_KEYSYM:
		if (speakup_key(vc, param->shift, keycode, param->value, up))
			ret = NOTIFY_STOP;
		else if (KTYP(param->value) == KT_CUR)
			ret = pre_handle_cursor(vc, KVAL(param->value), up);
		break;
	case KBD_POST_KEYSYM:{
			unsigned char type = KTYP(param->value) - 0xf0;
			unsigned char val = KVAL(param->value);

			switch (type) {
			case KT_SHIFT:
				do_handle_shift(vc, val, up);
				break;
			case KT_LATIN:
			case KT_LETTER:
				do_handle_latin(vc, val, up);
				break;
			case KT_CUR:
				do_handle_cursor(vc, val, up);
				break;
			case KT_SPEC:
				do_handle_spec(vc, val, up);
				break;
			}
			break;
		}
	}
	return ret;
}
/*
 * VT notifier: mirrors console lifecycle and output events into
 * Speakup (per-console allocation, backspace handling, written
 * characters and screen updates).  Always passes the event on.
 */
static int vt_notifier_call(struct notifier_block *nb,
			    unsigned long code, void *_param)
{
	struct vt_notifier_param *param = _param;
	struct vc_data *vc = param->vc;

	if (code == VT_ALLOCATE) {
		if (vc->vc_mode == KD_TEXT)
			speakup_allocate(vc);
	} else if (code == VT_DEALLOCATE) {
		speakup_deallocate(vc);
	} else if (code == VT_WRITE) {
		if (param->c == '\b') {
			speakup_bs(vc);
		} else if (param->c < 0x100) {
			char d = param->c;

			speakup_con_write(vc, &d, 1);
		}
	} else if (code == VT_UPDATE) {
		speakup_con_update(vc);
	}
	return NOTIFY_OK;
}
/* called by: module_exit() */
static void __exit speakup_exit(void)
{
	int i;

	/* Stop all event sources first so nothing touches freed state. */
	unregister_keyboard_notifier(&keyboard_notifier_block);
	unregister_vt_notifier(&vt_notifier_block);
	speakup_unregister_devsynth();
	speakup_cancel_paste();
	del_timer(&cursor_timer);
	kthread_stop(speakup_task);
	speakup_task = NULL;
	mutex_lock(&spk_mutex);
	synth_release();
	mutex_unlock(&spk_mutex);
	speakup_kobj_exit();

	for (i = 0; i < MAX_NR_CONSOLES; i++)
		kfree(speakup_console[i]);
	speakup_remove_virtual_keyboard();

	for (i = 0; i < MAXVARS; i++)
		speakup_unregister_var(i);

	/* Only free character strings that replaced the defaults. */
	for (i = 0; i < 256; i++) {
		if (spk_characters[i] != spk_default_chars[i])
			kfree(spk_characters[i]);
	}
	spk_free_user_msgs();
}
/* call by: module_init() */
static int __init speakup_init(void)
{
	int i;
	long err = 0;
	struct st_spk_t *first_console;
	struct vc_data *vc = vc_cons[fg_console].d;
	struct var_t *var;

	/* These first few initializations cannot fail. */
	spk_initialize_msgs();	/* Initialize arrays for i18n. */
	spk_reset_default_chars();
	spk_reset_default_chartab();
	spk_strlwr(synth_name);
	spk_vars[0].u.n.high = vc->vc_cols;
	for (var = spk_vars; var->var_id != MAXVARS; var++)
		speakup_register_var(var);
	for (var = synth_time_vars;
	     (var->var_id >= 0) && (var->var_id < MAXVARS); var++)
		speakup_register_var(var);
	for (i = 1; spk_punc_info[i].mask != 0; i++)
		spk_set_mask_bits(0, i, 2);
	spk_set_key_info(spk_key_defaults, spk_key_buf);

	/* From here on out, initializations can fail. */
	err = speakup_add_virtual_keyboard();
	if (err)
		goto error_virtkeyboard;
	first_console = kzalloc(sizeof(*first_console), GFP_KERNEL);
	if (!first_console) {
		err = -ENOMEM;
		goto error_alloc;
	}
	speakup_console[vc->vc_num] = first_console;
	speakup_date(vc);
	for (i = 0; i < MAX_NR_CONSOLES; i++)
		if (vc_cons[i].d) {
			err = speakup_allocate(vc_cons[i].d);
			if (err)
				goto error_kobjects;
		}
	if (spk_quiet_boot)
		spk_shut_up |= 0x01;
	err = speakup_kobj_init();
	if (err)
		goto error_kobjects;
	synth_init(synth_name);
	speakup_register_devsynth();
	/*
	 * register_devsynth might fail, but this error is not fatal.
	 * /dev/synth is an extra feature; the rest of Speakup
	 * will work fine without it.
	 */
	err = register_keyboard_notifier(&keyboard_notifier_block);
	if (err)
		goto error_kbdnotifier;
	err = register_vt_notifier(&vt_notifier_block);
	if (err)
		goto error_vtnotifier;
	speakup_task = kthread_create(speakup_thread, NULL, "speakup");
	if (IS_ERR(speakup_task)) {
		err = PTR_ERR(speakup_task);
		goto error_task;
	}
	set_user_nice(speakup_task, 10);
	wake_up_process(speakup_task);
	pr_info("speakup %s: initialized\n", SPEAKUP_VERSION);
	pr_info("synth name on entry is: %s\n", synth_name);
	goto out;

	/* Error unwinding: each label undoes everything set up before
	 * the corresponding failure point, in reverse order. */
error_task:
	unregister_vt_notifier(&vt_notifier_block);
error_vtnotifier:
	unregister_keyboard_notifier(&keyboard_notifier_block);
	del_timer(&cursor_timer);
error_kbdnotifier:
	speakup_unregister_devsynth();
	mutex_lock(&spk_mutex);
	synth_release();
	mutex_unlock(&spk_mutex);
	speakup_kobj_exit();
error_kobjects:
	for (i = 0; i < MAX_NR_CONSOLES; i++)
		kfree(speakup_console[i]);
error_alloc:
	speakup_remove_virtual_keyboard();
error_virtkeyboard:
	for (i = 0; i < MAXVARS; i++)
		speakup_unregister_var(i);
	for (i = 0; i < 256; i++) {
		if (spk_characters[i] != spk_default_chars[i])
			kfree(spk_characters[i]);
	}
	spk_free_user_msgs();
out:
	return err;
}
/* Standard module entry/exit hooks. */
module_init(speakup_init);
module_exit(speakup_exit);
| gpl-2.0 |
AOSP-TEAM/kernel_i9100g | drivers/gpu/drm/sis/sis_mm.c | 1536 | 8491 | /**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
#include <video/sisfb.h>
#define VIDEO_TYPE 0
#define AGP_TYPE 1
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0
static void *sis_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
struct sis_memreq req;
req.size = size;
sis_malloc(&req);
if (req.size == 0)
return NULL;
else
return (void *)(unsigned long)~req.offset;
}
/* sman free callback: undo the cookie inversion and return the block
 * to the sisfb allocator. */
static void sis_sman_mm_free(void *private, void *ref)
{
	unsigned long offset = ~(unsigned long)ref;

	sis_free(offset);
}
/* sman destroy callback: nothing to tear down, the sisfb driver owns
 * the underlying allocator. */
static void sis_sman_mm_destroy(void *private)
{
	;
}
/* sman offset callback: recover the real offset from the inverted
 * cookie created by sis_sman_mm_allocate(). */
static unsigned long sis_sman_mm_offset(void *private, void *ref)
{
	unsigned long cookie = (unsigned long)ref;

	return ~cookie;
}
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
#endif /* CONFIG_FB_SIS[_MODULE] */
/*
 * DRM_SIS_FB_INIT ioctl: initialise the video-RAM memory manager.
 * With a SiS framebuffer driver built in, allocations are delegated
 * to sisfb; otherwise a simple range manager over fb->size is used.
 * Returns 0 or a negative errno from the sman setup.
 */
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_fb_t *fb = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
	{
		struct drm_sman_mm sman_mm;
		/* Dummy private token; the sisfb callbacks ignore it. */
		sman_mm.private = (void *)0xFFFFFFFF;
		sman_mm.allocate = sis_sman_mm_allocate;
		sman_mm.free = sis_sman_mm_free;
		sman_mm.destroy = sis_sman_mm_destroy;
		sman_mm.offset = sis_sman_mm_offset;
		ret =
		    drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
	}
#else
	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
				 fb->size >> SIS_MM_ALIGN_SHIFT);
#endif
	if (ret) {
		DRM_ERROR("VRAM memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
	return 0;
}
/*
 * Allocate a block from one of the two pools (VIDEO_TYPE or AGP_TYPE).
 * On success fills mem->offset (absolute), mem->size (rounded up to
 * the pool alignment) and mem->free (hash key later consumed by
 * sis_drm_free()).  Returns 0, -EINVAL if the pool was never
 * initialised, or -ENOMEM.
 */
static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
			 void *data, int pool)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t *mem = data;
	int retval = 0;
	struct drm_memblock_item *item;

	mutex_lock(&dev->struct_mutex);
	if (0 == ((pool == 0) ? dev_priv->vram_initialized :
		      dev_priv->agp_initialized)) {
		DRM_ERROR
		    ("Attempt to allocate from uninitialized memory manager.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	/* Convert the byte size into alignment units, rounding up. */
	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
			      (unsigned long)file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (item) {
		mem->offset = ((pool == 0) ?
			      dev_priv->vram_offset : dev_priv->agp_offset) +
		    (item->mm->
		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
		mem->free = item->user_hash.key;
		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
	} else {
		mem->offset = 0;
		mem->size = 0;
		mem->free = 0;
		retval = -ENOMEM;
	}
	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
		  mem->offset);
	return retval;
}
/* Free a previously allocated block identified by its hash key
 * (mem->free), under struct_mutex. */
static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_sis_private_t *priv = dev->dev_private;
	drm_sis_mem_t *req = data;
	int err;

	mutex_lock(&dev->struct_mutex);
	err = drm_sman_free_key(&priv->sman, req->free);
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("free = 0x%lx\n", req->free);
	return err;
}
/* DRM_SIS_FB_ALLOC ioctl: allocate from the video-RAM pool. */
static int sis_fb_alloc(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
}
/*
 * DRM_SIS_AGP_INIT ioctl: set up the simple memory manager range that
 * backs AGP allocations.  Returns 0 on success or a negative errno
 * from drm_sman_set_range().
 */
static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_agp_t *agp = data;
	int ret;

	/* Note: the original code re-assigned dev_priv here, duplicating
	 * its initializer; the redundant assignment has been dropped. */
	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
				 agp->size >> SIS_MM_ALIGN_SHIFT);
	if (ret) {
		DRM_ERROR("AGP memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
	return 0;
}
/* DRM_SIS_AGP_ALLOC ioctl: allocate from the AGP pool. */
static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
}
/* Scan the device's map list for the MMIO register map; NULL if the
 * map has not been set up. */
static drm_local_map_t *sis_reg_init(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		drm_local_map_t *map = entry->map;

		if (map && map->type == _DRM_REGISTERS)
			return map;
	}
	return NULL;
}
/*
 * Wait (bounded) for the graphics engine to go idle.  Always returns
 * 0: on timeout the idle check is permanently disabled (idle_fault)
 * instead of reporting an error, since libdrm traps error codes.
 */
int sis_idle(struct drm_device *dev)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	uint32_t idle_reg;
	unsigned long end;
	int i;

	if (dev_priv->idle_fault)
		return 0;	/* idle check disabled after a timeout */
	if (dev_priv->mmio == NULL) {
		dev_priv->mmio = sis_reg_init(dev);
		if (dev_priv->mmio == NULL) {
			DRM_ERROR("Could not find register map.\n");
			return 0;
		}
	}
	/*
	 * Implement a device switch here if needed
	 */
	if (dev_priv->chipset != SIS_CHIP_315)
		return 0;
	/*
	 * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
	 * because its polling frequency is too low.
	 */
	end = jiffies + (DRM_HZ * 3);
	/* Require the engine's idle bit to read set four times. */
	for (i=0; i<4; ++i) {
		do {
			idle_reg = SIS_READ(0x85cc);
		} while ( !time_after_eq(jiffies, end) &&
			  ((idle_reg & 0x80000000) != 0x80000000));
	}
	if (time_after_eq(jiffies, end)) {
		DRM_ERROR("Graphics engine idle timeout. "
			  "Disabling idle check\n");
		dev_priv->idle_fault = 1;
	}
	/*
	 * The caller never sees an error code. It gets trapped
	 * in libdrm.
	 */
	return 0;
}
/* Last-close hook: tear down both memory pools and forget the cached
 * MMIO map so the next open re-initialises everything. */
void sis_lastclose(struct drm_device *dev)
{
	drm_sis_private_t *priv = dev->dev_private;

	if (priv == NULL)
		return;

	mutex_lock(&dev->struct_mutex);
	drm_sman_cleanup(&priv->sman);
	priv->vram_initialized = 0;
	priv->agp_initialized = 0;
	priv->mmio = NULL;
	mutex_unlock(&dev->struct_mutex);
}
/* Reclaim all memory blocks still owned by a closing file handle,
 * quiescing the engine first if any blocks are outstanding. */
void sis_reclaim_buffers_locked(struct drm_device * dev,
				struct drm_file *file_priv)
{
	drm_sis_private_t *priv = dev->dev_private;
	unsigned long owner = (unsigned long)file_priv;

	mutex_lock(&dev->struct_mutex);
	if (!drm_sman_owner_clean(&priv->sman, owner)) {
		/* Wait for the engine before freeing in-flight buffers. */
		if (dev->driver->dma_quiescent)
			dev->driver->dma_quiescent(dev);
		drm_sman_owner_cleanup(&priv->sman, owner);
	}
	mutex_unlock(&dev->struct_mutex);
}
/* Driver ioctl table; the init ioctls are restricted to the DRM
 * master running as root. */
struct drm_ioctl_desc sis_ioctls[] = {
	DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};

/* Number of entries in sis_ioctls, exported for the DRM core. */
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
| gpl-2.0 |
tjstyle/FIH-Kernel | drivers/gpu/drm/sis/sis_mm.c | 1536 | 8491 | /**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
#include <video/sisfb.h>
#define VIDEO_TYPE 0
#define AGP_TYPE 1
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0
static void *sis_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
struct sis_memreq req;
req.size = size;
sis_malloc(&req);
if (req.size == 0)
return NULL;
else
return (void *)(unsigned long)~req.offset;
}
/* sman free callback: undo the cookie inversion and return the block
 * to the sisfb allocator. */
static void sis_sman_mm_free(void *private, void *ref)
{
	unsigned long offset = ~(unsigned long)ref;

	sis_free(offset);
}
/* sman destroy callback: nothing to tear down, the sisfb driver owns
 * the underlying allocator. */
static void sis_sman_mm_destroy(void *private)
{
	;
}
/* sman offset callback: recover the real offset from the inverted
 * cookie created by sis_sman_mm_allocate(). */
static unsigned long sis_sman_mm_offset(void *private, void *ref)
{
	unsigned long cookie = (unsigned long)ref;

	return ~cookie;
}
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
#endif /* CONFIG_FB_SIS[_MODULE] */
/*
 * DRM_SIS_FB_INIT ioctl: initialise the video-RAM memory manager.
 * With a SiS framebuffer driver built in, allocations are delegated
 * to sisfb; otherwise a simple range manager over fb->size is used.
 * Returns 0 or a negative errno from the sman setup.
 */
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_fb_t *fb = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
	{
		struct drm_sman_mm sman_mm;
		/* Dummy private token; the sisfb callbacks ignore it. */
		sman_mm.private = (void *)0xFFFFFFFF;
		sman_mm.allocate = sis_sman_mm_allocate;
		sman_mm.free = sis_sman_mm_free;
		sman_mm.destroy = sis_sman_mm_destroy;
		sman_mm.offset = sis_sman_mm_offset;
		ret =
		    drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
	}
#else
	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
				 fb->size >> SIS_MM_ALIGN_SHIFT);
#endif
	if (ret) {
		DRM_ERROR("VRAM memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
	return 0;
}
/*
 * Allocate a block from one of the two pools (VIDEO_TYPE or AGP_TYPE).
 * On success fills mem->offset (absolute), mem->size (rounded up to
 * the pool alignment) and mem->free (hash key later consumed by
 * sis_drm_free()).  Returns 0, -EINVAL if the pool was never
 * initialised, or -ENOMEM.
 */
static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
			 void *data, int pool)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t *mem = data;
	int retval = 0;
	struct drm_memblock_item *item;

	mutex_lock(&dev->struct_mutex);
	if (0 == ((pool == 0) ? dev_priv->vram_initialized :
		      dev_priv->agp_initialized)) {
		DRM_ERROR
		    ("Attempt to allocate from uninitialized memory manager.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	/* Convert the byte size into alignment units, rounding up. */
	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
			      (unsigned long)file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (item) {
		mem->offset = ((pool == 0) ?
			      dev_priv->vram_offset : dev_priv->agp_offset) +
		    (item->mm->
		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
		mem->free = item->user_hash.key;
		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
	} else {
		mem->offset = 0;
		mem->size = 0;
		mem->free = 0;
		retval = -ENOMEM;
	}
	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
		  mem->offset);
	return retval;
}
/* Free a previously allocated block identified by its hash key
 * (mem->free), under struct_mutex. */
static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_sis_private_t *priv = dev->dev_private;
	drm_sis_mem_t *req = data;
	int err;

	mutex_lock(&dev->struct_mutex);
	err = drm_sman_free_key(&priv->sman, req->free);
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("free = 0x%lx\n", req->free);
	return err;
}
/* DRM_SIS_FB_ALLOC ioctl: allocate from the video-RAM pool. */
static int sis_fb_alloc(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
}
/*
 * DRM_SIS_AGP_INIT ioctl: set up the simple memory manager range that
 * backs AGP allocations.  Returns 0 on success or a negative errno
 * from drm_sman_set_range().
 */
static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_agp_t *agp = data;
	int ret;

	/* Note: the original code re-assigned dev_priv here, duplicating
	 * its initializer; the redundant assignment has been dropped. */
	mutex_lock(&dev->struct_mutex);
	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
				 agp->size >> SIS_MM_ALIGN_SHIFT);
	if (ret) {
		DRM_ERROR("AGP memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
	return 0;
}
/* DRM_SIS_AGP_ALLOC ioctl: allocate from the AGP pool. */
static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
}
/* Scan the device's map list for the MMIO register map; NULL if the
 * map has not been set up. */
static drm_local_map_t *sis_reg_init(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		drm_local_map_t *map = entry->map;

		if (map && map->type == _DRM_REGISTERS)
			return map;
	}
	return NULL;
}
/*
 * Wait (bounded) for the graphics engine to go idle.  Always returns
 * 0: on timeout the idle check is permanently disabled (idle_fault)
 * instead of reporting an error, since libdrm traps error codes.
 */
int sis_idle(struct drm_device *dev)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	uint32_t idle_reg;
	unsigned long end;
	int i;

	if (dev_priv->idle_fault)
		return 0;	/* idle check disabled after a timeout */
	if (dev_priv->mmio == NULL) {
		dev_priv->mmio = sis_reg_init(dev);
		if (dev_priv->mmio == NULL) {
			DRM_ERROR("Could not find register map.\n");
			return 0;
		}
	}
	/*
	 * Implement a device switch here if needed
	 */
	if (dev_priv->chipset != SIS_CHIP_315)
		return 0;
	/*
	 * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
	 * because its polling frequency is too low.
	 */
	end = jiffies + (DRM_HZ * 3);
	/* Require the engine's idle bit to read set four times. */
	for (i=0; i<4; ++i) {
		do {
			idle_reg = SIS_READ(0x85cc);
		} while ( !time_after_eq(jiffies, end) &&
			  ((idle_reg & 0x80000000) != 0x80000000));
	}
	if (time_after_eq(jiffies, end)) {
		DRM_ERROR("Graphics engine idle timeout. "
			  "Disabling idle check\n");
		dev_priv->idle_fault = 1;
	}
	/*
	 * The caller never sees an error code. It gets trapped
	 * in libdrm.
	 */
	return 0;
}
/* Last-close hook: tear down both memory pools and forget the cached
 * MMIO map so the next open re-initialises everything. */
void sis_lastclose(struct drm_device *dev)
{
	drm_sis_private_t *priv = dev->dev_private;

	if (priv == NULL)
		return;

	mutex_lock(&dev->struct_mutex);
	drm_sman_cleanup(&priv->sman);
	priv->vram_initialized = 0;
	priv->agp_initialized = 0;
	priv->mmio = NULL;
	mutex_unlock(&dev->struct_mutex);
}
/* Reclaim all memory blocks still owned by a closing file handle,
 * quiescing the engine first if any blocks are outstanding. */
void sis_reclaim_buffers_locked(struct drm_device * dev,
				struct drm_file *file_priv)
{
	drm_sis_private_t *priv = dev->dev_private;
	unsigned long owner = (unsigned long)file_priv;

	mutex_lock(&dev->struct_mutex);
	if (!drm_sman_owner_clean(&priv->sman, owner)) {
		/* Wait for the engine before freeing in-flight buffers. */
		if (dev->driver->dma_quiescent)
			dev->driver->dma_quiescent(dev);
		drm_sman_owner_cleanup(&priv->sman, owner);
	}
	mutex_unlock(&dev->struct_mutex);
}
/* Driver ioctl table; the init ioctls are restricted to the DRM
 * master running as root. */
struct drm_ioctl_desc sis_ioctls[] = {
	DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};

/* Number of entries in sis_ioctls, exported for the DRM core. */
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
| gpl-2.0 |
fortunave3gxx/android_kernel_samsung_fortuna-common-old | drivers/base/regmap/regmap-spi.c | 2304 | 3812 | /*
* Register map access API - SPI support
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/init.h>
#include <linux/module.h>
#include "internal.h"
/* Per-operation state for an asynchronous SPI write: the embedded
 * regmap_async core plus the SPI message and its two transfers
 * (register buffer, then value buffer). */
struct regmap_async_spi {
	struct regmap_async core;
	struct spi_message m;
	struct spi_transfer t[2];
};
/* SPI message completion callback: forward the message status to the
 * regmap core. */
static void regmap_spi_complete(void *data)
{
	struct regmap_async_spi *async_spi = data;

	regmap_async_complete_cb(&async_spi->core, async_spi->m.status);
}
/* Synchronous single-buffer write: the bus context is the SPI slave's
 * struct device. */
static int regmap_spi_write(void *context, const void *data, size_t count)
{
	struct device *dev = context;

	return spi_write(to_spi_device(dev), data, count);
}
/* Synchronous gather write: send register and value buffers
 * back-to-back as two transfers in a single SPI message. */
static int regmap_spi_gather_write(void *context,
				   const void *reg, size_t reg_len,
				   const void *val, size_t val_len)
{
	struct device *dev = context;
	struct spi_message msg;
	struct spi_transfer xfers[2] = {
		{ .tx_buf = reg, .len = reg_len, },
		{ .tx_buf = val, .len = val_len, },
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	return spi_sync(to_spi_device(dev), &msg);
}
/* Asynchronous gather write: populate the pre-allocated transfers in
 * the regmap_async_spi container and fire the message off without
 * waiting; completion is reported via regmap_spi_complete(). */
static int regmap_spi_async_write(void *context,
				  const void *reg, size_t reg_len,
				  const void *val, size_t val_len,
				  struct regmap_async *a)
{
	struct regmap_async_spi *async_spi = container_of(a,
							  struct regmap_async_spi,
							  core);
	struct device *dev = context;

	/* First transfer carries the register, second the payload. */
	async_spi->t[0].tx_buf = reg;
	async_spi->t[0].len = reg_len;
	async_spi->t[1].tx_buf = val;
	async_spi->t[1].len = val_len;

	spi_message_init(&async_spi->m);
	spi_message_add_tail(&async_spi->t[0], &async_spi->m);
	spi_message_add_tail(&async_spi->t[1], &async_spi->m);

	async_spi->m.complete = regmap_spi_complete;
	async_spi->m.context = async_spi;

	return spi_async(to_spi_device(dev), &async_spi->m);
}
/* Allocate the per-operation async container; returns the embedded
 * core struct, or NULL on allocation failure. */
static struct regmap_async *regmap_spi_async_alloc(void)
{
	struct regmap_async_spi *async_spi = kzalloc(sizeof(*async_spi),
						     GFP_KERNEL);

	return async_spi ? &async_spi->core : NULL;
}
/* Synchronous read: write the register address, then read the value,
 * using the SPI core's write-then-read helper. */
static int regmap_spi_read(void *context,
			   const void *reg, size_t reg_size,
			   void *val, size_t val_size)
{
	struct device *dev = context;

	return spi_write_then_read(to_spi_device(dev), reg, reg_size,
				   val, val_size);
}
/* regmap bus operations backed by the SPI core.  Reads are indicated
 * by setting the top bit of the register address (read_flag_mask). */
static struct regmap_bus regmap_spi = {
	.write = regmap_spi_write,
	.gather_write = regmap_spi_gather_write,
	.async_write = regmap_spi_async_write,
	.async_alloc = regmap_spi_async_alloc,
	.read = regmap_spi_read,
	.read_flag_mask = 0x80,
};
/**
* regmap_init_spi(): Initialise register map
*
* @spi: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
struct regmap *regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config)
{
return regmap_init(&spi->dev, ®map_spi, &spi->dev, config);
}
EXPORT_SYMBOL_GPL(regmap_init_spi);
/**
* devm_regmap_init_spi(): Initialise register map
*
* @spi: Device that will be interacted with
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The map will be automatically freed by the
* device management code.
*/
struct regmap *devm_regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config)
{
return devm_regmap_init(&spi->dev, ®map_spi, &spi->dev, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ptmr3/smdk4412 | drivers/mmc/host/wbsd.c | 2816 | 40832 | /*
* linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver
*
* Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
*
* Warning!
*
* Changes to the FIFO system should be done with extreme care since
* the hardware is full of bugs related to the FIFO. Known issues are:
*
* - FIFO size field in FSR is always zero.
*
* - FIFO interrupts tend not to work as they should. Interrupts are
* triggered only for full/empty events, not for threshold values.
*
* - On APIC systems the FIFO empty interrupt is sometimes lost.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/pnp.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/dma.h>
#include "wbsd.h"
#define DRIVER_NAME "wbsd"

/* Debug helpers prefixed with the driver name. */
#define DBG(x...) \
	pr_debug(DRIVER_NAME ": " x)
#define DBGF(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)

/*
 * Device resources
 */

#ifdef CONFIG_PNP

/* PnP IDs for the Winbond W83L517D/W83L518D controllers. */
static const struct pnp_device_id pnp_dev_table[] = {
	{ "WEC0517", 0 },
	{ "WEC0518", 0 },
	{ "", 0 },
};

MODULE_DEVICE_TABLE(pnp, pnp_dev_table);

#endif /* CONFIG_PNP */

/* Candidate Super I/O configuration ports and their unlock codes. */
static const int config_ports[] = { 0x2E, 0x4E };
static const int unlock_codes[] = { 0x83, 0x87 };

/* Chip IDs this driver accepts. */
static const int valid_ids[] = {
	0x7112,
};

/* Module parameters: resources used when PnP probing is disabled. */
#ifdef CONFIG_PNP
static unsigned int param_nopnp = 0;
#else
static const unsigned int param_nopnp = 1;
#endif
static unsigned int param_io = 0x248;
static unsigned int param_irq = 6;
static int param_dma = 2;
/*
* Basic functions
*/
/* Unlock the Super I/O configuration registers: the unlock code must
 * be written to the config port twice in a row. */
static inline void wbsd_unlock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);
	outb(host->unlock_code, host->config);
	outb(host->unlock_code, host->config);
}
/* Re-lock the Super I/O configuration registers. */
static inline void wbsd_lock_config(struct wbsd_host *host)
{
	BUG_ON(host->config == 0);
	outb(LOCK_CODE, host->config);
}
/* Write a Super I/O configuration register: index port first, then
 * the data port. */
static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
{
	BUG_ON(host->config == 0);
	outb(reg, host->config);
	outb(value, host->config + 1);
}
/* Read a Super I/O configuration register via the index/data ports. */
static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
{
	BUG_ON(host->config == 0);
	outb(reg, host->config);
	return inb(host->config + 1);
}
/* Write an indexed SD/MMC controller register via IDXR/DATAR. */
static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
{
	outb(index, host->base + WBSD_IDXR);
	outb(value, host->base + WBSD_DATAR);
}
/* Read an indexed SD/MMC function register (IDXR/DATAR pair). */
static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
{
	outb(index, host->base + WBSD_IDXR);
	return inb(host->base + WBSD_DATAR);
}
/*
* Common routines
*/
/*
 * Bring the SD/MMC function to a known idle state: reset controller
 * and FIFO, release DAT3, power the socket down, latch the current
 * card-present state and enable the interrupts the driver handles.
 */
static void wbsd_init_device(struct wbsd_host *host)
{
	u8 setup, ier;

	/*
	 * Reset chip (SD/MMC part) and fifo.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Set DAT3 to input
	 */
	setup &= ~WBSD_DAT3_H;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
	/* Card detection works again now that DAT3 is released. */
	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Read back default clock.
	 */
	host->clk = wbsd_read_index(host, WBSD_IDX_CLK);

	/*
	 * Power down port.
	 */
	outb(WBSD_POWER_N, host->base + WBSD_CSR);

	/*
	 * Set maximum timeout.
	 */
	wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);

	/*
	 * Test for card presence
	 */
	if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
		host->flags |= WBSD_FCARD_PRESENT;
	else
		host->flags &= ~WBSD_FCARD_PRESENT;

	/*
	 * Enable interesting interrupts.
	 */
	ier = 0;
	ier |= WBSD_EINT_CARD;
	ier |= WBSD_EINT_FIFO_THRE;
	ier |= WBSD_EINT_CRC;
	ier |= WBSD_EINT_TIMEOUT;
	ier |= WBSD_EINT_TC;

	outb(ier, host->base + WBSD_EIR);

	/*
	 * Clear interrupts.
	 */
	inb(host->base + WBSD_ISR);
}
/*
 * Emergency soft reset of the SD/MMC part of the chip, used when a
 * transfer goes wrong (e.g. card yanked mid-transfer).
 */
static void wbsd_reset(struct wbsd_host *host)
{
	u8 setup;

	printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));

	/*
	 * Soft reset of chip (SD/MMC part).
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_SOFT_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}
/*
 * Finish an MMC request: tear down any DMA state and notify the MMC
 * layer. Called with host->lock held; the lock is dropped around
 * mmc_request_done() since the core may re-enter the driver.
 */
static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
	unsigned long dmaflags;

	if (host->dma >= 0) {
		/*
		 * Release ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Disable DMA on host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);
	}

	host->mrq = NULL;

	/*
	 * MMC layer might call back into the driver so first unlock.
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
/*
* Scatter/gather functions
*/
/*
 * Prime the host's PIO cursor from the request's scatterlist: start
 * at the first entry with nothing consumed yet.
 */
static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
{
	host->cur_sg = data->sg;
	host->num_sg = data->sg_len;
	host->offset = 0;
	host->remain = data->sg->length;
}
/*
 * Advance the PIO cursor to the next scatterlist entry. Returns the
 * number of entries still unconsumed (0 when the list is exhausted).
 */
static inline int wbsd_next_sg(struct wbsd_host *host)
{
	host->cur_sg++;

	/* Reset the per-entry position only if an entry remains. */
	if (--host->num_sg > 0) {
		host->offset = 0;
		host->remain = host->cur_sg->length;
	}

	return host->num_sg;
}
/* Kernel virtual address of the current scatterlist entry's data. */
static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
{
	return sg_virt(host->cur_sg);
}
/*
 * Gather the request's scatterlist into the contiguous bounce buffer
 * that the ISA DMA controller can address (writes only).
 */
static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
{
	char *dst = host->dma_buffer;
	struct scatterlist *entry = data->sg;
	unsigned int idx;

	for (idx = 0; idx < data->sg_len; idx++) {
		memcpy(dst, sg_virt(&entry[idx]), entry[idx].length);
		dst += entry[idx].length;
	}
}
/*
 * Scatter the contents of the DMA bounce buffer back into the
 * request's scatterlist (reads only).
 */
static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
{
	char *src = host->dma_buffer;
	struct scatterlist *entry = data->sg;
	unsigned int idx;

	for (idx = 0; idx < data->sg_len; idx++) {
		memcpy(sg_virt(&entry[idx]), src, entry[idx].length);
		src += entry[idx].length;
	}
}
/*
* Command handling
*/
/*
 * Read a short (48-bit) command response from the chip's response
 * registers into cmd->resp. Sets -EILSEQ if the chip latched a
 * response of a different length than expected.
 */
static inline void wbsd_get_short_reply(struct wbsd_host *host,
					struct mmc_command *cmd)
{
	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
		cmd->error = -EILSEQ;
		return;
	}

	/* Assemble the 32-bit response word, MSB first. */
	cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
	cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
	cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
}
/*
 * Read a long (136-bit) command response into cmd->resp[0..3], four
 * register bytes per response word. Sets -EILSEQ on a response-length
 * mismatch.
 */
static inline void wbsd_get_long_reply(struct wbsd_host *host,
	struct mmc_command *cmd)
{
	int i;

	/*
	 * Correct response type?
	 */
	if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
		cmd->error = -EILSEQ;
		return;
	}

	for (i = 0; i < 4; i++) {
		cmd->resp[i] =
			wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
		cmd->resp[i] |=
			wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
	}
}
/*
 * Send a command to the card and, if one is expected, collect the
 * response. Interrupt events that occurred while the command was in
 * flight (accumulated in host->isr by the IRQ handler) are translated
 * into cmd->error.
 */
static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
{
	int i;
	u8 status, isr;

	/*
	 * Clear accumulated ISR. The interrupt routine
	 * will fill this one with events that occur during
	 * transfer.
	 */
	host->isr = 0;

	/*
	 * Send the command (CRC calculated by host).
	 * Opcode first, then the 32-bit argument MSB first.
	 */
	outb(cmd->opcode, host->base + WBSD_CMDR);
	for (i = 3; i >= 0; i--)
		outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);

	cmd->error = 0;

	/*
	 * Wait for the request to complete.
	 * (Busy-wait until the chip reports no more card traffic.)
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & WBSD_CARDTRAFFIC);

	/*
	 * Do we expect a reply?
	 */
	if (cmd->flags & MMC_RSP_PRESENT) {
		/*
		 * Read back status.
		 */
		isr = host->isr;

		/* Card removed? */
		if (isr & WBSD_INT_CARD)
			cmd->error = -ENOMEDIUM;
		/* Timeout? */
		else if (isr & WBSD_INT_TIMEOUT)
			cmd->error = -ETIMEDOUT;
		/* CRC? */
		else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
			cmd->error = -EILSEQ;
		/* All ok */
		else {
			if (cmd->flags & MMC_RSP_136)
				wbsd_get_long_reply(host, cmd);
			else
				wbsd_get_short_reply(host, cmd);
		}
	}
}
/*
* Data functions
*/
/*
 * PIO read path: drain the chip's 16-byte FIFO into the current
 * scatterlist position. Called from the FIFO tasklet while a read
 * transfer is in progress.
 */
static void wbsd_empty_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Handle excessive data.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Drain the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing: full => 16 bytes, above threshold
		 * => at least 8, otherwise assume only 1 is safe.
		 */
		if (fsr & WBSD_FIFO_FULL)
			fifo = 16;
		else if (fsr & WBSD_FIFO_FUTHRE)
			fifo = 8;
		else
			fifo = 1;

		for (i = 0; i < fifo; i++) {
			*buffer = inb(host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * This is a very dirty hack to solve a
	 * hardware problem. The chip doesn't trigger
	 * FIFO threshold interrupts properly.
	 */
	if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
		tasklet_schedule(&host->fifo_tasklet);
}
/*
 * PIO write path: top up the chip's 16-byte FIFO from the current
 * scatterlist position. Called from the FIFO tasklet (and once from
 * wbsd_prepare_data() to pre-fill before a write).
 */
static void wbsd_fill_fifo(struct wbsd_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	char *buffer;
	int i, fsr, fifo;

	/*
	 * Check that we aren't being called after the
	 * entire buffer has been transferred.
	 */
	if (host->num_sg == 0)
		return;

	buffer = wbsd_sg_to_buffer(host) + host->offset;

	/*
	 * Fill the fifo. This has a tendency to loop longer
	 * than the FIFO length (usually one block).
	 */
	while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
		/*
		 * The size field in the FSR is broken so we have to
		 * do some guessing: empty => 0 bytes present, below
		 * threshold => at most 8, otherwise assume 15.
		 */
		if (fsr & WBSD_FIFO_EMPTY)
			fifo = 0;
		else if (fsr & WBSD_FIFO_EMTHRE)
			fifo = 8;
		else
			fifo = 15;

		/* Write only as many bytes as are guaranteed to fit. */
		for (i = 16; i > fifo; i--) {
			outb(*buffer, host->base + WBSD_DFR);
			buffer++;
			host->offset++;
			host->remain--;

			data->bytes_xfered++;

			/*
			 * End of scatter list entry?
			 */
			if (host->remain == 0) {
				/*
				 * Get next entry. Check if last.
				 */
				if (!wbsd_next_sg(host))
					return;

				buffer = wbsd_sg_to_buffer(host);
			}
		}
	}

	/*
	 * The controller stops sending interrupts for
	 * 'FIFO empty' under certain conditions. So we
	 * need to be a bit more pro-active.
	 */
	tasklet_schedule(&host->fifo_tasklet);
}
/*
 * Program the chip for an upcoming data transfer: timeouts, block
 * size (including CRC bytes), FIFO reset, and then either the ISA DMA
 * controller or the PIO/FIFO machinery depending on host->dma.
 *
 * On failure data->error is set and the function returns early; on
 * success data->error is cleared.
 *
 * Fix: the original had BUG_ON(size > 0x10000) immediately before the
 * graceful size check, which made the -EINVAL error path unreachable
 * and crashed the kernel instead of failing the request. The BUG_ON
 * is removed; the mmc core already caps requests via max_req_size.
 */
static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
{
	u16 blksize;
	u8 setup;
	unsigned long dmaflags;
	unsigned int size;

	/*
	 * Calculate size.
	 */
	size = data->blocks * data->blksz;

	/*
	 * Check timeout values for overflow.
	 * (Yes, some cards cause this value to overflow).
	 */
	if (data->timeout_ns > 127000000)
		wbsd_write_index(host, WBSD_IDX_TAAC, 127);
	else {
		wbsd_write_index(host, WBSD_IDX_TAAC,
			data->timeout_ns / 1000000);
	}

	if (data->timeout_clks > 255)
		wbsd_write_index(host, WBSD_IDX_NSAC, 255);
	else
		wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);

	/*
	 * Inform the chip of how large blocks will be
	 * sent. It needs this to determine when to
	 * calculate CRC.
	 *
	 * Space for CRC must be included in the size.
	 * Two bytes are needed for each data line.
	 */
	if (host->bus_width == MMC_BUS_WIDTH_1) {
		blksize = data->blksz + 2;

		wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
	} else if (host->bus_width == MMC_BUS_WIDTH_4) {
		blksize = data->blksz + 2 * 4;

		wbsd_write_index(host, WBSD_IDX_PBSMSB,
			((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
	} else {
		data->error = -EINVAL;
		return;
	}

	/*
	 * Clear the FIFO. This is needed even for DMA
	 * transfers since the chip still uses the FIFO
	 * internally.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	setup |= WBSD_FIFO_RESET;
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * The buffer for DMA is only 64 kB; fail oversized
		 * requests gracefully.
		 */
		if (size > 0x10000) {
			data->error = -EINVAL;
			return;
		}

		/*
		 * Transfer data from the SG list to
		 * the DMA buffer.
		 */
		if (data->flags & MMC_DATA_WRITE)
			wbsd_sg_to_dma(host, data);

		/*
		 * Initialise the ISA DMA controller.
		 * (Clearing 0x40 disables auto-init mode.)
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		if (data->flags & MMC_DATA_READ)
			set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
		else
			set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
		set_dma_addr(host->dma, host->dma_addr);
		set_dma_count(host->dma, size);

		enable_dma(host->dma);
		release_dma_lock(dmaflags);

		/*
		 * Enable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
	} else {
		/*
		 * This flag is used to keep printk
		 * output to a minimum.
		 */
		host->firsterr = 1;

		/*
		 * Initialise the SG list.
		 */
		wbsd_init_sg(host, data);

		/*
		 * Turn off DMA.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Set up FIFO threshold levels (and fill
		 * buffer if doing a write).
		 */
		if (data->flags & MMC_DATA_READ) {
			wbsd_write_index(host, WBSD_IDX_FIFOEN,
				WBSD_FIFOEN_FULL | 8);
		} else {
			wbsd_write_index(host, WBSD_IDX_FIFOEN,
				WBSD_FIFOEN_EMPTY | 8);
			wbsd_fill_fifo(host);
		}
	}

	data->error = 0;
}
/*
 * Complete a data transfer: send the stop command if any, wait for
 * the controller to go idle, wind down DMA (copying data back for
 * reads) and report the request as done. Called with host->lock held.
 */
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
	unsigned long dmaflags;
	int count;
	u8 status;

	WARN_ON(host->mrq == NULL);

	/*
	 * Send a stop command if needed.
	 */
	if (data->stop)
		wbsd_send_command(host, data->stop);

	/*
	 * Wait for the controller to leave data
	 * transfer state.
	 */
	do {
		status = wbsd_read_index(host, WBSD_IDX_STATUS);
	} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));

	/*
	 * DMA transfer?
	 */
	if (host->dma >= 0) {
		/*
		 * Disable DMA on the host.
		 */
		wbsd_write_index(host, WBSD_IDX_DMA, 0);

		/*
		 * Turn of ISA DMA controller.
		 */
		dmaflags = claim_dma_lock();
		disable_dma(host->dma);
		clear_dma_ff(host->dma);
		count = get_dma_residue(host->dma);
		release_dma_lock(dmaflags);

		/* Residue tells us how much never made it; round down
		 * to whole blocks since partial blocks are useless. */
		data->bytes_xfered = host->mrq->data->blocks *
			host->mrq->data->blksz - count;
		data->bytes_xfered -= data->bytes_xfered % data->blksz;

		/*
		 * Any leftover data?
		 */
		if (count) {
			printk(KERN_ERR "%s: Incomplete DMA transfer. "
				"%d bytes left.\n",
				mmc_hostname(host->mmc), count);

			if (!data->error)
				data->error = -EIO;
		} else {
			/*
			 * Transfer data from DMA buffer to
			 * SG list.
			 */
			if (data->flags & MMC_DATA_READ)
				wbsd_dma_to_sg(host, data);
		}

		/* On error, the last (possibly corrupt) block doesn't count. */
		if (data->error) {
			if (data->bytes_xfered)
				data->bytes_xfered -= data->blksz;
		}
	}

	wbsd_request_end(host, host->mrq);
}
/*****************************************************************************\
* *
* MMC layer callbacks *
* *
\*****************************************************************************/
/*
 * mmc_host_ops.request callback: start a command (and data transfer
 * if present). Data commands not on the chip's hard-wired list are
 * rejected with -EINVAL since the controller would never raise data
 * interrupts for them.
 */
static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct wbsd_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;

	/*
	 * Disable tasklets to avoid a deadlock.
	 */
	spin_lock_bh(&host->lock);

	BUG_ON(host->mrq != NULL);

	cmd = mrq->cmd;

	host->mrq = mrq;

	/*
	 * Check that there is actually a card in the slot.
	 */
	if (!(host->flags & WBSD_FCARD_PRESENT)) {
		cmd->error = -ENOMEDIUM;
		goto done;
	}

	if (cmd->data) {
		/*
		 * The hardware is so delightfully stupid that it has a list
		 * of "data" commands. If a command isn't on this list, it'll
		 * just go back to the idle state and won't send any data
		 * interrupts.
		 */
		switch (cmd->opcode) {
		case 11:
		case 17:
		case 18:
		case 20:
		case 24:
		case 25:
		case 26:
		case 27:
		case 30:
		case 42:
		case 56:
			break;

		/* ACMDs. We don't keep track of state, so we just treat them
		 * like any other command. */
		case 51:
			break;

		default:
#ifdef CONFIG_MMC_DEBUG
			printk(KERN_WARNING "%s: Data command %d is not "
				"supported by this controller.\n",
				mmc_hostname(host->mmc), cmd->opcode);
#endif
			cmd->error = -EINVAL;

			goto done;
		};
	}

	/*
	 * Does the request include data?
	 */
	if (cmd->data) {
		wbsd_prepare_data(host, cmd->data);

		if (cmd->data->error)
			goto done;
	}

	wbsd_send_command(host, cmd);

	/*
	 * If this is a data transfer the request
	 * will be finished after the data has
	 * transferred.
	 */
	if (cmd->data && !cmd->error) {
		/*
		 * Dirty fix for hardware bug.
		 */
		if (host->dma == -1)
			tasklet_schedule(&host->fifo_tasklet);

		spin_unlock_bh(&host->lock);

		return;
	}

done:
	wbsd_request_end(host, mrq);

	spin_unlock_bh(&host->lock);
}
/*
 * mmc_host_ops.set_ios callback: apply clock rate, power state, chip
 * select (DAT3) and bus width settings requested by the MMC core.
 */
static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 clk, setup, pwr;

	spin_lock_bh(&host->lock);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF)
		wbsd_init_device(host);

	/* Pick the nearest supported clock at or below the request. */
	if (ios->clock >= 24000000)
		clk = WBSD_CLK_24M;
	else if (ios->clock >= 16000000)
		clk = WBSD_CLK_16M;
	else if (ios->clock >= 12000000)
		clk = WBSD_CLK_12M;
	else
		clk = WBSD_CLK_375K;

	/*
	 * Only write to the clock register when
	 * there is an actual change.
	 */
	if (clk != host->clk) {
		wbsd_write_index(host, WBSD_IDX_CLK, clk);
		host->clk = clk;
	}

	/*
	 * Power up card.
	 */
	if (ios->power_mode != MMC_POWER_OFF) {
		pwr = inb(host->base + WBSD_CSR);
		pwr &= ~WBSD_POWER_N;
		outb(pwr, host->base + WBSD_CSR);
	}

	/*
	 * MMC cards need to have pin 1 high during init.
	 * It wreaks havoc with the card detection though so
	 * that needs to be disabled.
	 */
	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
	if (ios->chip_select == MMC_CS_HIGH) {
		BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
		setup |= WBSD_DAT3_H;
		host->flags |= WBSD_FIGNORE_DETECT;
	} else {
		if (setup & WBSD_DAT3_H) {
			setup &= ~WBSD_DAT3_H;

			/*
			 * We cannot resume card detection immediately
			 * because of capacitance and delays in the chip.
			 */
			mod_timer(&host->ignore_timer, jiffies + HZ / 100);
		}
	}
	wbsd_write_index(host, WBSD_IDX_SETUP, setup);

	/*
	 * Store bus width for later. Will be used when
	 * setting up the data transfer.
	 */
	host->bus_width = ios->bus_width;

	spin_unlock_bh(&host->lock);
}
/*
 * mmc_host_ops.get_ro callback: sample the write-protect switch.
 * The MSLED bit is pulsed around the read; presumably required to
 * latch the WP state — TODO confirm against the datasheet.
 * Returns 1 if the card is read-only, 0 otherwise.
 */
static int wbsd_get_ro(struct mmc_host *mmc)
{
	struct wbsd_host *host = mmc_priv(mmc);
	u8 csr;

	spin_lock_bh(&host->lock);

	csr = inb(host->base + WBSD_CSR);
	csr |= WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	mdelay(1);

	csr = inb(host->base + WBSD_CSR);
	csr &= ~WBSD_MSLED;
	outb(csr, host->base + WBSD_CSR);

	spin_unlock_bh(&host->lock);

	return !!(csr & WBSD_WRPT);
}
/* Callbacks handed to the MMC core for this host. */
static const struct mmc_host_ops wbsd_ops = {
	.request	= wbsd_request,
	.set_ios	= wbsd_set_ios,
	.get_ro		= wbsd_get_ro,
};
/*****************************************************************************\
* *
* Interrupt handling *
* *
\*****************************************************************************/
/*
* Helper function to reset detection ignore
*/
/*
 * Timer callback (host->ignore_timer): re-enable card detection after
 * the blackout started when DAT3 was driven high in wbsd_set_ios().
 */
static void wbsd_reset_ignore(unsigned long data)
{
	struct wbsd_host *host = (struct wbsd_host *)data;

	BUG_ON(host == NULL);

	DBG("Resetting card detection ignore\n");

	spin_lock_bh(&host->lock);

	host->flags &= ~WBSD_FIGNORE_DETECT;

	/*
	 * Card status might have changed during the
	 * blackout.
	 */
	tasklet_schedule(&host->card_tasklet);

	spin_unlock_bh(&host->lock);
}
/*
* Tasklets
*/
static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
{
WARN_ON(!host->mrq);
if (!host->mrq)
return NULL;
WARN_ON(!host->mrq->cmd);
if (!host->mrq->cmd)
return NULL;
WARN_ON(!host->mrq->cmd->data);
if (!host->mrq->cmd->data)
return NULL;
return host->mrq->cmd->data;
}
/*
 * Card-detect tasklet: reconcile the hardware's card-present bit with
 * the driver's view, abort any in-flight request on removal, and tell
 * the MMC core to rescan (debounced by 'delay' ms).
 */
static void wbsd_tasklet_card(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	u8 csr;
	int delay = -1;

	spin_lock(&host->lock);

	/* Detection is blacked out while DAT3 is driven high. */
	if (host->flags & WBSD_FIGNORE_DETECT) {
		spin_unlock(&host->lock);
		return;
	}

	csr = inb(host->base + WBSD_CSR);
	WARN_ON(csr == 0xff);

	if (csr & WBSD_CARDPRESENT) {
		if (!(host->flags & WBSD_FCARD_PRESENT)) {
			DBG("Card inserted\n");
			host->flags |= WBSD_FCARD_PRESENT;

			delay = 500;
		}
	} else if (host->flags & WBSD_FCARD_PRESENT) {
		DBG("Card removed\n");
		host->flags &= ~WBSD_FCARD_PRESENT;

		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			wbsd_reset(host);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		delay = 0;
	}

	/*
	 * Unlock first since we might get a call back.
	 */
	spin_unlock(&host->lock);

	if (delay != -1)
		mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
}
/*
 * FIFO tasklet: shuttle bytes between FIFO and scatterlist during a
 * PIO transfer, and finish the request once the list is exhausted.
 */
static void wbsd_tasklet_fifo(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	if (data->flags & MMC_DATA_WRITE)
		wbsd_fill_fifo(host);
	else
		wbsd_empty_fifo(host);

	/*
	 * Done?
	 */
	if (host->num_sg == 0) {
		wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
		tasklet_schedule(&host->finish_tasklet);
	}

end:
	spin_unlock(&host->lock);
}
/* CRC-error tasklet: flag the data phase as -EILSEQ and finish up. */
static void wbsd_tasklet_crc(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("CRC error\n");

	data->error = -EILSEQ;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}
/* Timeout tasklet: flag the data phase as -ETIMEDOUT and finish up. */
static void wbsd_tasklet_timeout(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	DBGF("Timeout\n");

	data->error = -ETIMEDOUT;

	tasklet_schedule(&host->finish_tasklet);

end:
	spin_unlock(&host->lock);
}
/* Transfer-complete tasklet: run the data-completion path. */
static void wbsd_tasklet_finish(unsigned long param)
{
	struct wbsd_host *host = (struct wbsd_host *)param;
	struct mmc_data *data;

	spin_lock(&host->lock);

	WARN_ON(!host->mrq);
	if (!host->mrq)
		goto end;

	data = wbsd_get_data(host);
	if (!data)
		goto end;

	wbsd_finish_data(host, data);

end:
	spin_unlock(&host->lock);
}
/*
* Interrupt handling
*/
/*
 * Shared interrupt handler: read (and thereby clear) the ISR,
 * accumulate it into host->isr for wbsd_send_command(), and defer the
 * real work to tasklets. Error events use high-priority scheduling so
 * they pre-empt a pending FIFO tasklet.
 */
static irqreturn_t wbsd_irq(int irq, void *dev_id)
{
	struct wbsd_host *host = dev_id;
	int isr;

	isr = inb(host->base + WBSD_ISR);

	/*
	 * Was it actually our hardware that caused the interrupt?
	 */
	if (isr == 0xff || isr == 0x00)
		return IRQ_NONE;

	host->isr |= isr;

	/*
	 * Schedule tasklets as needed.
	 */
	if (isr & WBSD_INT_CARD)
		tasklet_schedule(&host->card_tasklet);
	if (isr & WBSD_INT_FIFO_THRE)
		tasklet_schedule(&host->fifo_tasklet);
	if (isr & WBSD_INT_CRC)
		tasklet_hi_schedule(&host->crc_tasklet);
	if (isr & WBSD_INT_TIMEOUT)
		tasklet_hi_schedule(&host->timeout_tasklet);
	if (isr & WBSD_INT_TC)
		tasklet_schedule(&host->finish_tasklet);

	return IRQ_HANDLED;
}
/*****************************************************************************\
* *
* Device initialisation and shutdown *
* *
\*****************************************************************************/
/*
* Allocate/free MMC structure.
*/
/*
 * Allocate the mmc_host/wbsd_host pair, fill in the host's static
 * capabilities and limits, and stash it in the device's drvdata.
 * Returns 0 on success or -ENOMEM.
 */
static int __devinit wbsd_alloc_mmc(struct device *dev)
{
	struct mmc_host *mmc;
	struct wbsd_host *host;

	/*
	 * Allocate MMC structure.
	 */
	mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -1 == no DMA channel claimed (PIO mode). */
	host->dma = -1;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &wbsd_ops;
	mmc->f_min = 375000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	spin_lock_init(&host->lock);

	/*
	 * Set up timers
	 */
	init_timer(&host->ignore_timer);
	host->ignore_timer.data = (unsigned long)host;
	host->ignore_timer.function = wbsd_reset_ignore;

	/*
	 * Maximum number of segments. Worst case is one sector per segment
	 * so this will be 64kB/512.
	 */
	mmc->max_segs = 128;

	/*
	 * Maximum request size. Also limited by 64KiB buffer.
	 */
	mmc->max_req_size = 65536;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
	 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
	 */
	mmc->max_blk_size = 4087;

	/*
	 * Maximum block count. There is no real limit so the maximum
	 * request size will be the only restriction.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	dev_set_drvdata(dev, mmc);

	return 0;
}
/*
 * Tear down what wbsd_alloc_mmc() created: stop the detection-ignore
 * timer, release the mmc_host and clear the device's drvdata. Safe to
 * call when no host was ever allocated.
 */
static void wbsd_free_mmc(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wbsd_host *host;

	if (!mmc)
		return;

	host = mmc_priv(mmc);
	BUG_ON(host == NULL);

	/* Make sure the timer can't fire after the host is gone. */
	del_timer_sync(&host->ignore_timer);

	mmc_free_host(mmc);

	dev_set_drvdata(dev, NULL);
}
/*
* Scan for known chip id:s
*/
/*
 * Probe the known Super I/O config ports with each unlock code and
 * read the chip ID. On a recognised ID, the config region stays
 * reserved and host->config/unlock_code/chip_id are set; otherwise
 * everything is released and -ENODEV is returned.
 */
static int __devinit wbsd_scan(struct wbsd_host *host)
{
	int i, j, k;
	int id;

	/*
	 * Iterate through all ports, all codes to
	 * find hardware that is in our known list.
	 */
	for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
		if (!request_region(config_ports[i], 2, DRIVER_NAME))
			continue;

		for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
			id = 0xFFFF;

			host->config = config_ports[i];
			host->unlock_code = unlock_codes[j];

			wbsd_unlock_config(host);

			/* Chip ID is two config bytes, high then low. */
			outb(WBSD_CONF_ID_HI, config_ports[i]);
			id = inb(config_ports[i] + 1) << 8;

			outb(WBSD_CONF_ID_LO, config_ports[i]);
			id |= inb(config_ports[i] + 1);

			wbsd_lock_config(host);

			for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
				if (id == valid_ids[k]) {
					host->chip_id = id;

					return 0;
				}
			}

			if (id != 0xFFFF) {
				DBG("Unknown hardware (id %x) found at %x\n",
					id, config_ports[i]);
			}
		}

		release_region(config_ports[i], 2);
	}

	host->config = 0;
	host->unlock_code = 0;

	return -ENODEV;
}
/*
* Allocate/free io port ranges
*/
/*
 * Reserve the chip's 8-byte I/O window at 'base'. The base must be
 * aligned to 8 bytes. Returns 0, -EINVAL or -EIO.
 */
static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
{
	if (base & 0x7)
		return -EINVAL;

	if (request_region(base, 8, DRIVER_NAME) == NULL)
		return -EIO;

	host->base = base;
	return 0;
}
/*
 * Release the chip I/O window and the Super I/O config port region,
 * whichever have been claimed.
 */
static void wbsd_release_regions(struct wbsd_host *host)
{
	if (host->base)
		release_region(host->base, 8);

	host->base = 0;

	if (host->config)
		release_region(host->config, 2);

	host->config = 0;
}
/*
* Allocate/free DMA port and buffer
*/
/*
 * Try to set up ISA DMA on channel 'dma': claim the channel, allocate
 * a 64 KiB bounce buffer in ISA-reachable (low, GFP_DMA) memory and
 * map it for streaming DMA. On success host->dma is set; on any
 * failure everything is undone and the driver falls back to PIO/FIFO
 * mode (host->dma stays -1), with a warning.
 *
 * Fixes vs. the original: the dma_map_single() result is now checked
 * with dma_mapping_error(), and the BUG_ON(1) at the cleanup label is
 * gone — it made the unmap/free/fallback code unreachable and took
 * the whole system down instead of falling back to FIFO as the
 * warning message promises.
 */
static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
{
	if (dma < 0)
		return;

	if (request_dma(dma, DRIVER_NAME))
		goto err;

	/*
	 * We need to allocate a special buffer in
	 * order for ISA to be able to DMA to it.
	 */
	host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
		GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
	if (!host->dma_buffer)
		goto free;

	/*
	 * Translate the address to a physical address.
	 */
	host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmc_dev(host->mmc), host->dma_addr))
		goto kfree;

	/*
	 * ISA DMA must be aligned on a 64k basis.
	 */
	if ((host->dma_addr & 0xffff) != 0)
		goto unmap;
	/*
	 * ISA cannot access memory above 16 MB.
	 */
	else if (host->dma_addr >= 0x1000000)
		goto unmap;

	host->dma = dma;

	return;

unmap:
	/*
	 * The mapping violated ISA's alignment/addressing constraints;
	 * undo it and fall back on FIFO transfers.
	 */
	dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
kfree:
	host->dma_addr = 0;

	kfree(host->dma_buffer);
	host->dma_buffer = NULL;
free:
	free_dma(dma);
err:
	printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
		"Falling back on FIFO.\n", dma);
}
/*
 * Undo wbsd_request_dma(): unmap and free the bounce buffer and give
 * the ISA channel back. Safe to call when DMA was never set up.
 */
static void wbsd_release_dma(struct wbsd_host *host)
{
	if (host->dma_addr)
		dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
			WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);

	kfree(host->dma_buffer);

	if (host->dma >= 0)
		free_dma(host->dma);

	host->dma = -1;
	host->dma_buffer = NULL;
	host->dma_addr = 0;
}
/*
* Allocate/free IRQ.
*/
/*
 * Initialise the five deferred-work tasklets and claim the (shared)
 * interrupt line. Tasklets must exist before the handler can fire.
 * Returns 0 or the request_irq() error.
 */
static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
{
	int ret;

	/*
	 * Set up tasklets. Must be done before requesting interrupt.
	 */
	tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
			(unsigned long)host);
	tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
			(unsigned long)host);
	tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
			(unsigned long)host);
	tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
			(unsigned long)host);
	tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
			(unsigned long)host);

	/*
	 * Allocate interrupt.
	 */
	ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret)
		return ret;

	host->irq = irq;

	return 0;
}
/*
 * Free the interrupt line and kill the tasklets. IRQ is freed first
 * so no new tasklet can be scheduled while they are being killed.
 */
static void wbsd_release_irq(struct wbsd_host *host)
{
	if (!host->irq)
		return;

	free_irq(host->irq, host);

	host->irq = 0;

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->fifo_tasklet);
	tasklet_kill(&host->crc_tasklet);
	tasklet_kill(&host->timeout_tasklet);
	tasklet_kill(&host->finish_tasklet);
}
/*
* Allocate all resources for the host.
*/
/*
 * Claim I/O ports, IRQ and (best-effort) DMA for the host. DMA
 * failure is not fatal — the driver silently falls back to PIO.
 * On error the caller is expected to run wbsd_release_resources().
 */
static int __devinit wbsd_request_resources(struct wbsd_host *host,
	int base, int irq, int dma)
{
	int ret;

	/*
	 * Allocate I/O ports.
	 */
	ret = wbsd_request_region(host, base);
	if (ret)
		return ret;

	/*
	 * Allocate interrupt.
	 */
	ret = wbsd_request_irq(host, irq);
	if (ret)
		return ret;

	/*
	 * Allocate DMA.
	 */
	wbsd_request_dma(host, dma);

	return 0;
}
/*
* Release all resources for the host.
*/
/* Release DMA, IRQ and I/O regions (reverse order of acquisition). */
static void wbsd_release_resources(struct wbsd_host *host)
{
	wbsd_release_dma(host);
	wbsd_release_irq(host);
	wbsd_release_regions(host);
}
/*
* Configure the resources the chip should use.
*/
/*
 * Program the Super I/O chip with the resources the driver claimed:
 * reset, select the SD/MMC logical device, set card detect pin, I/O
 * base, IRQ and (optionally) DMA, then enable and power up.
 */
static void wbsd_chip_config(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	/*
	 * Reset the chip.
	 */
	wbsd_write_config(host, WBSD_CONF_SWRST, 1);
	wbsd_write_config(host, WBSD_CONF_SWRST, 0);

	/*
	 * Select SD/MMC function.
	 */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/*
	 * Set up card detection.
	 */
	wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);

	/*
	 * Configure chip
	 */
	wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
	wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);

	wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);

	if (host->dma >= 0)
		wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);

	/*
	 * Enable and power up chip.
	 */
	wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
	wbsd_write_config(host, WBSD_CONF_POWER, 0x20);

	wbsd_lock_config(host);
}
/*
* Check that configured resources are correct.
*/
/*
 * Compare the chip's configured resources against what the driver
 * was given. Returns 1 when they match, 0 otherwise.
 */
static int wbsd_chip_validate(struct wbsd_host *host)
{
	int base, irq, dma;

	wbsd_unlock_config(host);

	/* Select the SD/MMC logical device before reading its registers. */
	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);

	/* Read back the configured I/O base, IRQ and DMA channel. */
	base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
	base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
	irq = wbsd_read_config(host, WBSD_CONF_IRQ);
	dma = wbsd_read_config(host, WBSD_CONF_DRQ);

	wbsd_lock_config(host);

	/* The DMA channel only has to match when the driver uses DMA. */
	return (base == host->base) && (irq == host->irq) &&
		((dma == host->dma) || (host->dma == -1));
}
/*
* Powers down the SD function
*/
/* Disable the SD/MMC logical device in the Super I/O chip. */
static void wbsd_chip_poweroff(struct wbsd_host *host)
{
	wbsd_unlock_config(host);

	wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
	wbsd_write_config(host, WBSD_CONF_ENABLE, 0);

	wbsd_lock_config(host);
}
/*****************************************************************************\
* *
* Devices setup and shutdown *
* *
\*****************************************************************************/
/*
 * Common probe path for both the platform and PnP bindings: allocate
 * the host, locate the chip, claim resources, configure the chip
 * (unless PnP already did), and register with the MMC core.
 * 'pnp' selects the PnP-specific behaviour (tolerate a missing config
 * port, trust the BIOS's chip configuration when it checks out).
 */
static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
	int pnp)
{
	struct wbsd_host *host = NULL;
	struct mmc_host *mmc = NULL;
	int ret;

	ret = wbsd_alloc_mmc(dev);
	if (ret)
		return ret;

	mmc = dev_get_drvdata(dev);
	host = mmc_priv(mmc);

	/*
	 * Scan for hardware.
	 */
	ret = wbsd_scan(host);
	if (ret) {
		/* With PnP the BIOS told us the device exists, so a
		 * failed scan is only a warning, not fatal. */
		if (pnp && (ret == -ENODEV)) {
			printk(KERN_WARNING DRIVER_NAME
				": Unable to confirm device presence. You may "
				"experience lock-ups.\n");
		} else {
			wbsd_free_mmc(dev);
			return ret;
		}
	}

	/*
	 * Request resources.
	 */
	ret = wbsd_request_resources(host, base, irq, dma);
	if (ret) {
		wbsd_release_resources(host);
		wbsd_free_mmc(dev);
		return ret;
	}

	/*
	 * See if chip needs to be configured.
	 */
	if (pnp) {
		if ((host->config != 0) && !wbsd_chip_validate(host)) {
			printk(KERN_WARNING DRIVER_NAME
				": PnP active but chip not configured! "
				"You probably have a buggy BIOS. "
				"Configuring chip manually.\n");
			wbsd_chip_config(host);
		}
	} else
		wbsd_chip_config(host);

	/*
	 * Power Management stuff. No idea how this works.
	 * Not tested.
	 */
#ifdef CONFIG_PM
	if (host->config) {
		wbsd_unlock_config(host);
		wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
		wbsd_lock_config(host);
	}
#endif
	/*
	 * Allow device to initialise itself properly.
	 */
	mdelay(5);

	/*
	 * Reset the chip into a known state.
	 */
	wbsd_init_device(host);

	mmc_add_host(mmc);

	/* One-line summary of how the host was set up. */
	printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
	if (host->chip_id != 0)
		printk(" id %x", (int)host->chip_id);
	printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
	if (host->dma >= 0)
		printk(" dma %d", (int)host->dma);
	else
		printk(" FIFO");
	if (pnp)
		printk(" PnP");
	printk("\n");

	return 0;
}
/*
 * Common teardown for both bindings: unregister from the MMC core,
 * power the chip function down (left to the firmware in the PnP
 * case) and release all resources.
 */
static void __devexit wbsd_shutdown(struct device *dev, int pnp)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wbsd_host *host;

	if (!mmc)
		return;

	host = mmc_priv(mmc);

	mmc_remove_host(mmc);

	/*
	 * Power down the SD/MMC function.
	 */
	if (!pnp)
		wbsd_chip_poweroff(host);

	wbsd_release_resources(host);

	wbsd_free_mmc(dev);
}
/*
* Non-PnP
*/
/* Non-PnP platform probe: resources come from the module parameters. */
static int __devinit wbsd_probe(struct platform_device *dev)
{
	/* Use the module parameters for resources */
	return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
}
/* Non-PnP platform remove. */
static int __devexit wbsd_remove(struct platform_device *dev)
{
	wbsd_shutdown(&dev->dev, 0);

	return 0;
}
/*
* PnP
*/
#ifdef CONFIG_PNP
/*
 * PnP probe: pull I/O, IRQ and DMA assignments from the PnP layer
 * (DMA is optional; -1 selects FIFO mode) and run the common init.
 */
static int __devinit
wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
{
	int io, irq, dma;

	/*
	 * Get resources from PnP layer.
	 */
	io = pnp_port_start(pnpdev, 0);
	irq = pnp_irq(pnpdev, 0);
	if (pnp_dma_valid(pnpdev, 0))
		dma = pnp_dma(pnpdev, 0);
	else
		dma = -1;

	DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);

	return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
}
/* PnP remove. */
static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
{
	wbsd_shutdown(&dev->dev, 1);
}
#endif /* CONFIG_PNP */
/*
* Power management
*/
#ifdef CONFIG_PM
/* Common suspend: let the MMC core quiesce the card. */
static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
{
	BUG_ON(host == NULL);

	return mmc_suspend_host(host->mmc);
}
/* Common resume: reinitialise the chip, then let the core resume. */
static int wbsd_resume(struct wbsd_host *host)
{
	BUG_ON(host == NULL);

	wbsd_init_device(host);

	return mmc_resume_host(host->mmc);
}
/*
 * Platform (non-PnP) suspend: quiesce the card, then power off the
 * chip function — nobody else will do it for us.
 */
static int wbsd_platform_suspend(struct platform_device *dev,
				 pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct wbsd_host *host;
	int ret;

	if (mmc == NULL)
		return 0;

	DBGF("Suspending...\n");

	host = mmc_priv(mmc);

	ret = wbsd_suspend(host, state);
	if (ret)
		return ret;

	wbsd_chip_poweroff(host);

	return 0;
}
/*
 * Platform (non-PnP) resume: reconfigure the chip from scratch (it
 * was powered off on suspend), give it time to settle, then resume.
 */
static int wbsd_platform_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct wbsd_host *host;

	if (mmc == NULL)
		return 0;

	DBGF("Resuming...\n");

	host = mmc_priv(mmc);

	wbsd_chip_config(host);

	/*
	 * Allow device to initialise itself properly.
	 */
	mdelay(5);

	return wbsd_resume(host);
}
#ifdef CONFIG_PNP
/* PnP suspend: only quiesce; the firmware manages chip power. */
static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
	struct wbsd_host *host;

	if (mmc == NULL)
		return 0;

	DBGF("Suspending...\n");

	host = mmc_priv(mmc);

	return wbsd_suspend(host, state);
}
/*
 * PnP resume: trust the BIOS to have reconfigured the chip, but
 * verify and reprogram it manually if the configuration is wrong.
 */
static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
{
	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
	struct wbsd_host *host;

	if (mmc == NULL)
		return 0;

	DBGF("Resuming...\n");

	host = mmc_priv(mmc);

	/*
	 * See if chip needs to be configured.
	 */
	if (host->config != 0) {
		if (!wbsd_chip_validate(host)) {
			printk(KERN_WARNING DRIVER_NAME
				": PnP active but chip not configured! "
				"You probably have a buggy BIOS. "
				"Configuring chip manually.\n");
			wbsd_chip_config(host);
		}
	}

	/*
	 * Allow device to initialise itself properly.
	 */
	mdelay(5);

	return wbsd_resume(host);
}
#endif /* CONFIG_PNP */
#else /* CONFIG_PM */
#define wbsd_platform_suspend NULL
#define wbsd_platform_resume NULL
#define wbsd_pnp_suspend NULL
#define wbsd_pnp_resume NULL
#endif /* CONFIG_PM */
/* Platform device created in wbsd_drv_init when PnP probing is disabled. */
static struct platform_device *wbsd_device;

/* Platform-bus driver: used only when nopnp=1 (manual probing). */
static struct platform_driver wbsd_driver = {
	.probe		= wbsd_probe,
	.remove		= __devexit_p(wbsd_remove),
	.suspend	= wbsd_platform_suspend,
	.resume		= wbsd_platform_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
#ifdef CONFIG_PNP
/* PnP-bus driver: default binding path, matched via pnp_dev_table. */
static struct pnp_driver wbsd_pnp_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pnp_dev_table,
	.probe		= wbsd_pnp_probe,
	.remove		= __devexit_p(wbsd_pnp_remove),
	.suspend	= wbsd_pnp_suspend,
	.resume		= wbsd_pnp_resume,
};
#endif /* CONFIG_PNP */
/*
* Module loading/unloading
*/
/*
 * Module init: register either the PnP driver (default) or, when the
 * user passed nopnp=1, a platform driver plus a single platform device
 * probed at the module-parameter I/O/IRQ/DMA settings.  The two
 * branches are mutually exclusive via param_nopnp.
 */
static int __init wbsd_drv_init(void)
{
	int result;
	printk(KERN_INFO DRIVER_NAME
		": Winbond W83L51xD SD/MMC card interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
#ifdef CONFIG_PNP
	if (!param_nopnp) {
		result = pnp_register_driver(&wbsd_pnp_driver);
		if (result < 0)
			return result;
	}
#endif /* CONFIG_PNP */
	if (param_nopnp) {
		result = platform_driver_register(&wbsd_driver);
		if (result < 0)
			return result;
		/* -1 id: there is only ever one such device */
		wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
		if (!wbsd_device) {
			platform_driver_unregister(&wbsd_driver);
			return -ENOMEM;
		}
		result = platform_device_add(wbsd_device);
		if (result) {
			/* put (not unregister): device was never added */
			platform_device_put(wbsd_device);
			platform_driver_unregister(&wbsd_driver);
			return result;
		}
	}
	return 0;
}
/*
 * Module exit: mirror of wbsd_drv_init — unregister whichever bus
 * binding (PnP or platform) was set up at load time.
 */
static void __exit wbsd_drv_exit(void)
{
#ifdef CONFIG_PNP
	if (!param_nopnp)
		pnp_unregister_driver(&wbsd_pnp_driver);
#endif /* CONFIG_PNP */
	if (param_nopnp) {
		platform_device_unregister(wbsd_device);
		platform_driver_unregister(&wbsd_driver);
	}
	DBG("unloaded\n");
}
module_init(wbsd_drv_init);
module_exit(wbsd_drv_exit);
#ifdef CONFIG_PNP
module_param_named(nopnp, param_nopnp, uint, 0444);
#endif
module_param_named(io, param_io, uint, 0444);
module_param_named(irq, param_irq, uint, 0444);
module_param_named(dma, param_dma, int, 0444);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
#ifdef CONFIG_PNP
MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
#endif
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
| gpl-2.0 |
corcor67/SMPL_M8_SENSE | drivers/acpi/battery.c | 2816 | 32057 | /*
* battery.c - ACPI Battery Driver (Revision: 2.0)
*
* Copyright (C) 2007 Alexey Starikovskiy <astarikovskiy@suse.de>
* Copyright (C) 2004-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com>
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#endif
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/power_supply.h>
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_DEVICE_NAME "Battery"
#define ACPI_BATTERY_NOTIFY_STATUS 0x80
#define ACPI_BATTERY_NOTIFY_INFO 0x81
#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
#define _COMPONENT ACPI_BATTERY_COMPONENT
ACPI_MODULE_NAME("battery");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
#ifdef CONFIG_ACPI_PROCFS_POWER
extern struct proc_dir_entry *acpi_lock_battery_dir(void);
extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
enum acpi_battery_files {
info_tag = 0,
state_tag,
alarm_tag,
ACPI_BATTERY_NUMFILES,
};
#endif
/* ACPI Control Method Battery HID. */
static const struct acpi_device_id battery_device_ids[] = {
	{"PNP0C0A", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, battery_device_ids);

/* Bit numbers for acpi_battery.flags. */
enum {
	ACPI_BATTERY_ALARM_PRESENT,		/* _BTP method exists */
	ACPI_BATTERY_XINFO_PRESENT,		/* _BIX (extended info) exists */
	/* battery reports capacity as a 0-100 percentage, not mWh/mAh */
	ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};

/*
 * Per-battery state.  The integer fields mirror the package returned
 * by _BST (state/rate/capacity/voltage "now" values) and _BIF/_BIX
 * (design/static values); extract_package() fills them via the offset
 * tables below.  ACPI_BATTERY_VALUE_UNKNOWN marks unreported values.
 */
struct acpi_battery {
	struct mutex lock;		/* serialises ACPI method evaluation */
	struct mutex sysfs_lock;	/* serialises power_supply add/remove */
	struct power_supply bat;
	struct acpi_device *device;
	struct notifier_block pm_nb;	/* rebuild sysfs after resume */
	unsigned long update_time;	/* jiffies of last _BST read (0 = stale) */
	int rate_now;
	int capacity_now;
	int voltage_now;
	int design_capacity;
	int full_charge_capacity;
	int technology;
	int design_voltage;
	int design_capacity_warning;
	int design_capacity_low;
	int cycle_count;
	int measurement_accuracy;
	int max_sampling_time;
	int min_sampling_time;
	int max_averaging_interval;
	int min_averaging_interval;
	int capacity_granularity_1;
	int capacity_granularity_2;
	int alarm;			/* _BTP trip point, 0 = unset */
	char model_number[32];
	char serial_number[32];
	char type[32];
	char oem_info[32];
	int state;			/* _BST bit 0: discharging, 1: charging, 2: critical */
	int power_unit;			/* 0 = mW(h), 1 = mA(h) */
	unsigned long flags;		/* see enum above */
};

#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
/* Whether _STA reported the battery as physically present. */
inline int acpi_battery_present(struct acpi_battery *battery)
{
	return battery->device->status.battery_present;
}

/*
 * Map the free-form _BIF/_BIX chemistry string to a power_supply
 * technology constant.  Matching is case-insensitive; "LI-ION" is a
 * prefix match since vendors append suffixes.
 */
static int acpi_battery_technology(struct acpi_battery *battery)
{
	if (!strcasecmp("NiCd", battery->type))
		return POWER_SUPPLY_TECHNOLOGY_NiCd;
	if (!strcasecmp("NiMH", battery->type))
		return POWER_SUPPLY_TECHNOLOGY_NiMH;
	if (!strcasecmp("LION", battery->type))
		return POWER_SUPPLY_TECHNOLOGY_LION;
	if (!strncasecmp("LI-ION", battery->type, 6))
		return POWER_SUPPLY_TECHNOLOGY_LION;
	if (!strcasecmp("LiP", battery->type))
		return POWER_SUPPLY_TECHNOLOGY_LIPO;
	return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
}
static int acpi_battery_get_state(struct acpi_battery *battery);
/*
 * Decide whether an idle battery should be reported as FULL.
 * Returns 1 only when no (dis)charge is in progress, a charge reading
 * exists, and it matches either the tracked full-charge capacity or
 * (for broken firmware) the design capacity.
 */
static int acpi_battery_is_charged(struct acpi_battery *battery)
{
	/* a non-zero state means charging or discharging is in progress */
	if (battery->state)
		return 0;
	/* no usable charge reading at all */
	if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
	    battery->capacity_now == 0)
		return 0;
	/*
	 * Good batteries update full_charge_capacity as they degrade;
	 * broken ones only ever match the design value.  Either exact
	 * match counts as charged — no percentage-based heuristics.
	 */
	return battery->capacity_now == battery->full_charge_capacity ||
	       battery->capacity_now == battery->design_capacity;
}
/*
 * power_supply get_property callback.  Refreshes cached _BST state
 * (rate-limited by cache_time) before answering.  ACPI reports values
 * in mV/mA/mW/mAh/mWh; the power_supply class expects micro-units,
 * hence the *1000 scaling.  Unknown values yield -ENODEV.
 */
static int acpi_battery_get_property(struct power_supply *psy,
				     enum power_supply_property psp,
				     union power_supply_propval *val)
{
	int ret = 0;
	struct acpi_battery *battery = to_acpi_battery(psy);
	if (acpi_battery_present(battery)) {
		/* run battery update only if it is present */
		acpi_battery_get_state(battery);
	} else if (psp != POWER_SUPPLY_PROP_PRESENT)
		return -ENODEV;
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		/* _BST state bits: 0 = discharging, 1 = charging */
		if (battery->state & 0x01)
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		else if (battery->state & 0x02)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else if (acpi_battery_is_charged(battery))
			val->intval = POWER_SUPPLY_STATUS_FULL;
		else
			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = acpi_battery_present(battery);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = acpi_battery_technology(battery);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		val->intval = battery->cycle_count;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->design_voltage * 1000;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->voltage_now * 1000;
		break;
	/* CURRENT vs POWER depends on power_unit (mA vs mW battery) */
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_POWER_NOW:
		if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->rate_now * 1000;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
		if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->design_capacity * 1000;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
	case POWER_SUPPLY_PROP_ENERGY_FULL:
		if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->full_charge_capacity * 1000;
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW:
	case POWER_SUPPLY_PROP_ENERGY_NOW:
		if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->capacity_now * 1000;
		break;
	case POWER_SUPPLY_PROP_MODEL_NAME:
		val->strval = battery->model_number;
		break;
	case POWER_SUPPLY_PROP_MANUFACTURER:
		val->strval = battery->oem_info;
		break;
	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
		val->strval = battery->serial_number;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/* Properties exposed for mA(h)-unit batteries (power_unit == 1). */
static enum power_supply_property charge_battery_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
	POWER_SUPPLY_PROP_CHARGE_FULL,
	POWER_SUPPLY_PROP_CHARGE_NOW,
	POWER_SUPPLY_PROP_MODEL_NAME,
	POWER_SUPPLY_PROP_MANUFACTURER,
	POWER_SUPPLY_PROP_SERIAL_NUMBER,
};

/* Properties exposed for mW(h)-unit batteries (power_unit == 0). */
static enum power_supply_property energy_battery_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_POWER_NOW,
	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
	POWER_SUPPLY_PROP_ENERGY_FULL,
	POWER_SUPPLY_PROP_ENERGY_NOW,
	POWER_SUPPLY_PROP_MODEL_NAME,
	POWER_SUPPLY_PROP_MANUFACTURER,
	POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
#ifdef CONFIG_ACPI_PROCFS_POWER
/* Unit string for /proc output, chosen by the battery's power_unit. */
inline char *acpi_battery_units(struct acpi_battery *battery)
{
	return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
		"mA" : "mW";
}
#endif
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
/*
 * Field-extraction descriptor: one entry per element of an ACPI
 * package, in package order.  extract_package() walks these tables,
 * so entry order here MUST match the _BST/_BIF/_BIX package layout.
 */
struct acpi_offsets {
	size_t offset;		/* offset inside struct acpi_sbs_battery */
	u8 mode;		/* int or string? */
};

/* _BST package layout. */
static struct acpi_offsets state_offsets[] = {
	{offsetof(struct acpi_battery, state), 0},
	{offsetof(struct acpi_battery, rate_now), 0},
	{offsetof(struct acpi_battery, capacity_now), 0},
	{offsetof(struct acpi_battery, voltage_now), 0},
};

/* _BIF package layout. */
static struct acpi_offsets info_offsets[] = {
	{offsetof(struct acpi_battery, power_unit), 0},
	{offsetof(struct acpi_battery, design_capacity), 0},
	{offsetof(struct acpi_battery, full_charge_capacity), 0},
	{offsetof(struct acpi_battery, technology), 0},
	{offsetof(struct acpi_battery, design_voltage), 0},
	{offsetof(struct acpi_battery, design_capacity_warning), 0},
	{offsetof(struct acpi_battery, design_capacity_low), 0},
	{offsetof(struct acpi_battery, capacity_granularity_1), 0},
	{offsetof(struct acpi_battery, capacity_granularity_2), 0},
	{offsetof(struct acpi_battery, model_number), 1},
	{offsetof(struct acpi_battery, serial_number), 1},
	{offsetof(struct acpi_battery, type), 1},
	{offsetof(struct acpi_battery, oem_info), 1},
};

/* _BIX package layout (superset of _BIF with sampling/cycle data). */
static struct acpi_offsets extended_info_offsets[] = {
	{offsetof(struct acpi_battery, power_unit), 0},
	{offsetof(struct acpi_battery, design_capacity), 0},
	{offsetof(struct acpi_battery, full_charge_capacity), 0},
	{offsetof(struct acpi_battery, technology), 0},
	{offsetof(struct acpi_battery, design_voltage), 0},
	{offsetof(struct acpi_battery, design_capacity_warning), 0},
	{offsetof(struct acpi_battery, design_capacity_low), 0},
	{offsetof(struct acpi_battery, cycle_count), 0},
	{offsetof(struct acpi_battery, measurement_accuracy), 0},
	{offsetof(struct acpi_battery, max_sampling_time), 0},
	{offsetof(struct acpi_battery, min_sampling_time), 0},
	{offsetof(struct acpi_battery, max_averaging_interval), 0},
	{offsetof(struct acpi_battery, min_averaging_interval), 0},
	{offsetof(struct acpi_battery, capacity_granularity_1), 0},
	{offsetof(struct acpi_battery, capacity_granularity_2), 0},
	{offsetof(struct acpi_battery, model_number), 1},
	{offsetof(struct acpi_battery, serial_number), 1},
	{offsetof(struct acpi_battery, type), 1},
	{offsetof(struct acpi_battery, oem_info), 1},
};
/*
 * Copy the elements of an ACPI package into struct acpi_battery fields
 * using an acpi_offsets table (one entry per package element, in
 * order).  mode == 1 entries are 32-byte string fields, mode == 0
 * entries are ints (-1 when the element is not an integer).
 *
 * Returns 0 on success, -EFAULT if the object is not a package or is
 * shorter than the offsets table.
 *
 * Fix: the old code did strncpy(ptr, src, 32) into a 32-byte field,
 * which leaves the string UNterminated when the firmware string is
 * 32 chars or longer; later "%s" users would then read past the
 * buffer.  Copy at most 31 bytes and always NUL-terminate.
 */
static int extract_package(struct acpi_battery *battery,
			   union acpi_object *package,
			   struct acpi_offsets *offsets, int num)
{
	int i;
	union acpi_object *element;
	if (package->type != ACPI_TYPE_PACKAGE)
		return -EFAULT;
	for (i = 0; i < num; ++i) {
		if (package->package.count <= i)
			return -EFAULT;
		element = &package->package.elements[i];
		if (offsets[i].mode) {
			/* destination is a 32-byte char field */
			u8 *ptr = (u8 *)battery + offsets[i].offset;
			if (element->type == ACPI_TYPE_STRING ||
			    element->type == ACPI_TYPE_BUFFER) {
				strncpy(ptr, element->string.pointer, 31);
				ptr[31] = '\0';
			} else if (element->type == ACPI_TYPE_INTEGER) {
				/* treat the raw integer bytes as a string */
				strncpy(ptr, (u8 *)&element->integer.value,
					sizeof(u64));
				ptr[sizeof(u64)] = 0;
			} else
				*ptr = 0; /* don't have value */
		} else {
			int *x = (int *)((u8 *)battery + offsets[i].offset);
			*x = (element->type == ACPI_TYPE_INTEGER) ?
				element->integer.value : -1;
		}
	}
	return 0;
}
/*
 * Refresh the ACPI device status (_STA), which includes the
 * battery_present bit read by acpi_battery_present().
 */
static int acpi_battery_get_status(struct acpi_battery *battery)
{
	if (acpi_bus_get_status(battery->device)) {
		ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA"));
		return -ENODEV;
	}
	return 0;
}
/*
 * Evaluate _BIX (extended info, if advertised at probe time) or _BIF
 * and unpack the result into the static battery fields.  No-op for an
 * absent battery.  Applies the percentage-capacity quirk afterwards.
 */
static int acpi_battery_get_info(struct acpi_battery *battery)
{
	int result = -EFAULT;
	acpi_status status = 0;
	char *name = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)?
			"_BIX" : "_BIF";
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	if (!acpi_battery_present(battery))
		return 0;
	/* lock serialises AML evaluation with other battery methods */
	mutex_lock(&battery->lock);
	status = acpi_evaluate_object(battery->device->handle, name,
						NULL, &buffer);
	mutex_unlock(&battery->lock);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
		return -ENODEV;
	}
	if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
		result = extract_package(battery, buffer.pointer,
				extended_info_offsets,
				ARRAY_SIZE(extended_info_offsets));
	else
		result = extract_package(battery, buffer.pointer,
				info_offsets, ARRAY_SIZE(info_offsets));
	kfree(buffer.pointer);
	/* quirky firmware reports full charge as a percentage (100) */
	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
		battery->full_charge_capacity = battery->design_capacity;
	return result;
}
/*
 * Evaluate _BST and unpack the dynamic values (state/rate/capacity/
 * voltage).  Results are cached for cache_time ms (module parameter)
 * to avoid hammering the EC.  Also works around two firmware bugs:
 * negative 16-bit rates and percentage-based capacity reporting.
 */
static int acpi_battery_get_state(struct acpi_battery *battery)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	if (!acpi_battery_present(battery))
		return 0;
	/* serve from cache while it is still fresh */
	if (battery->update_time &&
	    time_before(jiffies, battery->update_time +
			msecs_to_jiffies(cache_time)))
		return 0;
	mutex_lock(&battery->lock);
	status = acpi_evaluate_object(battery->device->handle, "_BST",
				      NULL, &buffer);
	mutex_unlock(&battery->lock);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
		return -ENODEV;
	}
	result = extract_package(battery, buffer.pointer,
				 state_offsets, ARRAY_SIZE(state_offsets));
	battery->update_time = jiffies;
	kfree(buffer.pointer);
	/* For buggy DSDTs that report negative 16-bit values for either
	 * charging or discharging current and/or report 0 as 65536
	 * due to bad math.
	 */
	if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
		battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
		(s16)(battery->rate_now) < 0) {
		battery->rate_now = abs((s16)battery->rate_now);
		printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
			" invalid.\n");
	}
	/* convert percentage capacity to the real unit (see quirks) */
	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
	    && battery->capacity_now >= 0 && battery->capacity_now <= 100)
		battery->capacity_now = (battery->capacity_now *
				battery->full_charge_capacity) / 100;
	return result;
}
/*
 * Program the battery trip point (_BTP) from battery->alarm.
 * Fails with -ENODEV if the battery is absent or _BTP unsupported.
 */
static int acpi_battery_set_alarm(struct acpi_battery *battery)
{
	acpi_status status = 0;
	union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER };
	struct acpi_object_list arg_list = { 1, &arg0 };
	if (!acpi_battery_present(battery) ||
	    !test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
		return -ENODEV;
	arg0.integer.value = battery->alarm;
	mutex_lock(&battery->lock);
	status = acpi_evaluate_object(battery->device->handle, "_BTP",
				 &arg_list, NULL);
	mutex_unlock(&battery->lock);
	if (ACPI_FAILURE(status))
		return -ENODEV;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", battery->alarm));
	return 0;
}
/*
 * Detect _BTP support, record it in the flags, and (re)program the
 * alarm — defaulting to the firmware's design_capacity_warning if the
 * user has not set one yet.
 */
static int acpi_battery_init_alarm(struct acpi_battery *battery)
{
	acpi_status status = AE_OK;
	acpi_handle handle = NULL;
	/* See if alarms are supported, and if so, set default */
	status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
	if (ACPI_FAILURE(status)) {
		clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
		return 0;
	}
	set_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
	if (!battery->alarm)
		battery->alarm = battery->design_capacity_warning;
	return acpi_battery_set_alarm(battery);
}
/* sysfs "alarm" read: alarm is kept in mWh/mAh, exported in uWh/uAh. */
static ssize_t acpi_battery_alarm_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
	return sprintf(buf, "%d\n", battery->alarm * 1000);
}
/*
 * sysfs "alarm" write: accept a value in uWh/uAh, store it in
 * mWh/mAh, and push it to the firmware via _BTP if the battery is
 * present.  Invalid input is silently ignored (the previous alarm
 * value is re-programmed), matching the original behaviour.
 *
 * Fix: x is unsigned long, so the conversion must be "%lu"; the old
 * "%ld" was a signed/unsigned format mismatch and accepted negative
 * values that underflowed the alarm.
 */
static ssize_t acpi_battery_alarm_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	unsigned long x;
	struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
	if (sscanf(buf, "%lu\n", &x) == 1)
		battery->alarm = x/1000;
	if (acpi_battery_present(battery))
		acpi_battery_set_alarm(battery);
	return count;
}
/* The "alarm" sysfs attribute attached to the power_supply device. */
static struct device_attribute alarm_attr = {
	.attr = {.name = "alarm", .mode = 0644},
	.show = acpi_battery_alarm_show,
	.store = acpi_battery_alarm_store,
};
/*
 * Register the power_supply class device for this battery, choosing
 * the charge (mAh) or energy (mWh) property set from power_unit, and
 * attach the "alarm" attribute.
 */
static int sysfs_add_battery(struct acpi_battery *battery)
{
	int result;
	if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
		battery->bat.properties = charge_battery_props;
		battery->bat.num_properties =
			ARRAY_SIZE(charge_battery_props);
	} else {
		battery->bat.properties = energy_battery_props;
		battery->bat.num_properties =
			ARRAY_SIZE(energy_battery_props);
	}
	battery->bat.name = acpi_device_bid(battery->device);
	battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
	battery->bat.get_property = acpi_battery_get_property;
	result = power_supply_register(&battery->device->dev, &battery->bat);
	if (result)
		return result;
	return device_create_file(battery->bat.dev, &alarm_attr);
}
/*
 * Tear down the power_supply registration.  sysfs_lock makes this
 * idempotent and safe against concurrent removal paths (hot-unplug
 * notification vs. driver remove vs. PM notifier); bat.dev == NULL
 * marks "already removed".
 */
static void sysfs_remove_battery(struct acpi_battery *battery)
{
	mutex_lock(&battery->sysfs_lock);
	if (!battery->bat.dev) {
		mutex_unlock(&battery->sysfs_lock);
		return;
	}
	device_remove_file(battery->bat.dev, &alarm_attr);
	power_supply_unregister(&battery->bat);
	battery->bat.dev = NULL;
	mutex_unlock(&battery->sysfs_lock);
}
/*
* According to the ACPI spec, some kinds of primary batteries can
* report percentage battery remaining capacity directly to OS.
* In this case, it reports the Last Full Charged Capacity == 100
* and BatteryPresentRate == 0xFFFFFFFF.
*
* Now we found some battery reports percentage remaining capacity
* even if it's rechargeable.
* https://bugzilla.kernel.org/show_bug.cgi?id=15979
*
* Handle this correctly so that they won't break userspace.
*/
/*
 * Detect percentage-reporting batteries (see comment block above):
 * full_charge == 100 with unknown rate and a 0-100 capacity reading.
 * Once detected, set the quirk flag and rescale the current reading;
 * get_info/get_state apply the same rescaling on later refreshes.
 */
static void acpi_battery_quirks(struct acpi_battery *battery)
{
	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
		return ;
	if (battery->full_charge_capacity == 100 &&
	    battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
	    battery->capacity_now >=0 && battery->capacity_now <= 100) {
		set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
		battery->full_charge_capacity = battery->design_capacity;
		battery->capacity_now = (battery->capacity_now *
				battery->full_charge_capacity) / 100;
	}
}
/*
 * Full refresh driven by probe/notify/resume: re-read _STA, and on
 * (re)insertion re-read static info and alarms, (re)create the sysfs
 * device, then refresh dynamic state.  On removal the sysfs device is
 * torn down and the state cache invalidated.
 */
static int acpi_battery_update(struct acpi_battery *battery)
{
	int result, old_present = acpi_battery_present(battery);
	result = acpi_battery_get_status(battery);
	if (result)
		return result;
	if (!acpi_battery_present(battery)) {
		sysfs_remove_battery(battery);
		battery->update_time = 0;
		return 0;
	}
	/* first update ever, or battery swapped: reload static info */
	if (!battery->update_time ||
	    old_present != acpi_battery_present(battery)) {
		result = acpi_battery_get_info(battery);
		if (result)
			return result;
		acpi_battery_init_alarm(battery);
	}
	if (!battery->bat.dev) {
		result = sysfs_add_battery(battery);
		if (result)
			return result;
	}
	result = acpi_battery_get_state(battery);
	acpi_battery_quirks(battery);
	return result;
}
/*
 * Handle a NOTIFY_INFO event: re-read static info and rebuild the
 * power_supply device, since the property set depends on power_unit.
 */
static void acpi_battery_refresh(struct acpi_battery *battery)
{
	if (!battery->bat.dev)
		return;
	acpi_battery_get_info(battery);
	/* The battery may have changed its reporting units. */
	sysfs_remove_battery(battery);
	sysfs_add_battery(battery);
}
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_PROCFS_POWER
/* /proc/acpi/battery directory, obtained via acpi_lock_battery_dir(). */
static struct proc_dir_entry *acpi_battery_dir;

/*
 * /proc .../info formatter.  'result' is the outcome of the preceding
 * acpi_battery_update(); on failure only an error line is printed.
 * Values equal to ACPI_BATTERY_VALUE_UNKNOWN print as "unknown".
 */
static int acpi_battery_print_info(struct seq_file *seq, int result)
{
	struct acpi_battery *battery = seq->private;
	if (result)
		goto end;
	seq_printf(seq, "present:                 %s\n",
		   acpi_battery_present(battery)?"yes":"no");
	if (!acpi_battery_present(battery))
		goto end;
	if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
		seq_printf(seq, "design capacity:         unknown\n");
	else
		seq_printf(seq, "design capacity:         %d %sh\n",
			   battery->design_capacity,
			   acpi_battery_units(battery));
	if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
		seq_printf(seq, "last full capacity:      unknown\n");
	else
		seq_printf(seq, "last full capacity:      %d %sh\n",
			   battery->full_charge_capacity,
			   acpi_battery_units(battery));
	seq_printf(seq, "battery technology:      %srechargeable\n",
		   (!battery->technology)?"non-":"");
	if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
		seq_printf(seq, "design voltage:          unknown\n");
	else
		seq_printf(seq, "design voltage:          %d mV\n",
			   battery->design_voltage);
	seq_printf(seq, "design capacity warning: %d %sh\n",
		   battery->design_capacity_warning,
		   acpi_battery_units(battery));
	seq_printf(seq, "design capacity low:     %d %sh\n",
		   battery->design_capacity_low,
		   acpi_battery_units(battery));
	seq_printf(seq, "cycle count:		  %i\n", battery->cycle_count);
	seq_printf(seq, "capacity granularity 1:  %d %sh\n",
		   battery->capacity_granularity_1,
		   acpi_battery_units(battery));
	seq_printf(seq, "capacity granularity 2:  %d %sh\n",
		   battery->capacity_granularity_2,
		   acpi_battery_units(battery));
	seq_printf(seq, "model number:            %s\n", battery->model_number);
	seq_printf(seq, "serial number:           %s\n", battery->serial_number);
	seq_printf(seq, "battery type:            %s\n", battery->type);
	seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
      end:
	if (result)
		seq_printf(seq, "ERROR: Unable to read battery info\n");
	return result;
}
/*
 * /proc .../state formatter.  Decodes _BST state bits: 0x01 =
 * discharging, 0x02 = charging, 0x04 = critical capacity.
 */
static int acpi_battery_print_state(struct seq_file *seq, int result)
{
	struct acpi_battery *battery = seq->private;
	if (result)
		goto end;
	seq_printf(seq, "present:                 %s\n",
		   acpi_battery_present(battery)?"yes":"no");
	if (!acpi_battery_present(battery))
		goto end;
	seq_printf(seq, "capacity state:          %s\n",
			(battery->state & 0x04)?"critical":"ok");
	if ((battery->state & 0x01) && (battery->state & 0x02))
		seq_printf(seq,
			   "charging state:          charging/discharging\n");
	else if (battery->state & 0x01)
		seq_printf(seq, "charging state:          discharging\n");
	else if (battery->state & 0x02)
		seq_printf(seq, "charging state:          charging\n");
	else
		seq_printf(seq, "charging state:          charged\n");
	if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
		seq_printf(seq, "present rate:            unknown\n");
	else
		seq_printf(seq, "present rate:            %d %s\n",
			   battery->rate_now, acpi_battery_units(battery));
	if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
		seq_printf(seq, "remaining capacity:      unknown\n");
	else
		seq_printf(seq, "remaining capacity:      %d %sh\n",
			   battery->capacity_now, acpi_battery_units(battery));
	if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
		seq_printf(seq, "present voltage:         unknown\n");
	else
		seq_printf(seq, "present voltage:         %d mV\n",
			   battery->voltage_now);
      end:
	if (result)
		seq_printf(seq, "ERROR: Unable to read battery state\n");
	return result;
}
/* /proc .../alarm formatter; alarm == 0 is treated as unsupported. */
static int acpi_battery_print_alarm(struct seq_file *seq, int result)
{
	struct acpi_battery *battery = seq->private;
	if (result)
		goto end;
	if (!acpi_battery_present(battery)) {
		seq_printf(seq, "present:                 no\n");
		goto end;
	}
	seq_printf(seq, "alarm:                   ");
	if (!battery->alarm)
		seq_printf(seq, "unsupported\n");
	else
		seq_printf(seq, "%u %sh\n", battery->alarm,
			   acpi_battery_units(battery));
      end:
	if (result)
		seq_printf(seq, "ERROR: Unable to read battery alarm\n");
	return result;
}
/*
 * /proc .../alarm write handler: parse a small decimal/hex number
 * from userspace and program it via _BTP.  Rejects writes longer
 * than the 11-char local buffer and writes to an absent battery.
 */
static ssize_t acpi_battery_write_alarm(struct file *file,
					const char __user * buffer,
					size_t count, loff_t * ppos)
{
	int result = 0;
	char alarm_string[12] = { '\0' };
	struct seq_file *m = file->private_data;
	struct acpi_battery *battery = m->private;
	if (!battery || (count > sizeof(alarm_string) - 1))
		return -EINVAL;
	if (!acpi_battery_present(battery)) {
		result = -ENODEV;
		goto end;
	}
	if (copy_from_user(alarm_string, buffer, count)) {
		result = -EFAULT;
		goto end;
	}
	alarm_string[count] = '\0';
	battery->alarm = simple_strtol(alarm_string, NULL, 0);
	result = acpi_battery_set_alarm(battery);
      end:
	if (!result)
		return count;
	return result;
}
/* Formatter dispatch table, indexed by enum acpi_battery_files. */
typedef int(*print_func)(struct seq_file *seq, int result);

static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
	acpi_battery_print_info,
	acpi_battery_print_state,
	acpi_battery_print_alarm,
};

/* Common read path: refresh the battery, then format file 'fid'. */
static int acpi_battery_read(int fid, struct seq_file *seq)
{
	struct acpi_battery *battery = seq->private;
	int result = acpi_battery_update(battery);
	return acpi_print_funcs[fid](seq, result);
}
/*
 * Generate the seq_file show and open callbacks for one proc file;
 * the _name##_tag index selects the formatter in acpi_print_funcs.
 */
#define DECLARE_FILE_FUNCTIONS(_name) \
static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
{ \
	return acpi_battery_read(_name##_tag, seq); \
} \
static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
{ \
	return single_open(file, acpi_battery_read_##_name, PDE(inode)->data); \
}
DECLARE_FILE_FUNCTIONS(info);
DECLARE_FILE_FUNCTIONS(state);
DECLARE_FILE_FUNCTIONS(alarm);
#undef DECLARE_FILE_FUNCTIONS
/* Read-only proc file descriptor initializer. */
#define FILE_DESCRIPTION_RO(_name) \
	{ \
	.name = __stringify(_name), \
	.mode = S_IRUGO, \
	.ops = { \
		.open = acpi_battery_##_name##_open_fs, \
		.read = seq_read, \
		.llseek = seq_lseek, \
		.release = single_release, \
		.owner = THIS_MODULE, \
	}, \
	}
/* Read-write proc file descriptor initializer (adds a write op). */
#define FILE_DESCRIPTION_RW(_name) \
	{ \
	.name = __stringify(_name), \
	.mode = S_IFREG | S_IRUGO | S_IWUSR, \
	.ops = { \
		.open = acpi_battery_##_name##_open_fs, \
		.read = seq_read, \
		.llseek = seq_lseek, \
		.write = acpi_battery_write_##_name, \
		.release = single_release, \
		.owner = THIS_MODULE, \
	}, \
	}
/* The three per-battery proc files: info, state (RO) and alarm (RW). */
static const struct battery_file {
	struct file_operations ops;
	umode_t mode;
	const char *name;
} acpi_battery_file[] = {
	FILE_DESCRIPTION_RO(info),
	FILE_DESCRIPTION_RO(state),
	FILE_DESCRIPTION_RW(alarm),
};
#undef FILE_DESCRIPTION_RO
#undef FILE_DESCRIPTION_RW
/*
 * Create /proc/acpi/battery/<BID>/{info,state,alarm}.  The proc
 * interface is deprecated; a warning is logged on first use.
 */
static int acpi_battery_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;
	int i;
	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_battery_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}
	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
		entry = proc_create_data(acpi_battery_file[i].name,
				  acpi_battery_file[i].mode,
				  acpi_device_dir(device),
				  &acpi_battery_file[i].ops,
				  acpi_driver_data(device));
		if (!entry)
			return -ENODEV;
	}
	return 0;
}
/*
 * Remove the per-battery proc files and directory.  Safe to call
 * even if acpi_battery_add_fs() failed part-way (remove_proc_entry
 * is a no-op for entries that were never created).
 */
static void acpi_battery_remove_fs(struct acpi_device *device)
{
	int i;
	if (!acpi_device_dir(device))
		return;
	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
		remove_proc_entry(acpi_battery_file[i].name,
				  acpi_device_dir(device));
	remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
	acpi_device_dir(device) = NULL;
}
#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
/*
 * ACPI notify handler.  NOTIFY_INFO (0x81) triggers a full static-info
 * refresh; every event refreshes dynamic state, is forwarded to both
 * legacy /proc event and netlink consumers, and pokes power_supply —
 * unless the update itself removed the power_supply object.
 */
static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
	struct acpi_battery *battery = acpi_driver_data(device);
	struct device *old;
	if (!battery)
		return;
	old = battery->bat.dev;
	if (event == ACPI_BATTERY_NOTIFY_INFO)
		acpi_battery_refresh(battery);
	acpi_battery_update(battery);
	acpi_bus_generate_proc_event(device, event,
				     acpi_battery_present(battery));
	acpi_bus_generate_netlink_event(device->pnp.device_class,
					dev_name(&device->dev), event,
					acpi_battery_present(battery));
	/* acpi_battery_update could remove power_supply object */
	if (old && battery->bat.dev)
		power_supply_changed(&battery->bat);
}
/*
 * PM notifier: after suspend/hibernate, recreate the power_supply
 * device so userspace sees fresh state (the battery may have been
 * swapped or its reporting units changed while asleep).
 */
static int battery_notify(struct notifier_block *nb,
			       unsigned long mode, void *_unused)
{
	struct acpi_battery *battery = container_of(nb, struct acpi_battery,
						    pm_nb);
	switch (mode) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		if (battery->bat.dev) {
			sysfs_remove_battery(battery);
			sysfs_add_battery(battery);
		}
		break;
	}
	return 0;
}
/*
 * ACPI driver .add: allocate and initialise the per-battery state,
 * probe for _BIX support, run the first update (which also creates
 * the power_supply device if the battery is present), optionally add
 * the deprecated proc files, and register the PM notifier.
 */
static int acpi_battery_add(struct acpi_device *device)
{
	int result = 0;
	struct acpi_battery *battery = NULL;
	acpi_handle handle;
	if (!device)
		return -EINVAL;
	battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
	if (!battery)
		return -ENOMEM;
	battery->device = device;
	strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
	device->driver_data = battery;
	mutex_init(&battery->lock);
	mutex_init(&battery->sysfs_lock);
	/* prefer extended info (_BIX) when the firmware provides it */
	if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
			"_BIX", &handle)))
		set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
	result = acpi_battery_update(battery);
	if (result)
		goto fail;
#ifdef CONFIG_ACPI_PROCFS_POWER
	result = acpi_battery_add_fs(device);
#endif
	if (result) {
#ifdef CONFIG_ACPI_PROCFS_POWER
		acpi_battery_remove_fs(device);
#endif
		goto fail;
	}
	printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
		ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
		device->status.battery_present ? "present" : "absent");
	battery->pm_nb.notifier_call = battery_notify;
	register_pm_notifier(&battery->pm_nb);
	return result;
fail:
	sysfs_remove_battery(battery);
	mutex_destroy(&battery->lock);
	mutex_destroy(&battery->sysfs_lock);
	kfree(battery);
	return result;
}
/*
 * ACPI driver .remove: reverse of acpi_battery_add — unregister the
 * PM notifier, tear down proc and sysfs interfaces, free the state.
 */
static int acpi_battery_remove(struct acpi_device *device, int type)
{
	struct acpi_battery *battery = NULL;
	if (!device || !acpi_driver_data(device))
		return -EINVAL;
	battery = acpi_driver_data(device);
	unregister_pm_notifier(&battery->pm_nb);
#ifdef CONFIG_ACPI_PROCFS_POWER
	acpi_battery_remove_fs(device);
#endif
	sysfs_remove_battery(battery);
	mutex_destroy(&battery->lock);
	mutex_destroy(&battery->sysfs_lock);
	kfree(battery);
	return 0;
}
/* this is needed to learn about changes made in suspended state */
/* this is needed to learn about changes made in suspended state */
/*
 * ACPI driver .resume: invalidate the _BST cache and force a full
 * update so a battery swapped while suspended is picked up.
 *
 * Fix: also guard against a NULL driver_data before dereferencing,
 * consistent with the acpi_driver_data() check in acpi_battery_remove.
 */
static int acpi_battery_resume(struct acpi_device *device)
{
	struct acpi_battery *battery;
	if (!device)
		return -EINVAL;
	battery = acpi_driver_data(device);
	if (!battery)
		return -EINVAL;
	battery->update_time = 0;	/* drop the cached _BST reading */
	acpi_battery_update(battery);
	return 0;
}
/* ACPI bus driver binding for PNP0C0A control method batteries. */
static struct acpi_driver acpi_battery_driver = {
	.name = "battery",
	.class = ACPI_BATTERY_CLASS,
	.ids = battery_device_ids,
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
	.ops = {
		.add = acpi_battery_add,
		.resume = acpi_battery_resume,
		.remove = acpi_battery_remove,
		.notify = acpi_battery_notify,
		},
};
/* Asynchronous init body, scheduled from acpi_battery_init() so battery
 * probing does not slow down the boot path.  Registers the ACPI driver
 * (and, when procfs power support is enabled, takes the shared
 * /proc battery directory first). */
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
        if (acpi_disabled)
                return;
#ifdef CONFIG_ACPI_PROCFS_POWER
        acpi_battery_dir = acpi_lock_battery_dir();
        if (!acpi_battery_dir)
                return;
#endif
        if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
#ifdef CONFIG_ACPI_PROCFS_POWER
                /* Registration failed: give the directory back. */
                acpi_unlock_battery_dir(acpi_battery_dir);
#endif
                return;
        }
        return;
}
/* Module init: defer the real work to the async init path above. */
static int __init acpi_battery_init(void)
{
        async_schedule(acpi_battery_init_async, NULL);
        return 0;
}
/* Module exit: mirror of the async init — unregister the driver and
 * release the shared /proc battery directory if it was taken. */
static void __exit acpi_battery_exit(void)
{
        acpi_bus_unregister_driver(&acpi_battery_driver);
#ifdef CONFIG_ACPI_PROCFS_POWER
        acpi_unlock_battery_dir(acpi_battery_dir);
#endif
}
module_init(acpi_battery_init);
module_exit(acpi_battery_exit);
| gpl-2.0 |
ChameleonOS/android_kernel_amazon_bowser-common | drivers/media/video/gspca/finepix.c | 3072 | 8010 | /*
* Fujifilm Finepix subdriver
*
* Copyright (C) 2008 Frank Zago
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define MODULE_NAME "finepix"
#include "gspca.h"
MODULE_AUTHOR("Frank Zago <frank@zago.net>");
MODULE_DESCRIPTION("Fujifilm FinePix USB V4L2 driver");
MODULE_LICENSE("GPL");
/* Default timeout, in ms */
#define FPIX_TIMEOUT 250
/* Maximum transfer size to use. The windows driver reads by chunks of
* 0x2000 bytes, so do the same. Note: reading more seems to work
* too. */
#define FPIX_MAX_TRANSFER 0x2000
/* Structure to hold all of our device specific stuff */
struct usb_fpix {
        struct gspca_dev gspca_dev;     /* !! must be the first item */
        struct work_struct work_struct;         /* streaming work item */
        struct workqueue_struct *work_thread;   /* runs dostream() */
};
/* Delay after which claim the next frame. If the delay is too small,
* the camera will return old frames. On the 4800Z, 20ms is bad, 25ms
* will fail every 4 or 5 frames, but 30ms is perfect. On the A210,
* 30ms is bad while 35ms is perfect. */
#define NEXT_FRAME_DELAY 35
/* These cameras only support 320x240. */
static const struct v4l2_pix_format fpix_mode[1] = {
        { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
                .bytesperline = 320,
                /* buffer size heuristic for the compressed frame */
                .sizeimage = 320 * 240 * 3 / 8 + 590,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = 0}
};
/* send a command to the webcam
 * order 0: reset the camera; order 1: request one frame.
 * Copies a fixed 12-byte command into usb_buf and issues a class
 * control transfer.  Returns the usb_control_msg() result. */
static int command(struct gspca_dev *gspca_dev,
                int order)      /* 0: reset, 1: frame request */
{
        static u8 order_values[2][12] = {
                {0xc6, 0, 0, 0, 0, 0, 0, 0, 0x20, 0, 0, 0},     /* reset */
                {0xd3, 0, 0, 0, 0, 0, 0, 0x01, 0, 0, 0, 0},     /* fr req */
        };

        memcpy(gspca_dev->usb_buf, order_values[order], 12);
        return usb_control_msg(gspca_dev->dev,
                        usb_sndctrlpipe(gspca_dev->dev, 0),
                        USB_REQ_GET_STATUS,
                        USB_DIR_OUT | USB_TYPE_CLASS |
                        USB_RECIP_INTERFACE, 0, 0, gspca_dev->usb_buf,
                        12, FPIX_TIMEOUT);
}
/* workqueue: pull JPEG frames from the camera until streaming stops.
 * Runs on the driver's singlethreaded workqueue; takes usb_lock only
 * around the frame-request command so USB control traffic does not
 * race the main driver. */
static void dostream(struct work_struct *work)
{
        struct usb_fpix *dev = container_of(work, struct usb_fpix, work_struct);
        struct gspca_dev *gspca_dev = &dev->gspca_dev;
        struct urb *urb = gspca_dev->urb[0];
        u8 *data = urb->transfer_buffer;
        int ret = 0;
        int len;

        /* synchronize with the main driver */
        mutex_lock(&gspca_dev->usb_lock);
        mutex_unlock(&gspca_dev->usb_lock);

        PDEBUG(D_STREAM, "dostream started");

        /* loop reading a frame */
again:
        while (gspca_dev->present && gspca_dev->streaming) {

                /* request a frame */
                mutex_lock(&gspca_dev->usb_lock);
                ret = command(gspca_dev, 1);
                mutex_unlock(&gspca_dev->usb_lock);
                if (ret < 0)
                        break;
                if (!gspca_dev->present || !gspca_dev->streaming)
                        break;

                /* the frame comes in parts */
                for (;;) {
                        ret = usb_bulk_msg(gspca_dev->dev,
                                urb->pipe,
                                data,
                                FPIX_MAX_TRANSFER,
                                &len, FPIX_TIMEOUT);
                        if (ret < 0) {
                                /* Most of the time we get a timeout
                                 * error. Just restart. */
                                goto again;
                        }
                        if (!gspca_dev->present || !gspca_dev->streaming)
                                goto out;
                        /* A short read, or a JPEG end-of-image marker
                         * (0xff 0xd9) at the end of a full read, marks
                         * the end of the frame. */
                        if (len < FPIX_MAX_TRANSFER ||
                                (data[len - 2] == 0xff &&
                                        data[len - 1] == 0xd9)) {

                                /* If the result is less than what was asked
                                 * for, then it's the end of the
                                 * frame. Sometimes the jpeg is not complete,
                                 * but there's nothing we can do. We also end
                                 * here if the jpeg ends right at the end
                                 * of the frame. */
                                gspca_frame_add(gspca_dev, LAST_PACKET,
                                                data, len);
                                break;
                        }

                        /* got a partial image */
                        gspca_frame_add(gspca_dev,
                                        gspca_dev->last_packet_type
                                                == LAST_PACKET
                                        ? FIRST_PACKET : INTER_PACKET,
                                        data, len);
                }

                /* We must wait before trying reading the next
                 * frame. If we don't, or if the delay is too short,
                 * the camera will disconnect. */
                msleep(NEXT_FRAME_DELAY);
        }

out:
        PDEBUG(D_STREAM, "dostream stopped");
}
/* this function is called at probe time
 * Declares the single 320x240 bulk mode and prepares (but does not
 * start) the streaming work item. */
static int sd_config(struct gspca_dev *gspca_dev,
                const struct usb_device_id *id)
{
        struct usb_fpix *dev = (struct usb_fpix *) gspca_dev;
        struct cam *cam = &gspca_dev->cam;

        cam->cam_mode = fpix_mode;
        cam->nmodes = 1;
        cam->bulk = 1;
        cam->bulk_size = FPIX_MAX_TRANSFER;

        INIT_WORK(&dev->work_struct, dostream);

        return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
return 0;
}
/* start the camera */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct usb_fpix *dev = (struct usb_fpix *) gspca_dev;
int ret, len;
/* Init the device */
ret = command(gspca_dev, 0);
if (ret < 0) {
err("init failed %d", ret);
return ret;
}
/* Read the result of the command. Ignore the result, for it
* varies with the device. */
ret = usb_bulk_msg(gspca_dev->dev,
gspca_dev->urb[0]->pipe,
gspca_dev->urb[0]->transfer_buffer,
FPIX_MAX_TRANSFER, &len,
FPIX_TIMEOUT);
if (ret < 0) {
err("usb_bulk_msg failed %d", ret);
return ret;
}
/* Request a frame, but don't read it */
ret = command(gspca_dev, 1);
if (ret < 0) {
err("frame request failed %d", ret);
return ret;
}
/* Again, reset bulk in endpoint */
usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
queue_work(dev->work_thread, &dev->work_struct);
return 0;
}
/* called on streamoff with alt==0 and on disconnect */
/* the usb_lock is held at entry - restore on exit */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
        struct usb_fpix *dev = (struct usb_fpix *) gspca_dev;

        /* Robustness fix: work_thread may be NULL if sd_start() failed
         * or stop0 is invoked twice; destroy_workqueue(NULL) oopses. */
        if (!dev->work_thread)
                return;

        /* wait for the work queue to terminate */
        mutex_unlock(&gspca_dev->usb_lock);
        destroy_workqueue(dev->work_thread);
        mutex_lock(&gspca_dev->usb_lock);
        dev->work_thread = NULL;
}
/* Table of supported USB devices */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04cb, 0x0104)},
{USB_DEVICE(0x04cb, 0x0109)},
{USB_DEVICE(0x04cb, 0x010b)},
{USB_DEVICE(0x04cb, 0x010f)},
{USB_DEVICE(0x04cb, 0x0111)},
{USB_DEVICE(0x04cb, 0x0113)},
{USB_DEVICE(0x04cb, 0x0115)},
{USB_DEVICE(0x04cb, 0x0117)},
{USB_DEVICE(0x04cb, 0x0119)},
{USB_DEVICE(0x04cb, 0x011b)},
{USB_DEVICE(0x04cb, 0x011d)},
{USB_DEVICE(0x04cb, 0x0121)},
{USB_DEVICE(0x04cb, 0x0123)},
{USB_DEVICE(0x04cb, 0x0125)},
{USB_DEVICE(0x04cb, 0x0127)},
{USB_DEVICE(0x04cb, 0x0129)},
{USB_DEVICE(0x04cb, 0x012b)},
{USB_DEVICE(0x04cb, 0x012d)},
{USB_DEVICE(0x04cb, 0x012f)},
{USB_DEVICE(0x04cb, 0x0131)},
{USB_DEVICE(0x04cb, 0x013b)},
{USB_DEVICE(0x04cb, 0x013d)},
{USB_DEVICE(0x04cb, 0x013f)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = sd_config,
.init = sd_init,
.start = sd_start,
.stop0 = sd_stop0,
};
/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id,
&sd_desc,
sizeof(struct usb_fpix),
THIS_MODULE);
}
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
#endif
};
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
}
module_init(sd_mod_init);
module_exit(sd_mod_exit);
| gpl-2.0 |
InstigatorX/InstigatorX-V4-Kernel | drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c | 3584 | 6804 | /*
* Host AP crypt: host-based WEP encryption implementation for Host AP driver
*
* Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <asm/string.h>
#include "ieee80211.h"
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: WEP");
MODULE_LICENSE("GPL");
/* Per-key WEP state: IV counter, secret key material and the arc4
 * transforms used for transmit and receive. */
struct prism2_wep_data {
        u32 iv;                         /* rolling 24-bit IV counter */
#define WEP_KEY_LEN 13
        u8 key[WEP_KEY_LEN + 1];        /* secret key, up to 13 bytes */
        u8 key_len;
        u8 key_idx;                     /* WEP key slot (0..3) */
        struct crypto_blkcipher *tx_tfm;
        struct crypto_blkcipher *rx_tfm;
};
/* Allocate WEP state for key slot @keyidx and the two ecb(arc4)
 * transforms.  Returns the opaque context, or NULL on failure (all
 * partial allocations are released). */
static void * prism2_wep_init(int keyidx)
{
        struct prism2_wep_data *priv;

        priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
        if (priv == NULL)
                goto fail;
        priv->key_idx = keyidx;

        priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm)) {
                printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
                       "crypto API arc4\n");
                /* clear the ERR_PTR so the fail path won't free it */
                priv->tx_tfm = NULL;
                goto fail;
        }
        priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm)) {
                printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
                       "crypto API arc4\n");
                priv->rx_tfm = NULL;
                goto fail;
        }

        /* start WEP IV from a random value */
        get_random_bytes(&priv->iv, 4);

        return priv;

fail:
        if (priv) {
                if (priv->tx_tfm)
                        crypto_free_blkcipher(priv->tx_tfm);
                if (priv->rx_tfm)
                        crypto_free_blkcipher(priv->rx_tfm);
                kfree(priv);
        }
        return NULL;
}
static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
if (_priv) {
if (_priv->tx_tfm)
crypto_free_blkcipher(_priv->tx_tfm);
if (_priv->rx_tfm)
crypto_free_blkcipher(_priv->rx_tfm);
}
kfree(priv);
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
 * so the payload length increases with 8 bytes.
 *
 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
 *
 * Returns 0 on success or the crypto API error. */
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
        struct prism2_wep_data *wep = priv;
        struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
        u32 klen, len;
        u8 key[WEP_KEY_LEN + 3];
        u8 *pos;
        u32 crc;
        u8 *icv;
        struct scatterlist sg;

        if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len)
                return -1;

        len = skb->len - hdr_len;
        /* open a 4-byte gap after the 802.11 header for IV + key idx */
        pos = skb_push(skb, 4);
        memmove(pos, pos + 4, hdr_len);
        pos += hdr_len;

        klen = 3 + wep->key_len;

        wep->iv++;

        /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
         * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
         * can be used to speedup attacks, so avoid using them. */
        if ((wep->iv & 0xff00) == 0xff00) {
                u8 B = (wep->iv >> 16) & 0xff;
                if (B >= 3 && B < klen)
                        wep->iv += 0x0100;
        }

        /* Prepend 24-bit IV to RC4 key and TX frame */
        *pos++ = key[0] = (wep->iv >> 16) & 0xff;
        *pos++ = key[1] = (wep->iv >> 8) & 0xff;
        *pos++ = key[2] = wep->iv & 0xff;
        *pos++ = wep->key_idx << 6;

        /* Copy rest of the WEP key (the secret part) */
        memcpy(key + 3, wep->key, wep->key_len);

        /* Append little-endian CRC32 and encrypt it to produce ICV */
        crc = ~crc32_le(~0, pos, len);
        icv = skb_put(skb, 4);
        icv[0] = crc;
        icv[1] = crc >> 8;
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;

        /* RC4-encrypt payload + ICV in place with IV||key */
        crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
        sg_init_one(&sg, pos, len + 4);

        return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
 * ICV (4 bytes). len includes both IV and ICV.
 *
 * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
 * failure. If frame is OK, IV and ICV will be removed.
 */
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
        struct prism2_wep_data *wep = priv;
        struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
        u32 klen, plen;
        u8 key[WEP_KEY_LEN + 3];
        u8 keyidx, *pos;
        u32 crc;
        u8 icv[4];
        struct scatterlist sg;

        /* need at least IV (4) + ICV (4) past the header */
        if (skb->len < hdr_len + 8)
                return -1;

        pos = skb->data + hdr_len;
        /* first three bytes of the RC4 key are the received IV */
        key[0] = *pos++;
        key[1] = *pos++;
        key[2] = *pos++;
        keyidx = *pos++ >> 6;
        if (keyidx != wep->key_idx)
                return -1;

        klen = 3 + wep->key_len;

        /* Copy rest of the WEP key (the secret part) */
        memcpy(key + 3, wep->key, wep->key_len);

        /* Apply RC4 to data and compute CRC32 over decrypted data */
        plen = skb->len - hdr_len - 8;

        crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
        sg_init_one(&sg, pos, plen + 4);

        if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
                return -7;

        /* recompute the ICV over the plaintext and compare */
        crc = ~crc32_le(~0, pos, plen);
        icv[0] = crc;
        icv[1] = crc >> 8;
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
        if (memcmp(icv, pos + plen, 4) != 0) {
                /* ICV mismatch - drop frame */
                return -2;
        }

        /* Remove IV and ICV */
        memmove(skb->data + 4, skb->data, hdr_len);
        skb_pull(skb, 4);
        skb_trim(skb, skb->len - 4);

        return 0;
}
/* Install a new WEP secret key of up to WEP_KEY_LEN bytes.
 * Returns 0 on success, -1 if the length is out of range. */
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
        struct prism2_wep_data *wep = priv;

        /* same range check as (len < 0 || len > WEP_KEY_LEN),
         * folded into a single unsigned comparison */
        if ((unsigned int) len > WEP_KEY_LEN)
                return -1;

        wep->key_len = len;
        memcpy(wep->key, key, len);
        return 0;
}
/* Export the current WEP secret key into the caller's buffer.
 * Returns the key length, or -1 if the buffer is too small. */
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
        struct prism2_wep_data *wep = priv;
        int klen = wep->key_len;

        if (klen > len)
                return -1;
        memcpy(key, wep->key, klen);
        return klen;
}
/* Append a one-line key summary to the stats buffer @p and return the
 * advanced buffer pointer. */
static char * prism2_wep_print_stats(char *p, void *priv)
{
        struct prism2_wep_data *wep = priv;
        p += sprintf(p, "key[%d] alg=WEP len=%d\n",
                     wep->key_idx, wep->key_len);
        return p;
}
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.name = "WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
.encrypt_mpdu = prism2_wep_encrypt,
.decrypt_mpdu = prism2_wep_decrypt,
.encrypt_msdu = NULL,
.decrypt_msdu = NULL,
.set_key = prism2_wep_set_key,
.get_key = prism2_wep_get_key,
.print_stats = prism2_wep_print_stats,
.extra_prefix_len = 4, /* IV */
.extra_postfix_len = 4, /* ICV */
.owner = THIS_MODULE,
};
/* Register the WEP crypto ops with the ieee80211 layer. */
int ieee80211_crypto_wep_init(void)
{
        return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}

/* Unregister the WEP crypto ops from the ieee80211 layer. */
void ieee80211_crypto_wep_exit(void)
{
        ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}
/* Intentionally empty exported hook; other code references it so this
 * object stays linked in. */
void ieee80211_wep_null(void)
{
}
| gpl-2.0 |
AscendG630-DEV/kernel_huawei_msm8610 | arch/arm/mach-davinci/dm644x.c | 4864 | 22290 | /*
* TI DaVinci DM644x chip specific setup
*
* Author: Kevin Hilman, Deep Root Systems, LLC
*
* 2007 (c) Deep Root Systems, LLC. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/serial_8250.h>
#include <linux/platform_device.h>
#include <asm/mach/map.h>
#include <mach/cputype.h>
#include <mach/edma.h>
#include <mach/irqs.h>
#include <mach/psc.h>
#include <mach/mux.h>
#include <mach/time.h>
#include <mach/serial.h>
#include <mach/common.h>
#include <mach/asp.h>
#include <mach/gpio-davinci.h>
#include "davinci.h"
#include "clock.h"
#include "mux.h"
/*
* Device specific clocks
*/
#define DM644X_REF_FREQ 27000000
#define DM644X_EMAC_BASE 0x01c80000
#define DM644X_EMAC_MDIO_BASE (DM644X_EMAC_BASE + 0x4000)
#define DM644X_EMAC_CNTRL_OFFSET 0x0000
#define DM644X_EMAC_CNTRL_MOD_OFFSET 0x1000
#define DM644X_EMAC_CNTRL_RAM_OFFSET 0x2000
#define DM644X_EMAC_CNTRL_RAM_SIZE 0x2000
static struct pll_data pll1_data = {
.num = 1,
.phys_base = DAVINCI_PLL1_BASE,
};
static struct pll_data pll2_data = {
.num = 2,
.phys_base = DAVINCI_PLL2_BASE,
};
static struct clk ref_clk = {
.name = "ref_clk",
.rate = DM644X_REF_FREQ,
};
static struct clk pll1_clk = {
.name = "pll1",
.parent = &ref_clk,
.pll_data = &pll1_data,
.flags = CLK_PLL,
};
static struct clk pll1_sysclk1 = {
.name = "pll1_sysclk1",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV1,
};
static struct clk pll1_sysclk2 = {
.name = "pll1_sysclk2",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV2,
};
static struct clk pll1_sysclk3 = {
.name = "pll1_sysclk3",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV3,
};
static struct clk pll1_sysclk5 = {
.name = "pll1_sysclk5",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV5,
};
static struct clk pll1_aux_clk = {
.name = "pll1_aux_clk",
.parent = &pll1_clk,
.flags = CLK_PLL | PRE_PLL,
};
static struct clk pll1_sysclkbp = {
.name = "pll1_sysclkbp",
.parent = &pll1_clk,
.flags = CLK_PLL | PRE_PLL,
.div_reg = BPDIV
};
static struct clk pll2_clk = {
.name = "pll2",
.parent = &ref_clk,
.pll_data = &pll2_data,
.flags = CLK_PLL,
};
static struct clk pll2_sysclk1 = {
.name = "pll2_sysclk1",
.parent = &pll2_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV1,
};
static struct clk pll2_sysclk2 = {
.name = "pll2_sysclk2",
.parent = &pll2_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV2,
};
static struct clk pll2_sysclkbp = {
.name = "pll2_sysclkbp",
.parent = &pll2_clk,
.flags = CLK_PLL | PRE_PLL,
.div_reg = BPDIV
};
static struct clk dsp_clk = {
.name = "dsp",
.parent = &pll1_sysclk1,
.lpsc = DAVINCI_LPSC_GEM,
.domain = DAVINCI_GPSC_DSPDOMAIN,
.usecount = 1, /* REVISIT how to disable? */
};
static struct clk arm_clk = {
.name = "arm",
.parent = &pll1_sysclk2,
.lpsc = DAVINCI_LPSC_ARM,
.flags = ALWAYS_ENABLED,
};
static struct clk vicp_clk = {
.name = "vicp",
.parent = &pll1_sysclk2,
.lpsc = DAVINCI_LPSC_IMCOP,
.domain = DAVINCI_GPSC_DSPDOMAIN,
.usecount = 1, /* REVISIT how to disable? */
};
static struct clk vpss_master_clk = {
.name = "vpss_master",
.parent = &pll1_sysclk3,
.lpsc = DAVINCI_LPSC_VPSSMSTR,
.flags = CLK_PSC,
};
static struct clk vpss_slave_clk = {
.name = "vpss_slave",
.parent = &pll1_sysclk3,
.lpsc = DAVINCI_LPSC_VPSSSLV,
};
static struct clk uart0_clk = {
.name = "uart0",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_UART0,
};
static struct clk uart1_clk = {
.name = "uart1",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_UART1,
};
static struct clk uart2_clk = {
.name = "uart2",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_UART2,
};
static struct clk emac_clk = {
.name = "emac",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_EMAC_WRAPPER,
};
static struct clk i2c_clk = {
.name = "i2c",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_I2C,
};
static struct clk ide_clk = {
.name = "ide",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_ATA,
};
static struct clk asp_clk = {
.name = "asp0",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_McBSP,
};
static struct clk mmcsd_clk = {
.name = "mmcsd",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_MMC_SD,
};
static struct clk spi_clk = {
.name = "spi",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_SPI,
};
static struct clk gpio_clk = {
.name = "gpio",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_GPIO,
};
static struct clk usb_clk = {
.name = "usb",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_USB,
};
static struct clk vlynq_clk = {
.name = "vlynq",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_VLYNQ,
};
static struct clk aemif_clk = {
.name = "aemif",
.parent = &pll1_sysclk5,
.lpsc = DAVINCI_LPSC_AEMIF,
};
static struct clk pwm0_clk = {
.name = "pwm0",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_PWM0,
};
static struct clk pwm1_clk = {
.name = "pwm1",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_PWM1,
};
static struct clk pwm2_clk = {
.name = "pwm2",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_PWM2,
};
static struct clk timer0_clk = {
.name = "timer0",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_TIMER0,
};
static struct clk timer1_clk = {
.name = "timer1",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_TIMER1,
};
static struct clk timer2_clk = {
.name = "timer2",
.parent = &pll1_aux_clk,
.lpsc = DAVINCI_LPSC_TIMER2,
.usecount = 1, /* REVISIT: why can't this be disabled? */
};
/* Clock lookup table: maps (dev_id, con_id) pairs to the clock nodes
 * above.  clk_get() returns the first match, so each name must appear
 * only once.  Fix: a duplicate, unreachable "arm" entry was removed. */
static struct clk_lookup dm644x_clks[] = {
        CLK(NULL, "ref", &ref_clk),
        CLK(NULL, "pll1", &pll1_clk),
        CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
        CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
        CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
        CLK(NULL, "pll1_sysclk5", &pll1_sysclk5),
        CLK(NULL, "pll1_aux", &pll1_aux_clk),
        CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
        CLK(NULL, "pll2", &pll2_clk),
        CLK(NULL, "pll2_sysclk1", &pll2_sysclk1),
        CLK(NULL, "pll2_sysclk2", &pll2_sysclk2),
        CLK(NULL, "pll2_sysclkbp", &pll2_sysclkbp),
        CLK(NULL, "dsp", &dsp_clk),
        CLK(NULL, "arm", &arm_clk),
        CLK(NULL, "vicp", &vicp_clk),
        CLK(NULL, "vpss_master", &vpss_master_clk),
        CLK(NULL, "vpss_slave", &vpss_slave_clk),
        CLK(NULL, "uart0", &uart0_clk),
        CLK(NULL, "uart1", &uart1_clk),
        CLK(NULL, "uart2", &uart2_clk),
        CLK("davinci_emac.1", NULL, &emac_clk),
        CLK("i2c_davinci.1", NULL, &i2c_clk),
        CLK("palm_bk3710", NULL, &ide_clk),
        CLK("davinci-mcbsp", NULL, &asp_clk),
        CLK("davinci_mmc.0", NULL, &mmcsd_clk),
        CLK(NULL, "spi", &spi_clk),
        CLK(NULL, "gpio", &gpio_clk),
        CLK(NULL, "usb", &usb_clk),
        CLK(NULL, "vlynq", &vlynq_clk),
        CLK(NULL, "aemif", &aemif_clk),
        CLK(NULL, "pwm0", &pwm0_clk),
        CLK(NULL, "pwm1", &pwm1_clk),
        CLK(NULL, "pwm2", &pwm2_clk),
        CLK(NULL, "timer0", &timer0_clk),
        CLK(NULL, "timer1", &timer1_clk),
        CLK("watchdog", NULL, &timer2_clk),
        CLK(NULL, NULL, NULL),
};
static struct emac_platform_data dm644x_emac_pdata = {
.ctrl_reg_offset = DM644X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM644X_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM644X_EMAC_CNTRL_RAM_OFFSET,
.ctrl_ram_size = DM644X_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_1,
};
static struct resource dm644x_emac_resources[] = {
{
.start = DM644X_EMAC_BASE,
.end = DM644X_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_EMACINT,
.end = IRQ_EMACINT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dm644x_emac_device = {
.name = "davinci_emac",
.id = 1,
.dev = {
.platform_data = &dm644x_emac_pdata,
},
.num_resources = ARRAY_SIZE(dm644x_emac_resources),
.resource = dm644x_emac_resources,
};
static struct resource dm644x_mdio_resources[] = {
{
.start = DM644X_EMAC_MDIO_BASE,
.end = DM644X_EMAC_MDIO_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dm644x_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(dm644x_mdio_resources),
.resource = dm644x_mdio_resources,
};
/*
* Device specific mux setup
*
* soc description mux mode mode mux dbg
* reg offset mask mode
*/
static const struct mux_config dm644x_pins[] = {
#ifdef CONFIG_DAVINCI_MUX
MUX_CFG(DM644X, HDIREN, 0, 16, 1, 1, true)
MUX_CFG(DM644X, ATAEN, 0, 17, 1, 1, true)
MUX_CFG(DM644X, ATAEN_DISABLE, 0, 17, 1, 0, true)
MUX_CFG(DM644X, HPIEN_DISABLE, 0, 29, 1, 0, true)
MUX_CFG(DM644X, AEAW, 0, 0, 31, 31, true)
MUX_CFG(DM644X, AEAW0, 0, 0, 1, 0, true)
MUX_CFG(DM644X, AEAW1, 0, 1, 1, 0, true)
MUX_CFG(DM644X, AEAW2, 0, 2, 1, 0, true)
MUX_CFG(DM644X, AEAW3, 0, 3, 1, 0, true)
MUX_CFG(DM644X, AEAW4, 0, 4, 1, 0, true)
MUX_CFG(DM644X, MSTK, 1, 9, 1, 0, false)
MUX_CFG(DM644X, I2C, 1, 7, 1, 1, false)
MUX_CFG(DM644X, MCBSP, 1, 10, 1, 1, false)
MUX_CFG(DM644X, UART1, 1, 1, 1, 1, true)
MUX_CFG(DM644X, UART2, 1, 2, 1, 1, true)
MUX_CFG(DM644X, PWM0, 1, 4, 1, 1, false)
MUX_CFG(DM644X, PWM1, 1, 5, 1, 1, false)
MUX_CFG(DM644X, PWM2, 1, 6, 1, 1, false)
MUX_CFG(DM644X, VLYNQEN, 0, 15, 1, 1, false)
MUX_CFG(DM644X, VLSCREN, 0, 14, 1, 1, false)
MUX_CFG(DM644X, VLYNQWD, 0, 12, 3, 3, false)
MUX_CFG(DM644X, EMACEN, 0, 31, 1, 1, true)
MUX_CFG(DM644X, GPIO3V, 0, 31, 1, 0, true)
MUX_CFG(DM644X, GPIO0, 0, 24, 1, 0, true)
MUX_CFG(DM644X, GPIO3, 0, 25, 1, 0, false)
MUX_CFG(DM644X, GPIO43_44, 1, 7, 1, 0, false)
MUX_CFG(DM644X, GPIO46_47, 0, 22, 1, 0, true)
MUX_CFG(DM644X, RGB666, 0, 22, 1, 1, true)
MUX_CFG(DM644X, LOEEN, 0, 24, 1, 1, true)
MUX_CFG(DM644X, LFLDEN, 0, 25, 1, 1, false)
#endif
};
/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
[IRQ_VDINT0] = 2,
[IRQ_VDINT1] = 6,
[IRQ_VDINT2] = 6,
[IRQ_HISTINT] = 6,
[IRQ_H3AINT] = 6,
[IRQ_PRVUINT] = 6,
[IRQ_RSZINT] = 6,
[7] = 7,
[IRQ_VENCINT] = 6,
[IRQ_ASQINT] = 6,
[IRQ_IMXINT] = 6,
[IRQ_VLCDINT] = 6,
[IRQ_USBINT] = 4,
[IRQ_EMACINT] = 4,
[14] = 7,
[15] = 7,
[IRQ_CCINT0] = 5, /* dma */
[IRQ_CCERRINT] = 5, /* dma */
[IRQ_TCERRINT0] = 5, /* dma */
[IRQ_TCERRINT] = 5, /* dma */
[IRQ_PSCIN] = 7,
[21] = 7,
[IRQ_IDE] = 4,
[23] = 7,
[IRQ_MBXINT] = 7,
[IRQ_MBRINT] = 7,
[IRQ_MMCINT] = 7,
[IRQ_SDIOINT] = 7,
[28] = 7,
[IRQ_DDRINT] = 7,
[IRQ_AEMIFINT] = 7,
[IRQ_VLQINT] = 4,
[IRQ_TINT0_TINT12] = 2, /* clockevent */
[IRQ_TINT0_TINT34] = 2, /* clocksource */
[IRQ_TINT1_TINT12] = 7, /* DSP timer */
[IRQ_TINT1_TINT34] = 7, /* system tick */
[IRQ_PWMINT0] = 7,
[IRQ_PWMINT1] = 7,
[IRQ_PWMINT2] = 7,
[IRQ_I2C] = 3,
[IRQ_UARTINT0] = 3,
[IRQ_UARTINT1] = 3,
[IRQ_UARTINT2] = 3,
[IRQ_SPINT0] = 3,
[IRQ_SPINT1] = 3,
[45] = 7,
[IRQ_DSP2ARM0] = 4,
[IRQ_DSP2ARM1] = 4,
[IRQ_GPIO0] = 7,
[IRQ_GPIO1] = 7,
[IRQ_GPIO2] = 7,
[IRQ_GPIO3] = 7,
[IRQ_GPIO4] = 7,
[IRQ_GPIO5] = 7,
[IRQ_GPIO6] = 7,
[IRQ_GPIO7] = 7,
[IRQ_GPIOBNK0] = 7,
[IRQ_GPIOBNK1] = 7,
[IRQ_GPIOBNK2] = 7,
[IRQ_GPIOBNK3] = 7,
[IRQ_GPIOBNK4] = 7,
[IRQ_COMMTX] = 7,
[IRQ_COMMRX] = 7,
[IRQ_EMUINT] = 7,
};
/*----------------------------------------------------------------------*/
static const s8
queue_tc_mapping[][2] = {
/* {event queue no, TC no} */
{0, 0},
{1, 1},
{-1, -1},
};
static const s8
queue_priority_mapping[][2] = {
/* {event queue no, Priority} */
{0, 3},
{1, 7},
{-1, -1},
};
static struct edma_soc_info edma_cc0_info = {
.n_channel = 64,
.n_region = 4,
.n_slot = 128,
.n_tc = 2,
.n_cc = 1,
.queue_tc_mapping = queue_tc_mapping,
.queue_priority_mapping = queue_priority_mapping,
.default_queue = EVENTQ_1,
};
static struct edma_soc_info *dm644x_edma_info[EDMA_MAX_CC] = {
&edma_cc0_info,
};
static struct resource edma_resources[] = {
{
.name = "edma_cc0",
.start = 0x01c00000,
.end = 0x01c00000 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "edma_tc0",
.start = 0x01c10000,
.end = 0x01c10000 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "edma_tc1",
.start = 0x01c10400,
.end = 0x01c10400 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "edma0",
.start = IRQ_CCINT0,
.flags = IORESOURCE_IRQ,
},
{
.name = "edma0_err",
.start = IRQ_CCERRINT,
.flags = IORESOURCE_IRQ,
},
/* not using TC*_ERR */
};
static struct platform_device dm644x_edma_device = {
.name = "edma",
.id = 0,
.dev.platform_data = dm644x_edma_info,
.num_resources = ARRAY_SIZE(edma_resources),
.resource = edma_resources,
};
/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
static struct resource dm644x_asp_resources[] = {
{
.start = DAVINCI_ASP0_BASE,
.end = DAVINCI_ASP0_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = DAVINCI_DMA_ASP0_TX,
.end = DAVINCI_DMA_ASP0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = DAVINCI_DMA_ASP0_RX,
.end = DAVINCI_DMA_ASP0_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device dm644x_asp_device = {
.name = "davinci-mcbsp",
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_asp_resources),
.resource = dm644x_asp_resources,
};
#define DM644X_VPSS_BASE 0x01c73400
static struct resource dm644x_vpss_resources[] = {
{
/* VPSS Base address */
.name = "vpss",
.start = DM644X_VPSS_BASE,
.end = DM644X_VPSS_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dm644x_vpss_device = {
.name = "vpss",
.id = -1,
.dev.platform_data = "dm644x_vpss",
.num_resources = ARRAY_SIZE(dm644x_vpss_resources),
.resource = dm644x_vpss_resources,
};
static struct resource dm644x_vpfe_resources[] = {
{
.start = IRQ_VDINT0,
.end = IRQ_VDINT0,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_VDINT1,
.end = IRQ_VDINT1,
.flags = IORESOURCE_IRQ,
},
};
static u64 dm644x_video_dma_mask = DMA_BIT_MASK(32);
static struct resource dm644x_ccdc_resource[] = {
/* CCDC Base address */
{
.start = 0x01c70400,
.end = 0x01c70400 + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dm644x_ccdc_dev = {
.name = "dm644x_ccdc",
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_ccdc_resource),
.resource = dm644x_ccdc_resource,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct platform_device dm644x_vpfe_dev = {
.name = CAPTURE_DRV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_vpfe_resources),
.resource = dm644x_vpfe_resources,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
#define DM644X_OSD_BASE 0x01c72600
static struct resource dm644x_osd_resources[] = {
{
.start = DM644X_OSD_BASE,
.end = DM644X_OSD_BASE + 0x1ff,
.flags = IORESOURCE_MEM,
},
};
static struct osd_platform_data dm644x_osd_data = {
.vpbe_type = VPBE_VERSION_1,
};
static struct platform_device dm644x_osd_dev = {
.name = VPBE_OSD_SUBDEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_osd_resources),
.resource = dm644x_osd_resources,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &dm644x_osd_data,
},
};
#define DM644X_VENC_BASE 0x01c72400
static struct resource dm644x_venc_resources[] = {
{
.start = DM644X_VENC_BASE,
.end = DM644X_VENC_BASE + 0x17f,
.flags = IORESOURCE_MEM,
},
};
#define DM644X_VPSS_MUXSEL_PLL2_MODE BIT(0)
#define DM644X_VPSS_MUXSEL_VPBECLK_MODE BIT(1)
#define DM644X_VPSS_VENCLKEN BIT(3)
#define DM644X_VPSS_DACCLKEN BIT(4)
/* Configure the VPSS clock mux for the video encoder.
 * @type selects standard-definition vs DV-preset timing; @mode is the
 * V4L2 DV preset when type == VPBE_ENC_DV_PRESET.
 * Returns 0 on success, -EINVAL for unsupported combinations. */
static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
                                   unsigned int mode)
{
        int ret = 0;
        /* VENC clock is always enabled; DAC/mux bits are ORed in below */
        u32 v = DM644X_VPSS_VENCLKEN;

        switch (type) {
        case VPBE_ENC_STD:
                v |= DM644X_VPSS_DACCLKEN;
                writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
                break;
        case VPBE_ENC_DV_PRESET:
                switch (mode) {
                case V4L2_DV_480P59_94:
                case V4L2_DV_576P50:
                        /* ED modes: clock from PLL2, DACs enabled */
                        v |= DM644X_VPSS_MUXSEL_PLL2_MODE |
                             DM644X_VPSS_DACCLKEN;
                        writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
                        break;
                case V4L2_DV_720P60:
                case V4L2_DV_1080I60:
                case V4L2_DV_1080P30:
                        /*
                         * For HD, use external clock source since
                         * HD requires higher clock rate
                         */
                        v |= DM644X_VPSS_MUXSEL_VPBECLK_MODE;
                        writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
static struct resource dm644x_v4l2_disp_resources[] = {
{
.start = IRQ_VENCINT,
.end = IRQ_VENCINT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device dm644x_vpbe_display = {
.name = "vpbe-v4l2",
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_v4l2_disp_resources),
.resource = dm644x_v4l2_disp_resources,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct venc_platform_data dm644x_venc_pdata = {
.venc_type = VPBE_VERSION_1,
.setup_clock = dm644x_venc_setup_clock,
};
static struct platform_device dm644x_venc_dev = {
.name = VPBE_VENC_SUBDEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(dm644x_venc_resources),
.resource = dm644x_venc_resources,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &dm644x_venc_pdata,
},
};
static struct platform_device dm644x_vpbe_dev = {
.name = "vpbe_controller",
.id = -1,
.dev = {
.dma_mask = &dm644x_video_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/*----------------------------------------------------------------------*/
static struct map_desc dm644x_io_desc[] = {
{
.virtual = IO_VIRT,
.pfn = __phys_to_pfn(IO_PHYS),
.length = IO_SIZE,
.type = MT_DEVICE
},
{
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00008000),
.length = SZ_16K,
.type = MT_MEMORY_NONCACHED,
},
};
/* Contents of JTAG ID register used to identify exact cpu type */
/* The variant field distinguishes silicon revisions of the same part. */
static struct davinci_id dm644x_ids[] = {
{
.variant = 0x0,
.part_no = 0xb700,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DM6446,
.name = "dm6446",
},
{
.variant = 0x1,
.part_no = 0xb700,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_DM6446,
.name = "dm6446a",
},
};
/* Base address(es) of the Power and Sleep Controller (PSC) instance(s). */
static u32 dm644x_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
/*
 * T0_BOT: Timer 0, bottom: clockevent source for hrtimers
 * T0_TOP: Timer 0, top : clocksource for generic timekeeping
 * T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
 * T1_TOP: Timer 1, top : <unused>
 */
/* Maps the timer roles described above onto the common DaVinci timer code. */
static struct davinci_timer_info dm644x_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
};
/*
 * Three memory-mapped 8250-compatible UARTs.  Registers sit on 4-byte
 * boundaries (regshift = 2); the array ends with a zeroed sentinel entry.
 */
static struct plat_serial8250_port dm644x_serial_platform_data[] = {
{
.mapbase = DAVINCI_UART0_BASE,
.irq = IRQ_UARTINT0,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 2,
},
{
.mapbase = DAVINCI_UART1_BASE,
.irq = IRQ_UARTINT1,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 2,
},
{
.mapbase = DAVINCI_UART2_BASE,
.irq = IRQ_UARTINT2,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
.regshift = 2,
},
{
/* terminating entry */
.flags = 0
},
};
/* Standard serial8250 platform device carrying the UART table above. */
static struct platform_device dm644x_serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = dm644x_serial_platform_data,
},
};
/*
 * Master SoC description for the DM644x family, handed to
 * davinci_common_init() at boot: static mappings, JTAG IDs, clocks,
 * PSC, pinmux, interrupt controller, timers, GPIO, serial, EMAC and
 * the DMA-capable SRAM window.
 */
static struct davinci_soc_info davinci_soc_info_dm644x = {
.io_desc = dm644x_io_desc,
.io_desc_num = ARRAY_SIZE(dm644x_io_desc),
.jtag_id_reg = 0x01c40028,
.ids = dm644x_ids,
.ids_num = ARRAY_SIZE(dm644x_ids),
.cpu_clks = dm644x_clks,
.psc_bases = dm644x_psc_bases,
.psc_bases_num = ARRAY_SIZE(dm644x_psc_bases),
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm644x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm644x_pins),
.intc_base = DAVINCI_ARM_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_AINTC,
.intc_irq_prios = dm644x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm644x_timer_info,
.gpio_type = GPIO_TYPE_DAVINCI,
.gpio_base = DAVINCI_GPIO_BASE,
.gpio_num = 71,
.gpio_irq = IRQ_GPIOBNK0,
.serial_dev = &dm644x_serial_device,
.emac_pdata = &dm644x_emac_pdata,
/* same SRAM region that dm644x_io_desc maps non-cached */
.sram_dma = 0x00008000,
.sram_len = SZ_16K,
};
/*
 * dm644x_init_asp - register the audio serial port (ASP) device
 * @pdata: board-specific audio platform data attached to the device
 *
 * Muxes the McBSP pins before registering, so the board's audio pins
 * are routed by the time the driver probes.
 */
void __init dm644x_init_asp(struct snd_platform_data *pdata)
{
davinci_cfg_reg(DM644X_MCBSP);
dm644x_asp_device.dev.platform_data = pdata;
platform_device_register(&dm644x_asp_device);
}
/*
 * dm644x_init - early SoC init: register the SoC description with the
 * common DaVinci code and map the system module (SYSMOD) registers.
 */
void __init dm644x_init(void)
{
davinci_common_init(&davinci_soc_info_dm644x);
davinci_map_sysmod();
}
/*
 * dm644x_init_video - register the DM644x video devices
 * @vpfe_cfg: capture (VPFE) board config, or NULL to skip capture
 * @vpbe_cfg: display (VPBE) board config, or NULL to skip display
 *
 * Registers the shared VPSS device when either side is wanted, then
 * the CCDC + VPFE capture devices and/or the OSD/VENC/VPBE/display
 * devices.  Always returns 0.
 */
int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
struct vpbe_config *vpbe_cfg)
{
if (vpfe_cfg || vpbe_cfg)
platform_device_register(&dm644x_vpss_device);
if (vpfe_cfg) {
dm644x_vpfe_dev.dev.platform_data = vpfe_cfg;
platform_device_register(&dm644x_ccdc_dev);
platform_device_register(&dm644x_vpfe_dev);
/* Add ccdc clock aliases */
clk_add_alias("master", dm644x_ccdc_dev.name,
"vpss_master", NULL);
clk_add_alias("slave", dm644x_ccdc_dev.name,
"vpss_slave", NULL);
}
if (vpbe_cfg) {
dm644x_vpbe_dev.dev.platform_data = vpbe_cfg;
platform_device_register(&dm644x_osd_dev);
platform_device_register(&dm644x_venc_dev);
platform_device_register(&dm644x_vpbe_dev);
platform_device_register(&dm644x_vpbe_display);
}
return 0;
}
/*
 * Postcore-time device registration for DM644x: EDMA, MDIO and EMAC.
 * The clock alias lets the MDIO device resolve the EMAC device's clock.
 */
static int __init dm644x_init_devices(void)
{
/* this kernel may support several DaVinci variants; do nothing on others */
if (!cpu_is_davinci_dm644x())
return 0;
platform_device_register(&dm644x_edma_device);
platform_device_register(&dm644x_mdio_device);
platform_device_register(&dm644x_emac_device);
clk_add_alias(NULL, dev_name(&dm644x_mdio_device.dev),
NULL, &dm644x_emac_device.dev);
return 0;
}
postcore_initcall(dm644x_init_devices);
| gpl-2.0 |
ch33kybutt/kernel_skipjack_tuna | drivers/media/dvb/mantis/mantis_dvb.c | 6400 | 7701 | /*
Mantis PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "mantis_dma.h"
#include "mantis_ca.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/*
 * mantis_frontend_power - drive the frontend power-supply GPIO
 * @mantis: device state
 * @power:  POWER_ON or POWER_OFF
 *
 * Returns 0 on success, -1 for an unrecognised @power value.
 */
int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power)
{
struct mantis_hwconfig *config = mantis->hwconfig;
switch (power) {
case POWER_ON:
dprintk(MANTIS_DEBUG, 1, "Power ON");
/*
 * NOTE(review): the bit is deliberately written twice with 100ms
 * settle delays -- presumably a hardware power-up timing
 * requirement; confirm against the datasheet before changing.
 */
mantis_gpio_set_bits(mantis, config->power, POWER_ON);
msleep(100);
mantis_gpio_set_bits(mantis, config->power, POWER_ON);
msleep(100);
break;
case POWER_OFF:
dprintk(MANTIS_DEBUG, 1, "Power OFF");
mantis_gpio_set_bits(mantis, config->power, POWER_OFF);
msleep(100);
break;
default:
dprintk(MANTIS_DEBUG, 1, "Unknown state <%02x>", power);
return -1;
}
return 0;
}
EXPORT_SYMBOL_GPL(mantis_frontend_power);
void mantis_frontend_soft_reset(struct mantis_pci *mantis)
{
struct mantis_hwconfig *config = mantis->hwconfig;
dprintk(MANTIS_DEBUG, 1, "Frontend RESET");
mantis_gpio_set_bits(mantis, config->reset, 0);
msleep(100);
mantis_gpio_set_bits(mantis, config->reset, 0);
msleep(100);
mantis_gpio_set_bits(mantis, config->reset, 1);
msleep(100);
mantis_gpio_set_bits(mantis, config->reset, 1);
msleep(100);
return;
}
EXPORT_SYMBOL_GPL(mantis_frontend_soft_reset);
/*
 * mantis_frontend_shutdown - reset the frontend and cut its power
 * @mantis: device state
 *
 * Returns 0 on success, 1 if powering off failed.
 */
static int mantis_frontend_shutdown(struct mantis_pci *mantis)
{
	int err;

	mantis_frontend_soft_reset(mantis);
	err = mantis_frontend_power(mantis, POWER_OFF);
	if (err == 0)
		return 0;

	dprintk(MANTIS_ERROR, 1, "Frontend POWER OFF failed! <%d>", err);
	return 1;
}
/*
 * Demux callback: a feed is starting.  On the transition from zero to
 * one active feed, the DMA engine is started and the completion
 * tasklet enabled.  Returns the new feed count (> 0), or -EINVAL if
 * the demux has no frontend connected.
 */
static int mantis_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct mantis_pci *mantis = dvbdmx->priv;
dprintk(MANTIS_DEBUG, 1, "Mantis DVB Start feed");
if (!dvbdmx->dmx.frontend) {
dprintk(MANTIS_DEBUG, 1, "no frontend ?");
return -EINVAL;
}
mantis->feeds++;
dprintk(MANTIS_DEBUG, 1, "mantis start feed, feeds=%d", mantis->feeds);
if (mantis->feeds == 1) {
dprintk(MANTIS_DEBUG, 1, "mantis start feed & dma");
mantis_dma_start(mantis);
tasklet_enable(&mantis->tasklet);
}
return mantis->feeds;
}
/*
 * Demux callback: a feed is stopping.  When the last feed goes away,
 * the tasklet is disabled and the DMA engine stopped (mirror of
 * mantis_dvb_start_feed()).  Returns 0, or -EINVAL without a demux
 * frontend.
 */
static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct mantis_pci *mantis = dvbdmx->priv;
dprintk(MANTIS_DEBUG, 1, "Mantis DVB Stop feed");
if (!dvbdmx->dmx.frontend) {
dprintk(MANTIS_DEBUG, 1, "no frontend ?");
return -EINVAL;
}
mantis->feeds--;
if (mantis->feeds == 0) {
dprintk(MANTIS_DEBUG, 1, "mantis stop feed and dma");
tasklet_disable(&mantis->tasklet);
mantis_dma_stop(mantis);
}
return 0;
}
/*
 * mantis_dvb_init - bring up the dvb-core stack for one Mantis card
 * @mantis: device state
 *
 * Registers, in dependency order: the DVB adapter, the software demux,
 * the demux device node, the hardware and memory demux frontends
 * (connecting the hardware one), the DVB network interface and the DMA
 * tasklet, then probes the card-specific tuner frontend through the
 * hwconfig frontend_init() hook and registers it.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * set up so far is unwound in reverse order through the err* labels.
 */
int __devinit mantis_dvb_init(struct mantis_pci *mantis)
{
	struct mantis_hwconfig *config = mantis->hwconfig;
	int result;

	dprintk(MANTIS_DEBUG, 1, "dvb_register_adapter");
	result = dvb_register_adapter(&mantis->dvb_adapter,
				      "Mantis DVB adapter",
				      THIS_MODULE,
				      &mantis->pdev->dev,
				      adapter_nr);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "Error registering adapter");
		return -ENODEV;
	}
	mantis->dvb_adapter.priv = mantis;
	mantis->demux.dmx.capabilities = DMX_TS_FILTERING |
					 DMX_SECTION_FILTERING |
					 DMX_MEMORY_BASED_FILTERING;
	mantis->demux.priv = mantis;
	mantis->demux.filternum = 256;
	mantis->demux.feednum = 256;
	mantis->demux.start_feed = mantis_dvb_start_feed;
	mantis->demux.stop_feed = mantis_dvb_stop_feed;
	mantis->demux.write_to_decoder = NULL;
	dprintk(MANTIS_DEBUG, 1, "dvb_dmx_init");
	result = dvb_dmx_init(&mantis->demux);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err0;
	}
	mantis->dmxdev.filternum = 256;
	mantis->dmxdev.demux = &mantis->demux.dmx;
	mantis->dmxdev.capabilities = 0;
	dprintk(MANTIS_DEBUG, 1, "dvb_dmxdev_init");
	result = dvb_dmxdev_init(&mantis->dmxdev, &mantis->dvb_adapter);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmxdev_init failed, ERROR=%d", result);
		goto err1;
	}
	mantis->fe_hw.source = DMX_FRONTEND_0;
	result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_hw);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err2;
	}
	mantis->fe_mem.source = DMX_MEMORY_FE;
	result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_mem);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err3;
	}
	result = mantis->demux.dmx.connect_frontend(&mantis->demux.dmx, &mantis->fe_hw);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err4;
	}
	dvb_net_init(&mantis->dvb_adapter, &mantis->dvbnet, &mantis->demux.dmx);
	/* keep the tasklet disabled until the first feed starts the DMA */
	tasklet_init(&mantis->tasklet, mantis_dma_xfer, (unsigned long) mantis);
	tasklet_disable(&mantis->tasklet);
	if (mantis->hwconfig) {
		result = config->frontend_init(mantis, mantis->fe);
		if (result < 0) {
			dprintk(MANTIS_ERROR, 1, "!!! NO Frontends found !!!");
			goto err5;
		} else {
			if (mantis->fe == NULL) {
				dprintk(MANTIS_ERROR, 1, "FE <NULL>");
				goto err5;
			}
			if (dvb_register_frontend(&mantis->dvb_adapter, mantis->fe)) {
				dprintk(MANTIS_ERROR, 1, "ERROR: Frontend registration failed");
				if (mantis->fe->ops.release)
					mantis->fe->ops.release(mantis->fe);
				mantis->fe = NULL;
				goto err5;
			}
		}
	}
	return 0;

	/* Error conditions .. */
err5:
	tasklet_kill(&mantis->tasklet);
	dvb_net_release(&mantis->dvbnet);
	/*
	 * mantis->fe is NULL here when no frontend was found, or when
	 * dvb_register_frontend() failed (that path already released the
	 * frontend and cleared the pointer).  The unregister/detach
	 * helpers must never be handed a NULL frontend.
	 */
	if (mantis->fe) {
		dvb_unregister_frontend(mantis->fe);
		dvb_frontend_detach(mantis->fe);
	}
err4:
	mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
err3:
	mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
err2:
	dvb_dmxdev_release(&mantis->dmxdev);
err1:
	dvb_dmx_release(&mantis->demux);
err0:
	dvb_unregister_adapter(&mantis->dvb_adapter);
	return result;
}
EXPORT_SYMBOL_GPL(mantis_dvb_init);
/*
 * mantis_dvb_exit - tear down everything mantis_dvb_init() registered
 * @mantis: device state
 *
 * Powers down and unregisters the frontend first (if one is attached),
 * then releases the tasklet, network interface, demux frontends, demux
 * device, demux and adapter in reverse registration order.  Always
 * returns 0.
 */
int __devexit mantis_dvb_exit(struct mantis_pci *mantis)
{
int err;
if (mantis->fe) {
/* mantis_ca_exit(mantis); */
err = mantis_frontend_shutdown(mantis);
if (err != 0)
dprintk(MANTIS_ERROR, 1, "Frontend exit while POWER ON! <%d>", err);
dvb_unregister_frontend(mantis->fe);
dvb_frontend_detach(mantis->fe);
}
tasklet_kill(&mantis->tasklet);
dvb_net_release(&mantis->dvbnet);
mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
dvb_dmxdev_release(&mantis->dmxdev);
dvb_dmx_release(&mantis->demux);
dprintk(MANTIS_DEBUG, 1, "dvb_unregister_adapter");
dvb_unregister_adapter(&mantis->dvb_adapter);
return 0;
}
EXPORT_SYMBOL_GPL(mantis_dvb_exit);
| gpl-2.0 |
srfarias/srfarias-kernel | net/rds/cong.c | 8448 | 12226 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include "rds.h"
/*
* This file implements the receive side of the unconventional congestion
* management in RDS.
*
* Messages waiting in the receive queue on the receiving socket are accounted
* against the sockets SO_RCVBUF option value. Only the payload bytes in the
* message are accounted for. If the number of bytes queued equals or exceeds
* rcvbuf then the socket is congested. All sends attempted to this socket's
* address should return block or return -EWOULDBLOCK.
*
* Applications are expected to be reasonably tuned such that this situation
* very rarely occurs. An application encountering this "back-pressure" is
* considered a bug.
*
* This is implemented by having each node maintain bitmaps which indicate
* which ports on bound addresses are congested. As the bitmap changes it is
* sent through all the connections which terminate in the local address of the
* bitmap which changed.
*
* The bitmaps are allocated as connections are brought up. This avoids
* allocation in the interrupt handling path which queues messages on sockets.
* The dense bitmaps let transports send the entire bitmap on any bitmap change
* reasonably efficiently. This is much easier to implement than some
* finer-grained communication of per-port congestion. The sender does a very
* inexpensive bit test to test if the port it's about to send to is congested
* or not.
*/
/*
* Interaction with poll is a tad tricky. We want all processes stuck in
* poll to wake up and check whether a congested destination became uncongested.
* The really sad thing is we have no idea which destinations the application
* wants to send to - we don't even know which rds_connections are involved.
* So until we implement a more flexible rds poll interface, we have to make
* do with this:
* We maintain a global counter that is incremented each time a congestion map
* update is received. Each rds socket tracks this value, and if rds_poll
* finds that the saved generation number is smaller than the global generation
* number, it wakes up the process.
*/
static atomic_t rds_cong_generation = ATOMIC_INIT(0);
/*
* Congestion monitoring
*/
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);
/*
* Yes, a global lock. It's used so infrequently that it's worth keeping it
* global to simplify the locking. It's only used in the following
* circumstances:
*
* - on connection buildup to associate a conn with its maps
* - on map changes to inform conns of a new map to send
*
* It's sadly ordered under the socket callback lock and the connection lock.
* Receive paths can mark ports congested from interrupt context so the
* lock masks interrupts.
*/
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;
/*
 * Walk the global rb-tree of per-address congestion maps.
 *
 * Returns the existing map for @addr if one is present.  Otherwise, if
 * @insert is non-NULL it is linked into the tree at the point the
 * search ended and NULL is returned.  Callers serialize with
 * rds_cong_lock (see rds_cong_from_addr() and rds_cong_remove_socket()).
 */
static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
struct rds_cong_map *insert)
{
struct rb_node **p = &rds_cong_tree.rb_node;
struct rb_node *parent = NULL;
struct rds_cong_map *map;
/* standard rb-tree descent keyed on the raw big-endian address value */
while (*p) {
parent = *p;
map = rb_entry(parent, struct rds_cong_map, m_rb_node);
if (addr < map->m_addr)
p = &(*p)->rb_left;
else if (addr > map->m_addr)
p = &(*p)->rb_right;
else
return map;
}
if (insert) {
rb_link_node(&insert->m_rb_node, parent, p);
rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
}
return NULL;
}
/*
 * There is only ever one bitmap for any address. Connections try and allocate
 * these bitmaps in the process getting pointers to them. The bitmaps are only
 * ever freed as the module is removed after all connections have been freed.
 *
 * Returns the (shared) map for @addr, or NULL on allocation failure.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
struct rds_cong_map *map;
struct rds_cong_map *ret = NULL;
unsigned long zp;
unsigned long i;
unsigned long flags;
/* optimistically allocate a fresh map outside the lock */
map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
if (!map)
return NULL;
map->m_addr = addr;
init_waitqueue_head(&map->m_waitq);
INIT_LIST_HEAD(&map->m_conn_list);
for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
zp = get_zeroed_page(GFP_KERNEL);
if (zp == 0)
goto out;
map->m_page_addrs[i] = zp;
}
spin_lock_irqsave(&rds_cong_lock, flags);
/* returns an existing map for addr, or inserts ours and returns NULL */
ret = rds_cong_tree_walk(addr, map);
spin_unlock_irqrestore(&rds_cong_lock, flags);
if (!ret) {
ret = map;
map = NULL;
}
out:
/* free our copy if page allocation failed part-way or we lost the race */
if (map) {
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
free_page(map->m_page_addrs[i]);
kfree(map);
}
rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));
return ret;
}
/*
 * Put the conn on its local map's list. This is called when the conn is
 * really added to the hash. It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
unsigned long flags;
rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
spin_lock_irqsave(&rds_cong_lock, flags);
/* m_conn_list is walked under rds_cong_lock by rds_cong_queue_updates() */
list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/* Undo rds_cong_add_conn(): unlink the conn from its local map's list. */
void rds_cong_remove_conn(struct rds_connection *conn)
{
unsigned long flags;
rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
spin_lock_irqsave(&rds_cong_lock, flags);
list_del_init(&conn->c_map_item);
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/*
 * Look up (or create) the congestion maps for both endpoints of @conn.
 * Returns 0 on success, -ENOMEM if either map could not be obtained.
 */
int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

	if (!conn->c_lcong || !conn->c_fcong)
		return -ENOMEM;

	return 0;
}
/*
 * Queue a congestion-map transmission on every connection terminating
 * in @map's address.  The c_map_queued bit ensures at most one update
 * is outstanding per connection.
 */
void rds_cong_queue_updates(struct rds_cong_map *map)
{
struct rds_connection *conn;
unsigned long flags;
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
rds_send_xmit(conn);
}
}
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/*
 * A congestion map update arrived for @map: bump the global generation
 * so pollers re-check, wake senders blocked on this map, and notify
 * monitoring sockets whose monitored ports intersect @portmask.
 */
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
rdsdebug("waking map %p for %pI4\n",
map, &map->m_addr);
rds_stats_inc(s_cong_update_received);
atomic_inc(&rds_cong_generation);
if (waitqueue_active(&map->m_waitq))
wake_up(&map->m_waitq);
if (waitqueue_active(&rds_poll_waitq))
wake_up_all(&rds_poll_waitq);
if (portmask && !list_empty(&rds_cong_monitor)) {
unsigned long flags;
struct rds_sock *rs;
read_lock_irqsave(&rds_cong_monitor_lock, flags);
list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
spin_lock(&rs->rs_lock);
/* latch the monitored ports that changed, then disarm them */
rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
rs->rs_cong_mask &= ~portmask;
spin_unlock(&rs->rs_lock);
if (rs->rs_cong_notify)
rds_wake_sk_sleep(rs);
}
read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);
/*
 * Report whether the global congestion generation has moved past the
 * caller's cached value, refreshing the cache when it has.  Returns 1
 * if an update was seen, 0 otherwise.
 */
int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (unlikely(*recent != gen)) {
		*recent = gen;
		return 1;
	}
	return 0;
}
/*
 * We're called under the locking that protects the sockets receive buffer
 * consumption. This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
rdsdebug("setting congestion for %pI4:%u in map %p\n",
&map->m_addr, ntohs(port), map);
/* one bit per port number, spread over RDS_CONG_MAP_PAGES pages */
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
__set_bit_le(off, (void *)map->m_page_addrs[i]);
}
/* Mark @port uncongested; non-atomic, see comment above rds_cong_set_bit(). */
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
rdsdebug("clearing congestion for %pI4:%u in map %p\n",
&map->m_addr, ntohs(port), map);
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
__clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
/*
 * Non-atomic test of @port's congestion bit in @map; bit layout matches
 * rds_cong_set_bit()/rds_cong_clear_bit().
 */
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long bit = be16_to_cpu(port);

	return test_bit_le(bit % RDS_CONG_MAP_PAGE_BITS,
			   (void *)map->m_page_addrs[bit / RDS_CONG_MAP_PAGE_BITS]);
}
/* Put @rs on the global congestion-monitor list (idempotent). */
void rds_cong_add_socket(struct rds_sock *rs)
{
unsigned long flags;
write_lock_irqsave(&rds_cong_monitor_lock, flags);
if (list_empty(&rs->rs_cong_list))
list_add(&rs->rs_cong_list, &rds_cong_monitor);
write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
/*
 * Take @rs off the congestion-monitor list and, if its bound port was
 * marked congested, clear the bit and push the updated map out.
 */
void rds_cong_remove_socket(struct rds_sock *rs)
{
unsigned long flags;
struct rds_cong_map *map;
write_lock_irqsave(&rds_cong_monitor_lock, flags);
list_del_init(&rs->rs_cong_list);
write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
/* update congestion map for now-closed port */
spin_lock_irqsave(&rds_cong_lock, flags);
map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
spin_unlock_irqrestore(&rds_cong_lock, flags);
if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
rds_cong_clear_bit(map, rs->rs_bound_port);
rds_cong_queue_updates(map);
}
}
/*
 * Wait for @port in @map to become uncongested.
 *
 * Returns 0 at once if the port is clear.  Non-blocking callers with
 * congestion monitoring enabled arm the monitor mask for the port and
 * re-test before failing; otherwise -ENOBUFS is returned.  Blocking
 * callers sleep interruptibly until the bit clears.
 */
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
struct rds_sock *rs)
{
if (!rds_cong_test_bit(map, port))
return 0;
if (nonblock) {
if (rs && rs->rs_cong_monitor) {
unsigned long flags;
/* It would have been nice to have an atomic set_bit on
 * a uint64_t. */
spin_lock_irqsave(&rs->rs_lock, flags);
rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
spin_unlock_irqrestore(&rs->rs_lock, flags);
/* Test again - a congestion update may have arrived in
 * the meantime. */
if (!rds_cong_test_bit(map, port))
return 0;
}
rds_stats_inc(s_cong_send_error);
return -ENOBUFS;
}
rds_stats_inc(s_cong_send_blocked);
rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));
return wait_event_interruptible(map->m_waitq,
!rds_cong_test_bit(map, port));
}
/*
 * Module teardown: free every map still in the tree.  Per the comment
 * above rds_cong_from_addr(), all connections (and thus all users of
 * the maps) are gone by the time this runs.
 */
void rds_cong_exit(void)
{
struct rb_node *node;
struct rds_cong_map *map;
unsigned long i;
while ((node = rb_first(&rds_cong_tree))) {
map = rb_entry(node, struct rds_cong_map, m_rb_node);
rdsdebug("freeing map %p\n", map);
rb_erase(&map->m_rb_node, &rds_cong_tree);
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
free_page(map->m_page_addrs[i]);
kfree(map);
}
}
/*
 * Allocate a RDS message containing a congestion update.
 *
 * The connection's local bitmap pages are mapped directly into the
 * message.  Returns the message (flagged RDS_FLAG_CONG_BITMAP) or an
 * ERR_PTR from rds_message_map_pages().
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
struct rds_cong_map *map = conn->c_lcong;
struct rds_message *rm;
rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
if (!IS_ERR(rm))
rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;
return rm;
}
| gpl-2.0 |
Team-Hydra/S5-AEL-Kernel | net/rds/cong.c | 8448 | 12226 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include "rds.h"
/*
* This file implements the receive side of the unconventional congestion
* management in RDS.
*
* Messages waiting in the receive queue on the receiving socket are accounted
* against the sockets SO_RCVBUF option value. Only the payload bytes in the
* message are accounted for. If the number of bytes queued equals or exceeds
* rcvbuf then the socket is congested. All sends attempted to this socket's
* address should return block or return -EWOULDBLOCK.
*
* Applications are expected to be reasonably tuned such that this situation
* very rarely occurs. An application encountering this "back-pressure" is
* considered a bug.
*
* This is implemented by having each node maintain bitmaps which indicate
* which ports on bound addresses are congested. As the bitmap changes it is
* sent through all the connections which terminate in the local address of the
* bitmap which changed.
*
* The bitmaps are allocated as connections are brought up. This avoids
* allocation in the interrupt handling path which queues messages on sockets.
* The dense bitmaps let transports send the entire bitmap on any bitmap change
* reasonably efficiently. This is much easier to implement than some
* finer-grained communication of per-port congestion. The sender does a very
* inexpensive bit test to test if the port it's about to send to is congested
* or not.
*/
/*
* Interaction with poll is a tad tricky. We want all processes stuck in
* poll to wake up and check whether a congested destination became uncongested.
* The really sad thing is we have no idea which destinations the application
* wants to send to - we don't even know which rds_connections are involved.
* So until we implement a more flexible rds poll interface, we have to make
* do with this:
* We maintain a global counter that is incremented each time a congestion map
* update is received. Each rds socket tracks this value, and if rds_poll
* finds that the saved generation number is smaller than the global generation
* number, it wakes up the process.
*/
static atomic_t rds_cong_generation = ATOMIC_INIT(0);
/*
* Congestion monitoring
*/
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);
/*
* Yes, a global lock. It's used so infrequently that it's worth keeping it
* global to simplify the locking. It's only used in the following
* circumstances:
*
* - on connection buildup to associate a conn with its maps
* - on map changes to inform conns of a new map to send
*
* It's sadly ordered under the socket callback lock and the connection lock.
* Receive paths can mark ports congested from interrupt context so the
* lock masks interrupts.
*/
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;
/*
 * Walk the global rb-tree of per-address congestion maps.
 *
 * Returns the existing map for @addr if one is present.  Otherwise, if
 * @insert is non-NULL it is linked into the tree at the point the
 * search ended and NULL is returned.  Callers serialize with
 * rds_cong_lock (see rds_cong_from_addr() and rds_cong_remove_socket()).
 */
static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
struct rds_cong_map *insert)
{
struct rb_node **p = &rds_cong_tree.rb_node;
struct rb_node *parent = NULL;
struct rds_cong_map *map;
/* standard rb-tree descent keyed on the raw big-endian address value */
while (*p) {
parent = *p;
map = rb_entry(parent, struct rds_cong_map, m_rb_node);
if (addr < map->m_addr)
p = &(*p)->rb_left;
else if (addr > map->m_addr)
p = &(*p)->rb_right;
else
return map;
}
if (insert) {
rb_link_node(&insert->m_rb_node, parent, p);
rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
}
return NULL;
}
/*
 * There is only ever one bitmap for any address. Connections try and allocate
 * these bitmaps in the process getting pointers to them. The bitmaps are only
 * ever freed as the module is removed after all connections have been freed.
 *
 * Returns the (shared) map for @addr, or NULL on allocation failure.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
struct rds_cong_map *map;
struct rds_cong_map *ret = NULL;
unsigned long zp;
unsigned long i;
unsigned long flags;
/* optimistically allocate a fresh map outside the lock */
map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
if (!map)
return NULL;
map->m_addr = addr;
init_waitqueue_head(&map->m_waitq);
INIT_LIST_HEAD(&map->m_conn_list);
for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
zp = get_zeroed_page(GFP_KERNEL);
if (zp == 0)
goto out;
map->m_page_addrs[i] = zp;
}
spin_lock_irqsave(&rds_cong_lock, flags);
/* returns an existing map for addr, or inserts ours and returns NULL */
ret = rds_cong_tree_walk(addr, map);
spin_unlock_irqrestore(&rds_cong_lock, flags);
if (!ret) {
ret = map;
map = NULL;
}
out:
/* free our copy if page allocation failed part-way or we lost the race */
if (map) {
for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
free_page(map->m_page_addrs[i]);
kfree(map);
}
rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));
return ret;
}
/*
 * Put the conn on its local map's list. This is called when the conn is
 * really added to the hash. It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
unsigned long flags;
rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
spin_lock_irqsave(&rds_cong_lock, flags);
/* m_conn_list is walked under rds_cong_lock by rds_cong_queue_updates() */
list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/* Undo rds_cong_add_conn(): unlink the conn from its local map's list. */
void rds_cong_remove_conn(struct rds_connection *conn)
{
unsigned long flags;
rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
spin_lock_irqsave(&rds_cong_lock, flags);
list_del_init(&conn->c_map_item);
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/*
 * Look up (or create) the congestion maps for both endpoints of @conn.
 * Returns 0 on success, -ENOMEM if either map could not be obtained.
 */
int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

	if (!conn->c_lcong || !conn->c_fcong)
		return -ENOMEM;

	return 0;
}
/*
 * Queue a congestion-map transmission on every connection terminating
 * in @map's address.  The c_map_queued bit ensures at most one update
 * is outstanding per connection.
 */
void rds_cong_queue_updates(struct rds_cong_map *map)
{
struct rds_connection *conn;
unsigned long flags;
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
rds_send_xmit(conn);
}
}
spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/*
 * A congestion map update arrived for @map: bump the global generation
 * so pollers re-check, wake senders blocked on this map, and notify
 * monitoring sockets whose monitored ports intersect @portmask.
 */
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
rdsdebug("waking map %p for %pI4\n",
map, &map->m_addr);
rds_stats_inc(s_cong_update_received);
atomic_inc(&rds_cong_generation);
if (waitqueue_active(&map->m_waitq))
wake_up(&map->m_waitq);
if (waitqueue_active(&rds_poll_waitq))
wake_up_all(&rds_poll_waitq);
if (portmask && !list_empty(&rds_cong_monitor)) {
unsigned long flags;
struct rds_sock *rs;
read_lock_irqsave(&rds_cong_monitor_lock, flags);
list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
spin_lock(&rs->rs_lock);
/* latch the monitored ports that changed, then disarm them */
rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
rs->rs_cong_mask &= ~portmask;
spin_unlock(&rs->rs_lock);
if (rs->rs_cong_notify)
rds_wake_sk_sleep(rs);
}
read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);
/*
 * Report whether the global congestion generation has moved past the
 * caller's cached value, refreshing the cache when it has.  Returns 1
 * if an update was seen, 0 otherwise.
 */
int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (unlikely(*recent != gen)) {
		*recent = gen;
		return 1;
	}
	return 0;
}
/*
 * We're called under the locking that protects the sockets receive buffer
 * consumption. This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
rdsdebug("setting congestion for %pI4:%u in map %p\n",
&map->m_addr, ntohs(port), map);
/* one bit per port number, spread over RDS_CONG_MAP_PAGES pages */
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
__set_bit_le(off, (void *)map->m_page_addrs[i]);
}
/* Mark @port uncongested; non-atomic, see comment above rds_cong_set_bit(). */
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
unsigned long i;
unsigned long off;
rdsdebug("clearing congestion for %pI4:%u in map %p\n",
&map->m_addr, ntohs(port), map);
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
__clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
/* Returns non-zero if @port is currently marked congested in @map. */
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;	/* page index within the map */
	unsigned long off;	/* bit offset within that page */
	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
	return test_bit_le(off, (void *)map->m_page_addrs[i]);
}
/* Add @rs to the list of congestion-monitoring sockets (idempotent). */
void rds_cong_add_socket(struct rds_sock *rs)
{
	unsigned long flags;
	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	/* list_empty() guards against double-adding the same socket */
	if (list_empty(&rs->rs_cong_list))
		list_add(&rs->rs_cong_list, &rds_cong_monitor);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
/*
 * Unhook @rs from the monitor list, then clear its bound port's bit in
 * the local congestion map (if any) and broadcast the change.
 */
void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;	/* reused for both lock sections below */
	struct rds_cong_map *map;
	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}
/*
 * Gate a send on the congestion state of @port in @map.
 *
 * Returns 0 if the port is not congested.  For non-blocking sends the
 * monitored-socket path arms rs_cong_mask for this port so a later map
 * update notifies the socket; either way a congested non-blocking send
 * fails with -ENOBUFS.  Blocking sends sleep interruptibly until the
 * bit clears.
 */
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;
			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);
			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}
	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));
	/* woken by rds_cong_map_updated() via map->m_waitq */
	return wait_event_interruptible(map->m_waitq,
		!rds_cong_test_bit(map, port));
}
/*
 * Module teardown: free every congestion map (and its bitmap pages) left
 * in the global rb-tree.
 */
void rds_cong_exit(void)
{
	struct rb_node *node;
	struct rds_cong_map *map;
	unsigned long i;
	while ((node = rb_first(&rds_cong_tree))) {
		map = rb_entry(node, struct rds_cong_map, m_rb_node);
		rdsdebug("freeing map %p\n", map);
		rb_erase(&map->m_rb_node, &rds_cong_tree);
		/* stop at the first NULL page: partially-allocated maps are
		 * possible only transiently, but be defensive anyway */
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
}
/*
* Allocate a RDS message containing a congestion update.
*/
/*
 * Build an RDS message whose payload maps the local congestion bitmap
 * pages directly (no copy).  Returns an ERR_PTR on failure.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
	struct rds_cong_map *map = conn->c_lcong;
	struct rds_message *rm;
	rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
	if (!IS_ERR(rm))
		rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;
	return rm;
}
| gpl-2.0 |
PyYoshi/android_kernel_kyocera_l03 | net/rds/cong.c | 8448 | 12226 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include "rds.h"
/*
* This file implements the receive side of the unconventional congestion
* management in RDS.
*
* Messages waiting in the receive queue on the receiving socket are accounted
* against the sockets SO_RCVBUF option value. Only the payload bytes in the
* message are accounted for. If the number of bytes queued equals or exceeds
* rcvbuf then the socket is congested. All sends attempted to this socket's
* address should return block or return -EWOULDBLOCK.
*
* Applications are expected to be reasonably tuned such that this situation
* very rarely occurs. An application encountering this "back-pressure" is
* considered a bug.
*
* This is implemented by having each node maintain bitmaps which indicate
* which ports on bound addresses are congested. As the bitmap changes it is
* sent through all the connections which terminate in the local address of the
* bitmap which changed.
*
* The bitmaps are allocated as connections are brought up. This avoids
* allocation in the interrupt handling path which queues messages on sockets.
* The dense bitmaps let transports send the entire bitmap on any bitmap change
* reasonably efficiently. This is much easier to implement than some
* finer-grained communication of per-port congestion. The sender does a very
* inexpensive bit test to test if the port it's about to send to is congested
* or not.
*/
/*
* Interaction with poll is a tad tricky. We want all processes stuck in
* poll to wake up and check whether a congested destination became uncongested.
* The really sad thing is we have no idea which destinations the application
* wants to send to - we don't even know which rds_connections are involved.
* So until we implement a more flexible rds poll interface, we have to make
* do with this:
* We maintain a global counter that is incremented each time a congestion map
* update is received. Each rds socket tracks this value, and if rds_poll
* finds that the saved generation number is smaller than the global generation
* number, it wakes up the process.
*/
static atomic_t rds_cong_generation = ATOMIC_INIT(0);
/*
* Congestion monitoring
*/
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);
/*
* Yes, a global lock. It's used so infrequently that it's worth keeping it
* global to simplify the locking. It's only used in the following
* circumstances:
*
* - on connection buildup to associate a conn with its maps
* - on map changes to inform conns of a new map to send
*
* It's sadly ordered under the socket callback lock and the connection lock.
* Receive paths can mark ports congested from interrupt context so the
* lock masks interrupts.
*/
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;
/*
 * Look up the congestion map for @addr in the global rb-tree.  When no
 * entry exists and @insert is non-NULL, link @insert in at the correct
 * position.  Returns the pre-existing map, or NULL if none was found
 * (which also means @insert, if given, is now in the tree).
 * Caller holds rds_cong_lock.
 */
static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
					       struct rds_cong_map *insert)
{
	struct rb_node **p = &rds_cong_tree.rb_node;
	struct rb_node *parent = NULL;
	struct rds_cong_map *map;
	while (*p) {
		parent = *p;
		map = rb_entry(parent, struct rds_cong_map, m_rb_node);
		/* NOTE(review): addresses compared in network byte order,
		 * so tree order is not numeric host order - fine for lookup */
		if (addr < map->m_addr)
			p = &(*p)->rb_left;
		else if (addr > map->m_addr)
			p = &(*p)->rb_right;
		else
			return map;
	}
	if (insert) {
		rb_link_node(&insert->m_rb_node, parent, p);
		rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
	}
	return NULL;
}
/*
* There is only ever one bitmap for any address. Connections try and allocate
* these bitmaps in the process getting pointers to them. The bitmaps are only
* ever freed as the module is removed after all connections have been freed.
*/
/*
 * Return the (singleton) congestion map for @addr, allocating it if
 * needed.  A candidate map is built outside the lock; if another caller
 * raced us and inserted one first, ours is thrown away and theirs is
 * returned.  Returns NULL on allocation failure.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
	struct rds_cong_map *map;
	struct rds_cong_map *ret = NULL;
	unsigned long zp;
	unsigned long i;
	unsigned long flags;
	map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
	if (!map)
		return NULL;
	map->m_addr = addr;
	init_waitqueue_head(&map->m_waitq);
	INIT_LIST_HEAD(&map->m_conn_list);
	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
		zp = get_zeroed_page(GFP_KERNEL);
		if (zp == 0)
			goto out;
		map->m_page_addrs[i] = zp;
	}
	/* insert unless an existing map for this addr wins the race */
	spin_lock_irqsave(&rds_cong_lock, flags);
	ret = rds_cong_tree_walk(addr, map);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
	if (!ret) {
		ret = map;
		map = NULL;	/* now owned by the tree - don't free below */
	}
	out:
	/* frees the unused candidate: page alloc failure or lost race */
	if (map) {
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
	rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));
	return ret;
}
/*
* Put the conn on its local map's list. This is called when the conn is
* really added to the hash. It's nested under the rds_conn_lock, sadly.
*/
/* Put @conn on its local map's conn list so map updates reach it. */
void rds_cong_add_conn(struct rds_connection *conn)
{
	unsigned long flags;
	rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/* Take @conn off its local map's conn list; inverse of rds_cong_add_conn(). */
void rds_cong_remove_conn(struct rds_connection *conn)
{
	unsigned long flags;
	rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_del_init(&conn->c_map_item);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/*
 * Resolve (allocating on demand) the congestion maps for both ends of
 * @conn.  Returns 0 on success, -ENOMEM if either map could not be
 * obtained.
 */
int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

	return (conn->c_lcong && conn->c_fcong) ? 0 : -ENOMEM;
}
/*
 * Ask every connection attached to @map to transmit the changed bitmap.
 * The c_map_queued bit ensures at most one pending update per connection.
 */
void rds_cong_queue_updates(struct rds_cong_map *map)
{
	struct rds_connection *conn;
	unsigned long flags;
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
		if (!test_and_set_bit(0, &conn->c_map_queued)) {
			rds_stats_inc(s_cong_update_queued);
			/* NOTE(review): rds_send_xmit() is called while
			 * holding rds_cong_lock - relies on it not sleeping */
			rds_send_xmit(conn);
		}
	}
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}
/*
 * Called when the congestion bitmap for @map->m_addr has changed.  Bumps
 * the global generation counter so pollers re-check, wakes senders blocked
 * in rds_cong_wait(), and, when @portmask is non-zero, notifies monitoring
 * sockets whose armed ports intersect the mask.
 */
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
	rdsdebug("waking map %p for %pI4\n",
	  map, &map->m_addr);
	rds_stats_inc(s_cong_update_received);
	atomic_inc(&rds_cong_generation);
	/* waitqueue_active() is an unlocked optimization to skip the wakeups
	 * when nobody is waiting */
	if (waitqueue_active(&map->m_waitq))
		wake_up(&map->m_waitq);
	if (waitqueue_active(&rds_poll_waitq))
		wake_up_all(&rds_poll_waitq);
	if (portmask && !list_empty(&rds_cong_monitor)) {
		unsigned long flags;
		struct rds_sock *rs;
		read_lock_irqsave(&rds_cong_monitor_lock, flags);
		list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
			spin_lock(&rs->rs_lock);
			/* remember which armed ports just changed ... */
			rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
			/* ... and disarm them until re-armed by rds_cong_wait() */
			rs->rs_cong_mask &= ~portmask;
			spin_unlock(&rs->rs_lock);
			if (rs->rs_cong_notify)
				rds_wake_sk_sleep(rs);
		}
		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);
/*
 * Has a congestion map update arrived since the generation stored in
 * *recent?  Returns 1 (refreshing *recent) if so, 0 otherwise.
 */
int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long now = atomic_read(&rds_cong_generation);

	if (unlikely(*recent != now)) {
		*recent = now;
		return 1;
	}
	return 0;
}
/*
* We're called under the locking that protects the sockets receive buffer
* consumption. This makes it a lot easier for the caller to only call us
* when it knows that an existing set bit needs to be cleared, and vice versa.
* We can't block and we need to deal with concurrent sockets working against
* the same per-address map.
*/
/* Mark @port congested in @map (non-atomic LE bitop; see locking comment
 * above). */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long page, bit;
	unsigned int p = be16_to_cpu(port);

	rdsdebug("setting congestion for %pI4:%u in map %p\n",
	  &map->m_addr, ntohs(port), map);

	page = p / RDS_CONG_MAP_PAGE_BITS;
	bit = p % RDS_CONG_MAP_PAGE_BITS;
	__set_bit_le(bit, (void *)map->m_page_addrs[page]);
}
/* Mark @port uncongested in @map; mirror of rds_cong_set_bit(). */
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long page, bit;
	unsigned int p = be16_to_cpu(port);

	rdsdebug("clearing congestion for %pI4:%u in map %p\n",
	  &map->m_addr, ntohs(port), map);

	page = p / RDS_CONG_MAP_PAGE_BITS;
	bit = p % RDS_CONG_MAP_PAGE_BITS;
	__clear_bit_le(bit, (void *)map->m_page_addrs[page]);
}
/* Returns non-zero when @port is marked congested in @map. */
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned int p = be16_to_cpu(port);
	unsigned long page = p / RDS_CONG_MAP_PAGE_BITS;
	unsigned long bit = p % RDS_CONG_MAP_PAGE_BITS;

	return test_bit_le(bit, (void *)map->m_page_addrs[page]);
}
/* Add @rs to the list of congestion-monitoring sockets (idempotent). */
void rds_cong_add_socket(struct rds_sock *rs)
{
	unsigned long flags;
	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	/* list_empty() guards against double-adding the same socket */
	if (list_empty(&rs->rs_cong_list))
		list_add(&rs->rs_cong_list, &rds_cong_monitor);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
/*
 * Unhook @rs from the monitor list, then clear its bound port's bit in
 * the local congestion map (if any) and broadcast the change.
 */
void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;	/* reused for both lock sections below */
	struct rds_cong_map *map;
	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}
/*
 * Gate a send on the congestion state of @port in @map.
 *
 * Returns 0 if the port is not congested.  For non-blocking sends the
 * monitored-socket path arms rs_cong_mask for this port so a later map
 * update notifies the socket; either way a congested non-blocking send
 * fails with -ENOBUFS.  Blocking sends sleep interruptibly until the
 * bit clears.
 */
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;
			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);
			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}
	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));
	/* woken by rds_cong_map_updated() via map->m_waitq */
	return wait_event_interruptible(map->m_waitq,
		!rds_cong_test_bit(map, port));
}
/*
 * Module teardown: free every congestion map (and its bitmap pages) left
 * in the global rb-tree.
 */
void rds_cong_exit(void)
{
	struct rb_node *node;
	struct rds_cong_map *map;
	unsigned long i;
	while ((node = rb_first(&rds_cong_tree))) {
		map = rb_entry(node, struct rds_cong_map, m_rb_node);
		rdsdebug("freeing map %p\n", map);
		rb_erase(&map->m_rb_node, &rds_cong_tree);
		/* stop at the first NULL page - defensive for partial maps */
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
}
/*
* Allocate a RDS message containing a congestion update.
*/
/*
 * Build an RDS message whose payload maps the local congestion bitmap
 * pages directly (zero copy).  Returns an ERR_PTR on failure.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
	struct rds_message *rm;

	rm = rds_message_map_pages(conn->c_lcong->m_page_addrs,
				   RDS_CONG_MAP_BYTES);
	if (!IS_ERR(rm))
		rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;
	return rm;
}
| gpl-2.0 |
MoKee/android_kernel_zte_nx507j | tools/power/cpupower/utils/idle_monitor/nhm_idle.c | 10240 | 4899 | /*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
* Licensed under the terms of the GNU GPL License version 2.
*
* Based on Len Brown's <lenb@kernel.org> turbostat tool.
*/
#if defined(__i386__) || defined(__x86_64__)
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"
#define MSR_PKG_C3_RESIDENCY 0x3F8
#define MSR_PKG_C6_RESIDENCY 0x3F9
#define MSR_CORE_C3_RESIDENCY 0x3FC
#define MSR_CORE_C6_RESIDENCY 0x3FD
#define MSR_TSC 0x10
#define NHM_CSTATE_COUNT 4
enum intel_nhm_id { C3 = 0, C6, PC3, PC6, TSC = 0xFFFF };
static int nhm_get_count_percent(unsigned int self_id, double *percent,
unsigned int cpu);
/* C-state descriptors exposed by this monitor; .id indexes the
 * previous_count/current_count arrays in nhm_get_count_percent(). */
static cstate_t nhm_cstates[NHM_CSTATE_COUNT] = {
	{
		.name			= "C3",
		.desc			= N_("Processor Core C3"),
		.id			= C3,
		.range			= RANGE_CORE,
		.get_count_percent	= nhm_get_count_percent,
	},
	{
		.name			= "C6",
		.desc			= N_("Processor Core C6"),
		.id			= C6,
		.range			= RANGE_CORE,
		.get_count_percent	= nhm_get_count_percent,
	},
	{
		.name			= "PC3",
		.desc			= N_("Processor Package C3"),
		.id			= PC3,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= nhm_get_count_percent,
	},
	{
		.name			= "PC6",
		.desc			= N_("Processor Package C6"),
		.id			= PC6,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= nhm_get_count_percent,
	},
};
static unsigned long long tsc_at_measure_start;
static unsigned long long tsc_at_measure_end;
static unsigned long long *previous_count[NHM_CSTATE_COUNT];
static unsigned long long *current_count[NHM_CSTATE_COUNT];
/* valid flag for all CPUs. If a MSR read failed it will be zero */
static int *is_valid;
/*
 * Read the raw residency counter (or the TSC) for state @id on @cpu via
 * the corresponding MSR.
 *
 * Returns 0 with the counter value in *val on success, -1 when @id is
 * unknown or the MSR read failed (e.g. msr module missing / no access).
 *
 * Fix: dropped the stray semicolon after the switch's closing brace,
 * which was an empty statement and triggers -Wpedantic warnings.
 */
static int nhm_get_count(enum intel_nhm_id id, unsigned long long *val,
			 unsigned int cpu)
{
	int msr;

	switch (id) {
	case C3:
		msr = MSR_CORE_C3_RESIDENCY;
		break;
	case C6:
		msr = MSR_CORE_C6_RESIDENCY;
		break;
	case PC3:
		msr = MSR_PKG_C3_RESIDENCY;
		break;
	case PC6:
		msr = MSR_PKG_C6_RESIDENCY;
		break;
	case TSC:
		msr = MSR_TSC;
		break;
	default:
		return -1;
	}
	if (read_msr(cpu, msr, val))
		return -1;
	return 0;
}
/*
 * Percentage of the measure interval that @cpu spent in state @id:
 * residency-counter delta scaled by the TSC delta over the same interval.
 * Returns -1 (with *percent = 0) if the last MSR read for this CPU failed.
 */
static int nhm_get_count_percent(unsigned int id, double *percent,
				 unsigned int cpu)
{
	*percent = 0.0;
	if (!is_valid[cpu])
		return -1;
	/* residency MSRs and the TSC tick at comparable rates, so the
	 * ratio of the deltas is the fraction of time in the state */
	*percent = (100.0 *
		(current_count[id][cpu] - previous_count[id][cpu])) /
		(tsc_at_measure_end - tsc_at_measure_start);
	dprint("%s: previous: %llu - current: %llu - (%u)\n",
		nhm_cstates[id].name, previous_count[id][cpu],
		current_count[id][cpu], cpu);
	dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
	       nhm_cstates[id].name,
	       (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
	       current_count[id][cpu] - previous_count[id][cpu],
	       *percent, cpu);
	return 0;
}
/*
 * Snapshot the TSC and all residency counters at the start of a measure
 * interval.  A failed MSR read marks that CPU invalid for this round.
 */
static int nhm_start(void)
{
	int num, cpu;
	unsigned long long dbg, val;
	nhm_get_count(TSC, &tsc_at_measure_start, 0);
	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			is_valid[cpu] = !nhm_get_count(num, &val, cpu);
			previous_count[num][cpu] = val;
		}
	}
	/* second TSC read only to report how long the snapshot itself took */
	nhm_get_count(TSC, &dbg, 0);
	dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
	return 0;
}
/*
 * Snapshot the TSC and all residency counters at the end of a measure
 * interval; nhm_get_count_percent() works on the start/end deltas.
 * NOTE(review): is_valid[] is overwritten here, so a CPU whose *start*
 * read failed but whose end read succeeds is reported valid - confirm.
 */
static int nhm_stop(void)
{
	unsigned long long val;
	unsigned long long dbg;
	int num, cpu;
	nhm_get_count(TSC, &tsc_at_measure_end, 0);
	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			is_valid[cpu] = !nhm_get_count(num, &val, cpu);
			current_count[num][cpu] = val;
		}
	}
	/* second TSC read only to report how long the snapshot itself took */
	nhm_get_count(TSC, &dbg, 0);
	dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
	return 0;
}
struct cpuidle_monitor intel_nhm_monitor;
/*
 * Register the Nehalem C-state monitor.
 *
 * Only applicable on Intel CPUs with an invariant TSC and APERF support.
 * Allocates the per-CPU bookkeeping arrays; returns NULL if the CPU does
 * not qualify or an allocation fails (previously calloc() results were
 * never checked, so OOM led to a NULL dereference in nhm_start/nhm_stop).
 */
struct cpuidle_monitor *intel_nhm_register(void)
{
	int num;

	if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
		return NULL;

	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
		return NULL;

	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
		return NULL;

	/* Free this at program termination */
	is_valid = calloc(cpu_count, sizeof(int));
	if (!is_valid)
		return NULL;

	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		previous_count[num] = calloc(cpu_count,
					     sizeof(unsigned long long));
		current_count[num] = calloc(cpu_count,
					    sizeof(unsigned long long));
		if (!previous_count[num] || !current_count[num])
			goto error;
	}

	intel_nhm_monitor.name_len = strlen(intel_nhm_monitor.name);
	return &intel_nhm_monitor;

error:
	/* unwind the partial allocations (free(NULL) is a no-op) */
	for (; num >= 0; num--) {
		free(previous_count[num]);
		free(current_count[num]);
		previous_count[num] = NULL;
		current_count[num] = NULL;
	}
	free(is_valid);
	is_valid = NULL;
	return NULL;
}
/* Release the per-CPU bookkeeping allocated in intel_nhm_register(). */
void intel_nhm_unregister(void)
{
	int cstate;

	for (cstate = 0; cstate < NHM_CSTATE_COUNT; cstate++) {
		free(previous_count[cstate]);
		free(current_count[cstate]);
	}
	free(is_valid);
}
/* Monitor descriptor returned by intel_nhm_register(); name_len is
 * filled in at registration time. */
struct cpuidle_monitor intel_nhm_monitor = {
	.name			= "Nehalem",
	.hw_states_num		= NHM_CSTATE_COUNT,
	.hw_states		= nhm_cstates,
	.start			= nhm_start,
	.stop			= nhm_stop,
	.do_register		= intel_nhm_register,
	.unregister		= intel_nhm_unregister,
	.needs_root		= 1,	/* MSR reads require root */
	.overflow_s		= 922000000 /* 922337203 seconds TSC overflow
					       at 20GHz */
};
#endif
| gpl-2.0 |
SimpleAOSP-Kernel/kernel_hammerhead | arch/mips/kernel/jump_label.c | 10496 | 1304 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2010 Cavium Networks, Inc.
*/
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#ifdef HAVE_JUMP_LABEL
#define J_RANGE_MASK ((1ul << 28) - 1)
/*
 * Patch a MIPS jump-label site: write either a 'j e->target' instruction
 * (enable) or a nop (disable) at e->code, then flush the icache so the
 * new instruction becomes visible.
 */
void arch_jump_label_transform(struct jump_entry *e,
			       enum jump_label_type type)
{
	union mips_instruction insn;
	union mips_instruction *insn_p =
		(union mips_instruction *)(unsigned long)e->code;
	/* Jump only works within a 256MB aligned region. */
	BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
	/* Target must have 4 byte alignment. */
	BUG_ON((e->target & 3) != 0);
	if (type == JUMP_LABEL_ENABLE) {
		insn.j_format.opcode = j_op;
		/* j's target field is word-addressed within the 256MB window */
		insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
	} else {
		insn.word = 0; /* nop */
	}
	/* serialize against other code patchers and CPU hotplug while the
	 * instruction is rewritten and the icache flushed */
	get_online_cpus();
	mutex_lock(&text_mutex);
	*insn_p = insn;
	flush_icache_range((unsigned long)insn_p,
			   (unsigned long)insn_p + sizeof(*insn_p));
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
#endif /* HAVE_JUMP_LABEL */
| gpl-2.0 |
jyunyen/Nexus7_Kernal | arch/ia64/kernel/esi.c | 13312 | 4576 | /*
* Extensible SAL Interface (ESI) support routines.
*
* Copyright (C) 2006 Hewlett-Packard Co
* Alex Williamson <alex.williamson@hp.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/esi.h>
#include <asm/sal.h>
MODULE_AUTHOR("Alex Williamson <alex.williamson@hp.com>");
MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support");
MODULE_LICENSE("GPL");
#define MODULE_NAME "esi"
#define ESI_TABLE_GUID \
EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)
enum esi_systab_entry_type {
ESI_DESC_ENTRY_POINT = 0
};
/*
* Entry type: Size:
* 0 48
*/
#define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)]
typedef struct ia64_esi_desc_entry_point {
u8 type;
u8 reserved1[15];
u64 esi_proc;
u64 gp;
efi_guid_t guid;
} ia64_esi_desc_entry_point_t;
struct pdesc {
void *addr;
void *gp;
};
static struct ia64_sal_systab *esi_systab;
/*
 * Locate and validate the ESI system table.
 *
 * Scans the EFI configuration tables for the ESI GUID, checks the "ESIT"
 * signature and walks every descriptor; only then is the table published
 * in esi_systab for the call wrappers below.  Returns 0 on success,
 * -ENODEV if the table is absent or malformed.
 */
static int __init esi_init (void)
{
	efi_config_table_t *config_tables;
	struct ia64_sal_systab *systab;
	unsigned long esi = 0;
	char *p;
	int i;
	config_tables = __va(efi.systab->tables);
	/* find the ESI table among the EFI configuration tables */
	for (i = 0; i < (int) efi.systab->nr_tables; ++i) {
		if (efi_guidcmp(config_tables[i].guid, ESI_TABLE_GUID) == 0) {
			esi = config_tables[i].table;
			break;
		}
	}
	if (!esi)
		return -ENODEV;
	systab = __va(esi);
	if (strncmp(systab->signature, "ESIT", 4) != 0) {
		printk(KERN_ERR "bad signature in ESI system table!");
		return -ENODEV;
	}
	p = (char *) (systab + 1);
	for (i = 0; i < systab->entry_count; i++) {
		/*
		 * The first byte of each entry type contains the type
		 * descriptor.
		 */
		switch (*p) {
		case ESI_DESC_ENTRY_POINT:
			break;
		default:
			printk(KERN_WARNING "Unknown table type %d found in "
			       "ESI table, ignoring rest of table\n", *p);
			return -ENODEV;
		}
		p += ESI_DESC_SIZE(*p);
	}
	esi_systab = systab;
	return 0;
}
/*
 * Invoke the ESI procedure identified by @guid in virtual mode.
 *
 * Walks the ESI system table for a matching entry point, builds an ia64
 * function descriptor (entry + gp, converted to kernel virtual addresses)
 * and calls it with the given arguments.  @proc_type selects how the call
 * is protected: serialized under sal_lock, MP-safe with local irqs off,
 * or merely non-preemptible.  Returns 0 when a matching procedure was
 * called (result in *isrvp), -1 if the table is absent or no entry
 * matches.
 */
int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp,
		   enum esi_proc_type proc_type, u64 func,
		   u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
		   u64 arg7)
{
	struct ia64_fpreg fr[6];
	unsigned long flags = 0;
	int i;
	char *p;
	if (!esi_systab)
		return -1;
	p = (char *) (esi_systab + 1);
	for (i = 0; i < esi_systab->entry_count; i++) {
		if (*p == ESI_DESC_ENTRY_POINT) {
			ia64_esi_desc_entry_point_t *esi = (void *)p;
			if (!efi_guidcmp(guid, esi->guid)) {
				ia64_sal_handler esi_proc;
				struct pdesc pdesc;
				/* build a callable function descriptor from
				 * the table's physical entry point and gp */
				pdesc.addr = __va(esi->esi_proc);
				pdesc.gp = __va(esi->gp);
				esi_proc = (ia64_sal_handler) &pdesc;
				/* firmware may clobber scratch fp regs */
				ia64_save_scratch_fpregs(fr);
				if (proc_type == ESI_PROC_SERIALIZED)
					spin_lock_irqsave(&sal_lock, flags);
				else if (proc_type == ESI_PROC_MP_SAFE)
					local_irq_save(flags);
				else
					preempt_disable();
				*isrvp = (*esi_proc)(func, arg1, arg2, arg3,
						     arg4, arg5, arg6, arg7);
				if (proc_type == ESI_PROC_SERIALIZED)
					spin_unlock_irqrestore(&sal_lock,
							       flags);
				else if (proc_type == ESI_PROC_MP_SAFE)
					local_irq_restore(flags);
				else
					preempt_enable();
				ia64_load_scratch_fpregs(fr);
				return 0;
			}
		}
		p += ESI_DESC_SIZE(*p);
	}
	return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call);
/*
 * Like ia64_esi_call(), but invokes the procedure in physical mode via
 * esi_call_phys(): the descriptor keeps the table's physical addresses
 * and the call is always serialized under sal_lock.  Returns 0 when a
 * matching procedure was called, -1 otherwise.
 */
int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp,
			u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
			u64 arg5, u64 arg6, u64 arg7)
{
	struct ia64_fpreg fr[6];
	unsigned long flags;
	u64 esi_params[8];
	char *p;
	int i;
	if (!esi_systab)
		return -1;
	p = (char *) (esi_systab + 1);
	for (i = 0; i < esi_systab->entry_count; i++) {
		if (*p == ESI_DESC_ENTRY_POINT) {
			ia64_esi_desc_entry_point_t *esi = (void *)p;
			if (!efi_guidcmp(guid, esi->guid)) {
				ia64_sal_handler esi_proc;
				struct pdesc pdesc;
				/* physical-mode call: keep physical addrs */
				pdesc.addr = (void *)esi->esi_proc;
				pdesc.gp = (void *)esi->gp;
				esi_proc = (ia64_sal_handler) &pdesc;
				/* marshal the arguments for esi_call_phys() */
				esi_params[0] = func;
				esi_params[1] = arg1;
				esi_params[2] = arg2;
				esi_params[3] = arg3;
				esi_params[4] = arg4;
				esi_params[5] = arg5;
				esi_params[6] = arg6;
				esi_params[7] = arg7;
				/* firmware may clobber scratch fp regs */
				ia64_save_scratch_fpregs(fr);
				spin_lock_irqsave(&sal_lock, flags);
				*isrvp = esi_call_phys(esi_proc, esi_params);
				spin_unlock_irqrestore(&sal_lock, flags);
				ia64_load_scratch_fpregs(fr);
				return 0;
			}
		}
		p += ESI_DESC_SIZE(*p);
	}
	return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call_phys);
/* Nothing to tear down: esi_systab only points into firmware memory. */
static void __exit esi_exit (void)
{
}
module_init(esi_init);
module_exit(esi_exit); /* makes module removable... */
| gpl-2.0 |
jerem/linux-rk3066 | arch/powerpc/boot/pq2.c | 14080 | 2422 | /*
* PowerQUICC II support functions
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "types.h"
#include "fsl-soc.h"
#include "pq2.h"
#include "stdio.h"
#include "io.h"
#define PQ2_SCCR (0x10c80/4) /* System Clock Configuration Register */
#define PQ2_SCMR (0x10c88/4) /* System Clock Mode Register */
/* CORECNF field -> core clock multiplier, in halves of the bus clock
 * (see pq2_get_clocks: -1 means core = main clock / 2, 0 is invalid). */
static int pq2_corecnf_map[] = {
	3, 2, 2, 2, 4, 4, 5, 9, 6, 11, 8, 10, 3, 12, 7, -1,
	6, 5, 13, 2, 14, 4, 15, 9, 0, 11, 8, 10, 16, 12, 7, -1
};
/* Get various clocks from crystal frequency.
* Returns zero on failure and non-zero on success.
*/
int pq2_get_clocks(u32 crystal, u32 *sysfreq, u32 *corefreq,
                   u32 *timebase, u32 *brgfreq)
{
	u32 *immr;
	u32 sccr, scmr, mainclk, busclk;
	int corecnf, busdf, plldf, pllmf, dfbrg;
	immr = fsl_get_immr();
	if (!immr) {
		printf("pq2_get_clocks: Couldn't get IMMR base.\r\n");
		return 0;
	}
	sccr = in_be32(&immr[PQ2_SCCR]);
	scmr = in_be32(&immr[PQ2_SCMR]);
	/* decode the divider/multiplier fields from SCCR/SCMR */
	dfbrg = sccr & 3;
	corecnf = (scmr >> 24) & 0x1f;
	busdf = (scmr >> 20) & 0xf;
	plldf = (scmr >> 12) & 1;
	pllmf = scmr & 0xfff;
	/* main PLL output, then the bus clock derived from it */
	mainclk = crystal * (pllmf + 1) / (plldf + 1);
	busclk = mainclk / (busdf + 1);
	if (sysfreq)
		*sysfreq = mainclk / 2;
	if (timebase)
		*timebase = busclk / 4;
	if (brgfreq)
		*brgfreq = mainclk / (1 << ((dfbrg + 1) * 2));
	if (corefreq) {
		int coremult = pq2_corecnf_map[corecnf];
		/* multiplier table entries are in half-steps of the bus
		 * clock; -1 = core at mainclk/2, 0 = invalid setting */
		if (coremult < 0)
			*corefreq = mainclk / 2;
		else if (coremult == 0)
			return 0;
		else
			*corefreq = busclk * coremult / 2;
	}
	return 1;
}
/* Set common device tree fields based on the given clock frequencies. */
void pq2_set_clocks(u32 sysfreq, u32 corefreq, u32 timebase, u32 brgfreq)
{
	void *node;
	dt_fixup_cpu_clocks(corefreq, timebase, sysfreq);
	/* the CPM and BRG nodes carry their own clock-frequency props */
	node = finddevice("/soc/cpm");
	if (node)
		setprop(node, "clock-frequency", &sysfreq, 4);
	node = finddevice("/soc/cpm/brg");
	if (node)
		setprop(node, "clock-frequency", &brgfreq, 4);
}
/* Read the clock tree from the hardware and patch the device tree to
 * match.  Returns non-zero on success, zero if the clocks could not be
 * determined. */
int pq2_fixup_clocks(u32 crystal)
{
	u32 sys, core, tb, brg;

	if (!pq2_get_clocks(crystal, &sys, &core, &tb, &brg))
		return 0;

	pq2_set_clocks(sys, core, tb, brg);
	return 1;
}
| gpl-2.0 |
bugobliterator/unoRTOS | freeRTOS750/lib_time/isLeap.c | 1 | 2071 | /*
* (C)2012 Michael Duane Rice All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer. Redistributions in binary
* form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials
* provided with the distribution. Neither the name of the copyright holders
* nor the names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* $Id$ */
/*
Return 1 if 'year' is a leap year, else 0.
*/
#include <stdlib.h>
/*
 * Return 1 if 'year' is a leap year, else 0.
 *
 * Gregorian rule: divisible by 4, except century years, which must also
 * be divisible by 400.
 */
unsigned char
is_leap_year(int year)
{
	if (year % 4 != 0)
		return 0;
	if (year % 100 != 0)
		return 1;
	return (year % 400 == 0) ? 1 : 0;
}
| gpl-2.0 |
NemesisGamingDE/Cataclysm | src/server/scripts/Kalimdor/TempleOfAhnQiraj/boss_twinemperors.cpp | 1 | 18692 | /*
*
* Copyright (C) 2011-2013 ArkCORE <http://www.arkania.net/>
*
* Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
*
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Boss_Twinemperors
SD%Complete: 95
SDComment:
SDCategory: Temple of Ahn'Qiraj
EndScriptData */
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "temple_of_ahnqiraj.h"
#include "WorldPacket.h"
#include "Item.h"
#include "Spell.h"
/* Spell IDs used by the Twin Emperors encounter. */
enum Spells
{
    SPELL_HEAL_BROTHER          = 7393,
    SPELL_TWIN_TELEPORT         = 800,   // CTRA watches for this spell to start its teleport timer
    SPELL_TWIN_TELEPORT_VISUAL  = 26638, // visual
    SPELL_EXPLODEBUG            = 804,
    SPELL_MUTATE_BUG            = 802,
    SPELL_BERSERK               = 26662,
    SPELL_UPPERCUT              = 26007,
    SPELL_UNBALANCING_STRIKE    = 26613,
    SPELL_SHADOWBOLT            = 26006,
    SPELL_BLIZZARD              = 26607,
    SPELL_ARCANEBURST           = 568,
};
/* Voice-over sound IDs; VL = Vek'lor, VN = Vek'nilash (see IAmVeklor()
 * usage below).  Fixed: SOUND_VN_KILL comment previously said 8661. */
enum Sound
{
    SOUND_VL_AGGRO          = 8657, // 8657 - Aggro - To Late
    SOUND_VL_KILL           = 8658, // 8658 - Kill - You will not
    SOUND_VL_DEATH          = 8659, // 8659 - Death
    SOUND_VN_DEATH          = 8660, // 8660 - Death - Feel
    SOUND_VN_AGGRO          = 8661, // 8661 - Aggro - Let none
    SOUND_VN_KILL           = 8662, // 8662 - Kill - your fate
};
/* Encounter tuning constants (distances in yards, timers in ms). */
enum Misc
{
    PULL_RANGE                  = 50,
    ABUSE_BUG_RANGE             = 20,    // presumably max range for the bug mutate/explode abilities - confirm against usage
    VEKLOR_DIST                 = 20,    // VL will not come to melee when attacking
    TELEPORTTIME                = 30000  // interval used to (re)arm Teleport_Timer
};
// Shared AI base for the Twin Emperors (AQ40). The twins mirror damage onto
// each other, heal one another, periodically swap positions, and turn nearby
// bugs against the raid. Concrete behavior differs per twin via the pure
// virtual hooks below.
struct boss_twinemperorsAI : public ScriptedAI
{
    boss_twinemperorsAI(Creature* creature): ScriptedAI(creature)
    {
        instance = creature->GetInstanceScript();
    }

    InstanceScript* instance;           // used to look up the twin brother
    uint32 Heal_Timer;                  // countdown until next Heal Brother attempt
    uint32 Teleport_Timer;              // countdown until the twins swap places
    bool AfterTeleport;                 // true during the post-teleport stun phase
    uint32 AfterTeleportTimer;          // remaining duration of that phase
    bool DontYellWhenDead;              // suppresses death yell on the twin killed "by proxy"
    uint32 Abuse_Bug_Timer, BugsTimer;  // bug-buff cast timer / bug respawn-scan timer
    bool tspellcasted;                  // teleport spell already cast this phase
    uint32 EnrageTimer;                 // hard-enrage countdown

    virtual bool IAmVeklor() = 0;                       // distinguishes the two concrete AIs
    virtual void Reset() = 0;
    virtual void CastSpellOnBug(Creature* target) = 0;  // VN mutates bugs, VL explodes them

    // Common state reset shared by both twins' Reset() overrides.
    void TwinReset()
    {
        Heal_Timer = 0; // first heal immediately when they get close together
        Teleport_Timer = TELEPORTTIME;
        AfterTeleport = false;
        tspellcasted = false;
        AfterTeleportTimer = 0;
        Abuse_Bug_Timer = urand(10000, 17000);
        BugsTimer = 2000;
        me->ClearUnitState(UNIT_STATE_STUNNED);
        DontYellWhenDead = false;
        EnrageTimer = 15*60000; // 15 minutes
    }

    // Returns the other twin via instance data, or NULL when unavailable.
    Creature* GetOtherBoss()
    {
        if (instance)
            return Unit::GetCreature(*me, instance->GetData64(IAmVeklor() ? DATA_VEKNILASH : DATA_VEKLOR));
        else
            return NULL;
    }

    // Mirrors damage onto the brother, scaled by the max-health ratio so both
    // twins stay at the same health percentage; kills the brother if his
    // mirrored health reaches zero.
    void DamageTaken(Unit* /*done_by*/, uint32 &damage) OVERRIDE
    {
        Unit* pOtherBoss = GetOtherBoss();
        if (pOtherBoss)
        {
            float dPercent = ((float)damage) / ((float)me->GetMaxHealth());
            int odmg = (int)(dPercent * ((float)pOtherBoss->GetMaxHealth()));
            int ohealth = pOtherBoss->GetHealth()-odmg;
            pOtherBoss->SetHealth(ohealth > 0 ? ohealth : 0);
            if (ohealth <= 0)
            {
                pOtherBoss->setDeathState(JUST_DIED);
                pOtherBoss->SetFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE);
            }
        }
    }

    // When one twin dies the other is killed too and made lootable; only the
    // directly-killed twin plays its death sound (the flag silences the other).
    void JustDied(Unit* /*killer*/) OVERRIDE
    {
        Creature* pOtherBoss = GetOtherBoss();
        if (pOtherBoss)
        {
            pOtherBoss->SetHealth(0);
            pOtherBoss->setDeathState(JUST_DIED);
            pOtherBoss->SetFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE);
            CAST_AI(boss_twinemperorsAI, pOtherBoss->AI())->DontYellWhenDead = true;
        }
        if (!DontYellWhenDead) // I hope AI is not threaded
            DoPlaySoundToSet(me, IAmVeklor() ? SOUND_VL_DEATH : SOUND_VN_DEATH);
    }

    void KilledUnit(Unit* /*victim*/) OVERRIDE
    {
        DoPlaySoundToSet(me, IAmVeklor() ? SOUND_VL_KILL : SOUND_VN_KILL);
    }

    // Pulls the brother into combat against the same target when we engage.
    void EnterCombat(Unit* who) OVERRIDE
    {
        DoZoneInCombat();
        Creature* pOtherBoss = GetOtherBoss();
        if (pOtherBoss)
        {
            /// @todo we should activate the other boss location so he can start attackning even if nobody
            // is near I dont know how to do that
            ScriptedAI* otherAI = CAST_AI(ScriptedAI, pOtherBoss->AI());
            if (!pOtherBoss->IsInCombat())
            {
                DoPlaySoundToSet(me, IAmVeklor() ? SOUND_VL_AGGRO : SOUND_VN_AGGRO);
                otherAI->AttackStart(who);
                otherAI->DoZoneInCombat();
            }
        }
    }

    // Heal Brother lands on both twins; top up the one with the larger health
    // pool by the scaled difference so both keep the same health percentage.
    void SpellHit(Unit* caster, const SpellInfo* entry) OVERRIDE
    {
        if (caster == me)
            return;
        Creature* pOtherBoss = GetOtherBoss();
        if (entry->Id != SPELL_HEAL_BROTHER || !pOtherBoss)
            return;
        // add health so we keep same percentage for both brothers
        uint32 mytotal = me->GetMaxHealth(), histotal = pOtherBoss->GetMaxHealth();
        float mult = ((float)mytotal) / ((float)histotal);
        if (mult < 1)
            mult = 1.0f/mult;
#define HEAL_BROTHER_AMOUNT 30000.0f
        uint32 largerAmount = (uint32)((HEAL_BROTHER_AMOUNT * mult) - HEAL_BROTHER_AMOUNT);
        if (mytotal > histotal)
        {
            uint32 h = me->GetHealth()+largerAmount;
            me->SetHealth(std::min(mytotal, h));
        }
        else
        {
            uint32 h = pOtherBoss->GetHealth()+largerAmount;
            pOtherBoss->SetHealth(std::min(histotal, h));
        }
    }

    // Casts Heal Brother when the twins are within 60 yards of each other.
    void TryHealBrother(uint32 diff)
    {
        if (IAmVeklor()) // this spell heals caster and the other brother so let VN cast it
            return;
        if (Heal_Timer <= diff)
        {
            Unit* pOtherBoss = GetOtherBoss();
            if (pOtherBoss && pOtherBoss->IsWithinDist(me, 60))
            {
                DoCast(pOtherBoss, SPELL_HEAL_BROTHER);
                Heal_Timer = 1000;
            }
        } else Heal_Timer -= diff;
    }

    // Swaps the positions of the two twins and puts both into the
    // post-teleport phase. Driven only by Vek'nilash (see comment below).
    void TeleportToMyBrother()
    {
        if (!instance)
            return;
        Teleport_Timer = TELEPORTTIME;
        if (IAmVeklor())
            return; // mechanics handled by veknilash so they teleport exactly at the same time and to correct coordinates
        Creature* pOtherBoss = GetOtherBoss();
        if (pOtherBoss)
        {
            //me->MonsterYell("Teleporting ...", LANG_UNIVERSAL, 0);
            Position thisPos;
            thisPos.Relocate(me);
            Position otherPos;
            otherPos.Relocate(pOtherBoss);
            pOtherBoss->SetPosition(thisPos);
            me->SetPosition(otherPos);
            SetAfterTeleport();
            CAST_AI(boss_twinemperorsAI, pOtherBoss->AI())->SetAfterTeleport();
        }
    }

    // Enters the short stunned/visual phase that follows a position swap.
    void SetAfterTeleport()
    {
        me->InterruptNonMeleeSpells(false);
        DoStopAttack();
        DoResetThreat();
        DoCast(me, SPELL_TWIN_TELEPORT_VISUAL);
        me->AddUnitState(UNIT_STATE_STUNNED);
        AfterTeleport = true;
        AfterTeleportTimer = 2000;
        tspellcasted = false;
    }

    // Ticks down the post-teleport phase. Returns true once normal combat
    // updates may resume; while false, only the enrage and teleport timers
    // keep running so they are not skipped.
    bool TryActivateAfterTTelep(uint32 diff)
    {
        if (AfterTeleport)
        {
            if (!tspellcasted)
            {
                // un-stun just long enough to cast the teleport marker spell
                me->ClearUnitState(UNIT_STATE_STUNNED);
                DoCast(me, SPELL_TWIN_TELEPORT);
                me->AddUnitState(UNIT_STATE_STUNNED);
            }
            tspellcasted = true;
            if (AfterTeleportTimer <= diff)
            {
                AfterTeleport = false;
                me->ClearUnitState(UNIT_STATE_STUNNED);
                if (Unit* nearu = me->SelectNearestTarget(100))
                {
                    //DoYell(nearu->GetName(), LANG_UNIVERSAL, 0);
                    AttackStart(nearu);
                    me->AddThreat(nearu, 10000);
                }
                return true;
            }
            else
            {
                AfterTeleportTimer -= diff;
                // update important timers which would otherwise get skipped
                if (EnrageTimer > diff)
                    EnrageTimer -= diff;
                else
                    EnrageTimer = 0;
                if (Teleport_Timer > diff)
                    Teleport_Timer -= diff;
                else
                    Teleport_Timer = 0;
                return false;
            }
        }
        else
        {
            return true;
        }
    }

    // Aggro-on-proximity with an enlarged pull range and a Z cutoff so
    // players on the stairs above/below are not pulled.
    void MoveInLineOfSight(Unit* who) OVERRIDE
    {
        if (!who || me->GetVictim())
            return;
        if (me->CanCreatureAttack(who))
        {
            float attackRadius = me->GetAttackDistance(who);
            if (attackRadius < PULL_RANGE)
                attackRadius = PULL_RANGE;
            if (me->IsWithinDistInMap(who, attackRadius) && me->GetDistanceZ(who) <= /*CREATURE_Z_ATTACK_RANGE*/7 /*there are stairs*/)
            {
                //if (who->HasStealthAura())
                //    who->RemoveSpellsCausingAura(SPELL_AURA_MOD_STEALTH);
                AttackStart(who);
            }
        }
    }

    // Respawns any dead bugs (entries 15316/15317) within 150 yards and
    // returns a pseudo-randomly chosen live bug within ABUSE_BUG_RANGE,
    // or NULL when none qualifies.
    Creature* RespawnNearbyBugsAndGetOne()
    {
        std::list<Creature*> lUnitList;
        me->GetCreatureListWithEntryInGrid(lUnitList, 15316, 150.0f);
        me->GetCreatureListWithEntryInGrid(lUnitList, 15317, 150.0f);
        if (lUnitList.empty())
            return NULL;
        Creature* nearb = NULL;
        for (std::list<Creature*>::const_iterator iter = lUnitList.begin(); iter != lUnitList.end(); ++iter)
        {
            Creature* c = *iter;
            if (c)
            {
                if (c->isDead())
                {
                    c->Respawn();
                    c->setFaction(7); // neutral until a twin "abuses" it
                    c->RemoveAllAuras();
                }
                if (c->IsWithinDistInMap(me, ABUSE_BUG_RANGE))
                {
                    // keep the first candidate, then replace with 1-in-4 chance
                    if (!nearb || (rand()%4) == 0)
                        nearb = c;
                }
            }
        }
        return nearb;
    }

    // Periodically respawns bugs and buffs one of them via CastSpellOnBug().
    // Retries after 1s when no bug was in range.
    void HandleBugs(uint32 diff)
    {
        if (BugsTimer < diff || Abuse_Bug_Timer <= diff)
        {
            Creature* c = RespawnNearbyBugsAndGetOne();
            if (Abuse_Bug_Timer <= diff)
            {
                if (c)
                {
                    CastSpellOnBug(c);
                    Abuse_Bug_Timer = urand(10000, 17000);
                }
                else
                {
                    Abuse_Bug_Timer = 1000;
                }
            }
            else
            {
                Abuse_Bug_Timer -= diff;
            }
            BugsTimer = 2000;
        }
        else
        {
            BugsTimer -= diff;
            Abuse_Bug_Timer -= diff;
        }
    }

    // Casts Berserk once the enrage timer elapses and no other cast is active.
    void CheckEnrage(uint32 diff)
    {
        if (EnrageTimer <= diff)
        {
            if (!me->IsNonMeleeSpellCasted(true))
            {
                DoCast(me, SPELL_BERSERK);
                EnrageTimer = 60*60000; // effectively never again
            } else EnrageTimer = 0; // retry next update
        } else EnrageTimer-=diff;
    }
};
// Vek'nilash: the melee twin. Immune to magic damage; uses Unbalancing
// Strike and Uppercut, mutates nearby bugs, and drives the twin teleport.
class boss_veknilash : public CreatureScript
{
public:
    boss_veknilash() : CreatureScript("boss_veknilash") { }

    CreatureAI* GetAI(Creature* creature) const OVERRIDE
    {
        return new boss_veknilashAI(creature);
    }

    struct boss_veknilashAI : public boss_twinemperorsAI
    {
        bool IAmVeklor() {return false;}
        boss_veknilashAI(Creature* creature) : boss_twinemperorsAI(creature) {}

        uint32 UpperCut_Timer;
        uint32 UnbalancingStrike_Timer;
        uint32 Scarabs_Timer;
        // NOTE(review): Rand/RandX/RandY/Summoned appear unused in this block
        int Rand;
        int RandX;
        int RandY;
        Creature* Summoned;

        void Reset() OVERRIDE
        {
            TwinReset();
            UpperCut_Timer = urand(14000, 29000);
            UnbalancingStrike_Timer = urand(8000, 18000);
            Scarabs_Timer = urand(7000, 14000);
            //Added. Can be removed if its included in DB.
            me->ApplySpellImmune(0, IMMUNITY_DAMAGE, SPELL_SCHOOL_MASK_MAGIC, true);
        }

        // Turns a bug hostile, sends it at our current top-threat target and
        // mutates it at full health.
        void CastSpellOnBug(Creature* target)
        {
            target->setFaction(14);
            target->AI()->AttackStart(me->getThreatManager().getHostilTarget());
            target->AddAura(SPELL_MUTATE_BUG, target);
            target->SetFullHealth();
        }

        void UpdateAI(uint32 diff) OVERRIDE
        {
            //Return since we have no target
            if (!UpdateVictim())
                return;
            // stay passive while the post-teleport phase is running
            if (!TryActivateAfterTTelep(diff))
                return;
            //UnbalancingStrike_Timer
            if (UnbalancingStrike_Timer <= diff)
            {
                DoCastVictim(SPELL_UNBALANCING_STRIKE);
                UnbalancingStrike_Timer = 8000+rand()%12000;
            } else UnbalancingStrike_Timer -= diff;
            if (UpperCut_Timer <= diff)
            {
                Unit* randomMelee = SelectTarget(SELECT_TARGET_RANDOM, 0, NOMINAL_MELEE_RANGE, true);
                if (randomMelee)
                    DoCast(randomMelee, SPELL_UPPERCUT);
                UpperCut_Timer = 15000+rand()%15000;
            } else UpperCut_Timer -= diff;
            HandleBugs(diff);
            //Heal brother when 60yrds close
            TryHealBrother(diff);
            //Teleporting to brother
            if (Teleport_Timer <= diff)
            {
                TeleportToMyBrother();
            } else Teleport_Timer -= diff;
            CheckEnrage(diff);
            DoMeleeAttackIfReady();
        }
    };
};
// Vek'lor: the caster twin. Immune to physical damage, deals no melee
// damage, keeps distance (VEKLOR_DIST) and casts Shadow Bolt / Blizzard /
// Arcane Burst; explodes nearby bugs.
class boss_veklor : public CreatureScript
{
public:
    boss_veklor() : CreatureScript("boss_veklor") { }

    CreatureAI* GetAI(Creature* creature) const OVERRIDE
    {
        return new boss_veklorAI(creature);
    }

    struct boss_veklorAI : public boss_twinemperorsAI
    {
        bool IAmVeklor() {return true;}
        boss_veklorAI(Creature* creature) : boss_twinemperorsAI(creature) {}

        uint32 ShadowBolt_Timer;
        uint32 Blizzard_Timer;
        uint32 ArcaneBurst_Timer;
        uint32 Scorpions_Timer;
        // NOTE(review): Rand/RandX/RandY/Summoned appear unused in this block
        int Rand;
        int RandX;
        int RandY;
        Creature* Summoned;

        void Reset() OVERRIDE
        {
            TwinReset();
            ShadowBolt_Timer = 0;
            Blizzard_Timer = urand(15000, 20000);
            ArcaneBurst_Timer = 1000;
            Scorpions_Timer = urand(7000, 14000);
            //Added. Can be removed if its included in DB.
            me->ApplySpellImmune(0, IMMUNITY_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, true);
            // zero out melee damage: VL is a pure caster
            me->SetBaseWeaponDamage(BASE_ATTACK, MINDAMAGE, 0);
            me->SetBaseWeaponDamage(BASE_ATTACK, MAXDAMAGE, 0);
        }

        // Turns a bug hostile and primes it to explode at full health.
        void CastSpellOnBug(Creature* target)
        {
            target->setFaction(14);
            target->AddAura(SPELL_EXPLODEBUG, target);
            target->SetFullHealth();
        }

        void UpdateAI(uint32 diff) OVERRIDE
        {
            //Return since we have no target
            if (!UpdateVictim())
                return;
            // reset arcane burst after teleport - we need to do this because
            // when VL jumps to VN's location there will be a warrior who will get only 2s to run away
            // which is almost impossible
            if (AfterTeleport)
                ArcaneBurst_Timer = 5000;
            if (!TryActivateAfterTTelep(diff))
                return;
            //ShadowBolt_Timer
            if (ShadowBolt_Timer <= diff)
            {
                // chase into range first, only cast once within 45 yards
                if (!me->IsWithinDist(me->GetVictim(), 45.0f))
                    me->GetMotionMaster()->MoveChase(me->GetVictim(), VEKLOR_DIST, 0);
                else
                    DoCastVictim(SPELL_SHADOWBOLT);
                ShadowBolt_Timer = 2000;
            } else ShadowBolt_Timer -= diff;
            //Blizzard_Timer
            if (Blizzard_Timer <= diff)
            {
                Unit* target = NULL;
                target = SelectTarget(SELECT_TARGET_RANDOM, 0, 45, true);
                if (target)
                    DoCast(target, SPELL_BLIZZARD);
                Blizzard_Timer = 15000+rand()%15000;
            } else Blizzard_Timer -= diff;
            if (ArcaneBurst_Timer <= diff)
            {
                Unit* mvic;
                if ((mvic=SelectTarget(SELECT_TARGET_NEAREST, 0, NOMINAL_MELEE_RANGE, true)) != NULL)
                {
                    DoCast(mvic, SPELL_ARCANEBURST);
                    ArcaneBurst_Timer = 5000;
                }
            } else ArcaneBurst_Timer -= diff;
            HandleBugs(diff);
            //Heal brother when 60yrds close
            TryHealBrother(diff);
            //Teleporting to brother
            if (Teleport_Timer <= diff)
            {
                TeleportToMyBrother();
            } else Teleport_Timer -= diff;
            CheckEnrage(diff);
            //VL doesn't melee
            //DoMeleeAttackIfReady();
        }

        // Custom AttackStart: engage without melee and keep caster distance.
        void AttackStart(Unit* who) OVERRIDE
        {
            if (!who)
                return;
            if (who->isTargetableForAttack())
            {
                // VL doesn't melee
                if (me->Attack(who, false))
                {
                    me->GetMotionMaster()->MoveChase(who, VEKLOR_DIST, 0);
                    me->AddThreat(who, 0.0f);
                }
            }
        }
    };
};
// Registers both Twin Emperor scripts with the script manager.
void AddSC_boss_twinemperors()
{
    new boss_veknilash();
    new boss_veklor();
}
| gpl-2.0 |
gongminmin/KlayGE | DXBC2GLSL/Src/DXBC2GLSL.cpp | 1 | 5884 | /**
* @file DXBC2GLSL.cpp
* @author Shenghua Lin, Minmin Gong
*
* @section DESCRIPTION
*
* This source file is part of KlayGE
* For the latest info, see http://www.klayge.org
*
* @section LICENSE
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* You may alternatively use this source under the terms of
* the KlayGE Proprietary License (KPL). You can obtained such a license
* from http://www.klayge.org/licensing/.
*/
#include <DXBC2GLSL/DXBC2GLSL.hpp>
#include <KFL/CustomizedStreamBuf.hpp>
#include <DXBC2GLSL/DXBC.hpp>
#include <DXBC2GLSL/GLSLGen.hpp>
#include <sstream>
namespace DXBC2GLSL
{
    // Returns the default GLSL generation rule mask for the given GLSL version.
    uint32_t DXBC2GLSL::DefaultRules(GLSLVersion version)
    {
        return GLSLGen::DefaultRules(version);
    }

    // Convenience overload: converts a DXBC blob using the default rules for
    // the requested GLSL version.
    void DXBC2GLSL::FeedDXBC(void const * dxbc_data,
        bool has_gs, bool has_ps, ShaderTessellatorPartitioning ds_partitioning, ShaderTessellatorOutputPrimitive ds_output_primitive,
        GLSLVersion version)
    {
        this->FeedDXBC(dxbc_data, has_gs, has_ps, ds_partitioning, ds_output_primitive, version, this->DefaultRules(version));
    }

    // Parses the DXBC container, translates its shader chunk to GLSL and
    // stores the result in glsl_. Leaves glsl_ untouched (and shader_ unset)
    // when the blob cannot be parsed or contains no shader chunk.
    void DXBC2GLSL::FeedDXBC(void const * dxbc_data,
        bool has_gs, bool has_ps, ShaderTessellatorPartitioning ds_partitioning, ShaderTessellatorOutputPrimitive ds_output_primitive,
        GLSLVersion version, uint32_t glsl_rules)
    {
        dxbc_ = DXBCParse(dxbc_data);
        if (dxbc_)
        {
            if (dxbc_->shader_chunk)
            {
                shader_ = ShaderParse(*dxbc_);
                KlayGE::StringOutputStreamBuf glsl_buff(glsl_);
                std::ostream ss(&glsl_buff);
                GLSLGen converter;
                converter.FeedDXBC(shader_, has_gs, has_ps, ds_partitioning, ds_output_primitive, version, glsl_rules);
                converter.ToGLSL(ss);
            }
        }
    }

    // Translated GLSL source produced by the last successful FeedDXBC() call.
    std::string const & DXBC2GLSL::GLSLString() const
    {
        return glsl_;
    }

    // NOTE(review): all accessors below assume a successful FeedDXBC()
    // (shader_ valid); none of them guards against a null/unset shader_.

    // Number of entries in the input signature.
    uint32_t DXBC2GLSL::NumInputParams() const
    {
        return static_cast<uint32_t>(shader_->params_in.size());
    }

    // Input-signature parameter descriptor at the given index.
    DXBCSignatureParamDesc const & DXBC2GLSL::InputParam(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->params_in.size());
        return shader_->params_in[index];
    }

    // Number of entries in the output signature.
    uint32_t DXBC2GLSL::NumOutputParams() const
    {
        return static_cast<uint32_t>(shader_->params_out.size());
    }

    // Output-signature parameter descriptor at the given index.
    DXBCSignatureParamDesc const & DXBC2GLSL::OutputParam(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->params_out.size());
        return shader_->params_out[index];
    }

    // Number of constant buffers declared by the shader.
    uint32_t DXBC2GLSL::NumCBuffers() const
    {
        return static_cast<uint32_t>(shader_->cbuffers.size());
    }

    // Number of variables in the given constant buffer.
    uint32_t DXBC2GLSL::NumVariables(uint32_t cb_index) const
    {
        BOOST_ASSERT(cb_index < shader_->cbuffers.size());
        return static_cast<uint32_t>(shader_->cbuffers[cb_index].vars.size());
    }

    // Name of a variable within a constant buffer.
    char const * DXBC2GLSL::VariableName(uint32_t cb_index, uint32_t var_index) const
    {
        BOOST_ASSERT(cb_index < shader_->cbuffers.size());
        BOOST_ASSERT(var_index < shader_->cbuffers[cb_index].vars.size());
        return shader_->cbuffers[cb_index].vars[var_index].var_desc.name;
    }

    // Whether a constant-buffer variable is referenced by the shader
    // (non-zero flags field is treated as "used").
    bool DXBC2GLSL::VariableUsed(uint32_t cb_index, uint32_t var_index) const
    {
        BOOST_ASSERT(cb_index < shader_->cbuffers.size());
        BOOST_ASSERT(var_index < shader_->cbuffers[cb_index].vars.size());
        return shader_->cbuffers[cb_index].vars[var_index].var_desc.flags ? true : false;
    }

    // Number of resource bindings (textures, samplers, buffers, ...).
    uint32_t DXBC2GLSL::NumResources() const
    {
        return static_cast<uint32_t>(shader_->resource_bindings.size());
    }

    // Name of the resource binding at the given index.
    char const * DXBC2GLSL::ResourceName(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->resource_bindings.size());
        return shader_->resource_bindings[index].name;
    }

    // Register/bind point of the resource binding at the given index.
    uint32_t DXBC2GLSL::ResourceBindPoint(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->resource_bindings.size());
        return shader_->resource_bindings[index].bind_point;
    }

    // Input type (texture, sampler, cbuffer, ...) of the resource binding.
    ShaderInputType DXBC2GLSL::ResourceType(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->resource_bindings.size());
        return shader_->resource_bindings[index].type;
    }

    // SRV dimension (1D/2D/3D/cube/...) of the resource binding.
    ShaderSRVDimension DXBC2GLSL::ResourceDimension(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->resource_bindings.size());
        return shader_->resource_bindings[index].dimension;
    }

    // Whether the resource is actually referenced (unused flag not set).
    bool DXBC2GLSL::ResourceUsed(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->resource_bindings.size());
        return !(shader_->resource_bindings[index].flags & DSIF_Unused);
    }

    // Geometry-shader input primitive type.
    ShaderPrimitive DXBC2GLSL::GSInputPrimitive() const
    {
        return shader_->gs_input_primitive;
    }

    // Number of geometry-shader output topology declarations.
    uint32_t DXBC2GLSL::NumGSOutputTopology() const
    {
        return static_cast<uint32_t>(shader_->gs_output_topology.size());
    }

    // Geometry-shader output topology at the given index.
    ShaderPrimitiveTopology DXBC2GLSL::GSOutputTopology(uint32_t index) const
    {
        BOOST_ASSERT(index < shader_->gs_output_topology.size());
        return shader_->gs_output_topology[index];
    }

    // Declared maximum number of vertices a GS invocation may emit.
    uint32_t DXBC2GLSL::MaxGSOutputVertex() const
    {
        return shader_->max_gs_output_vertex;
    }

    // Declared geometry-shader instance count.
    uint32_t DXBC2GLSL::GSInstanceCount() const
    {
        return shader_->gs_instance_count;
    }

    // Domain-shader tessellator partitioning mode.
    ShaderTessellatorPartitioning DXBC2GLSL::DSPartitioning() const
    {
        return shader_->ds_tessellator_partitioning;
    }

    // Domain-shader tessellator output primitive.
    ShaderTessellatorOutputPrimitive DXBC2GLSL::DSOutputPrimitive() const
    {
        return shader_->ds_tessellator_output_primitive;
    }
}
| gpl-2.0 |
Eve-Lyn/Mong2-Trinity | src/server/game/Instances/InstanceScript.cpp | 1 | 15891 | /*
* Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "InstanceScript.h"
#include "DatabaseEnv.h"
#include "Map.h"
#include "Player.h"
#include "GameObject.h"
#include "Creature.h"
#include "CreatureAI.h"
#include "Log.h"
#include "LFGMgr.h"
// Persists the instance's serialized state plus the completed-encounter
// bitmask to the character database; no-op when there is nothing to save.
void InstanceScript::SaveToDB()
{
    std::string data = GetSaveData();
    if (data.empty())
        return;
    PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_UPDATE_INSTANCE_DATA);
    stmt->setUInt32(0, GetCompletedEncounterMask());
    stmt->setString(1, data);
    stmt->setUInt32(2, instance->GetInstanceId());
    CharacterDatabase.Execute(stmt);
}
// Opens/closes a door-like gameobject. If no object pointer is supplied it is
// looked up by GUID. NOTE(review): the "failed" message is also logged when
// the object exists but is already in the requested state.
void InstanceScript::HandleGameObject(uint64 GUID, bool open, GameObject* go)
{
    if (!go)
        go = instance->GetGameObject(GUID);
    if (go && go->GetGoState() != (open ? GO_STATE_ACTIVE : GO_STATE_READY))
        go->SetGoState(open ? GO_STATE_ACTIVE : GO_STATE_READY);
    else
        sLog->outDebug(LOG_FILTER_TSCR, "TSCR: InstanceScript: HandleGameObject failed");
}
// Returns true when any tracked boss encounter is currently IN_PROGRESS.
bool InstanceScript::IsEncounterInProgress() const
{
    for (std::vector<BossInfo>::const_iterator itr = bosses.begin(); itr != bosses.end(); ++itr)
        if (itr->state == IN_PROGRESS)
            return true;
    return false;
}
// Loads the minion table (terminated by a zero entry), linking each minion
// creature entry to its boss; entries with out-of-range bossId are skipped.
void InstanceScript::LoadMinionData(const MinionData* data)
{
    while (data->entry)
    {
        if (data->bossId < bosses.size())
            minions.insert(std::make_pair(data->entry, MinionInfo(&bosses[data->bossId])));
        ++data;
    }
    sLog->outDebug(LOG_FILTER_TSCR, "InstanceScript::LoadMinionData: " UI64FMTD " minions loaded.", uint64(minions.size()));
}
// Loads the door table (terminated by a zero entry), linking each door
// gameobject entry to its boss, door type and boundary side.
void InstanceScript::LoadDoorData(const DoorData* data)
{
    while (data->entry)
    {
        if (data->bossId < bosses.size())
            doors.insert(std::make_pair(data->entry, DoorInfo(&bosses[data->bossId], data->type, BoundaryType(data->boundary))));
        ++data;
    }
    sLog->outDebug(LOG_FILTER_TSCR, "InstanceScript::LoadDoorData: " UI64FMTD " doors loaded.", uint64(doors.size()));
}
// Keeps a boss minion in sync with its boss's encounter state: respawn/evade
// on reset, respawn/engage while the encounter is running.
void InstanceScript::UpdateMinionState(Creature* minion, EncounterState state)
{
    switch (state)
    {
        case NOT_STARTED:
            if (!minion->isAlive())
                minion->Respawn();
            else if (minion->isInCombat())
                minion->AI()->EnterEvadeMode();
            break;
        case IN_PROGRESS:
            if (!minion->isAlive())
                minion->Respawn();
            else if (!minion->getVictim())
                minion->AI()->DoZoneInCombat();
            break;
        default:
            break;
    }
}
// Recomputes one door's open/closed state from all boss encounters bound to
// its entry: room doors close during the fight, passage doors open when the
// boss is done, spawn holes open only during the fight. The door opens only
// if every bound encounter agrees (the loop stops at the first veto).
void InstanceScript::UpdateDoorState(GameObject* door)
{
    DoorInfoMap::iterator lower = doors.lower_bound(door->GetEntry());
    DoorInfoMap::iterator upper = doors.upper_bound(door->GetEntry());
    if (lower == upper)
        return;
    bool open = true;
    for (DoorInfoMap::iterator itr = lower; itr != upper && open; ++itr)
    {
        switch (itr->second.type)
        {
            case DOOR_TYPE_ROOM:
                open = (itr->second.bossInfo->state != IN_PROGRESS);
                break;
            case DOOR_TYPE_PASSAGE:
                open = (itr->second.bossInfo->state == DONE);
                break;
            case DOOR_TYPE_SPAWN_HOLE:
                open = (itr->second.bossInfo->state == IN_PROGRESS);
                break;
            default:
                break;
        }
    }
    door->SetGoState(open ? GO_STATE_ACTIVE : GO_STATE_READY);
}
// Registers (add=true) or unregisters a spawned door with every boss bound to
// its entry. On registration the door's position also defines the encounter
// boundary value for its side, and the door state is refreshed immediately.
void InstanceScript::AddDoor(GameObject* door, bool add)
{
    DoorInfoMap::iterator lower = doors.lower_bound(door->GetEntry());
    DoorInfoMap::iterator upper = doors.upper_bound(door->GetEntry());
    if (lower == upper)
        return;
    for (DoorInfoMap::iterator itr = lower; itr != upper; ++itr)
    {
        DoorInfo const& data = itr->second;
        if (add)
        {
            data.bossInfo->door[data.type].insert(door);
            // axis-aligned sides use a single coordinate; diagonal sides use
            // the sum/difference of X and Y
            switch (data.boundary)
            {
                default:
                case BOUNDARY_NONE:
                    break;
                case BOUNDARY_N:
                case BOUNDARY_S:
                    data.bossInfo->boundary[data.boundary] = door->GetPositionX();
                    break;
                case BOUNDARY_E:
                case BOUNDARY_W:
                    data.bossInfo->boundary[data.boundary] = door->GetPositionY();
                    break;
                case BOUNDARY_NW:
                case BOUNDARY_SE:
                    data.bossInfo->boundary[data.boundary] = door->GetPositionX() + door->GetPositionY();
                    break;
                case BOUNDARY_NE:
                case BOUNDARY_SW:
                    data.bossInfo->boundary[data.boundary] = door->GetPositionX() - door->GetPositionY();
                    break;
            }
        }
        else
            data.bossInfo->door[data.type].erase(door);
    }
    if (add)
        UpdateDoorState(door);
}
// Registers (add=true) or unregisters a spawned minion with its boss; no-op
// for creatures that are not in the minion table.
void InstanceScript::AddMinion(Creature* minion, bool add)
{
    MinionInfoMap::iterator itr = minions.find(minion->GetEntry());
    if (itr == minions.end())
        return;
    if (add)
        itr->second.bossInfo->minion.insert(minion);
    else
        itr->second.bossInfo->minion.erase(minion);
}
// Sets encounter state for boss `id`, refusing DONE while any world-boss
// minion is still alive. Propagates the new state to doors and minions and
// persists it. Returns true only when the state actually changed at runtime
// (the TO_BE_DECIDED branch is the initial load and returns false).
bool InstanceScript::SetBossState(uint32 id, EncounterState state)
{
    if (id < bosses.size())
    {
        BossInfo* bossInfo = &bosses[id];
        if (bossInfo->state == TO_BE_DECIDED) // loading
        {
            bossInfo->state = state;
            //sLog->outError("Inialize boss %u state as %u.", id, (uint32)state);
            return false;
        }
        else
        {
            if (bossInfo->state == state)
                return false;
            if (state == DONE)
                for (MinionSet::iterator i = bossInfo->minion.begin(); i != bossInfo->minion.end(); ++i)
                    if ((*i)->isWorldBoss() && (*i)->isAlive())
                        return false;
            bossInfo->state = state;
            SaveToDB();
        }
        for (uint32 type = 0; type < MAX_DOOR_TYPES; ++type)
            for (DoorSet::iterator i = bossInfo->door[type].begin(); i != bossInfo->door[type].end(); ++i)
                UpdateDoorState(*i);
        for (MinionSet::iterator i = bossInfo->minion.begin(); i != bossInfo->minion.end(); ++i)
            UpdateMinionState(*i, state);
        return true;
    }
    return false;
}
// Parses a space-separated list of saved encounter states (one value per
// boss, in boss-id order) and applies each valid one via SetBossState().
// Returns the parsed stream content, or an empty string when no data was
// supplied. Values >= TO_BE_DECIDED are ignored.
std::string InstanceScript::LoadBossState(const char * data)
{
    if (!data)
        return std::string(); // was `return NULL;` — constructing std::string
                              // from a null char* is undefined behavior
    std::istringstream loadStream(data);
    uint32 buff;
    uint32 bossId = 0;
    for (std::vector<BossInfo>::iterator i = bosses.begin(); i != bosses.end(); ++i, ++bossId)
    {
        loadStream >> buff;
        if (buff < TO_BE_DECIDED)
            SetBossState(bossId, (EncounterState)buff);
    }
    return loadStream.str();
}
// Serializes all boss states as a space-separated integer list (the format
// LoadBossState() reads back).
std::string InstanceScript::GetBossSaveData()
{
    std::ostringstream saveStream;
    for (std::vector<BossInfo>::iterator i = bosses.begin(); i != bosses.end(); ++i)
        saveStream << (uint32)i->state << ' ';
    return saveStream.str();
}
// Toggles a door/button gameobject: activates it when ready, resets it when
// already activated. Logs an error for non-door/button object types.
void InstanceScript::DoUseDoorOrButton(uint64 uiGuid, uint32 uiWithRestoreTime, bool bUseAlternativeState)
{
    if (!uiGuid)
        return;
    GameObject* go = instance->GetGameObject(uiGuid);
    if (go)
    {
        if (go->GetGoType() == GAMEOBJECT_TYPE_DOOR || go->GetGoType() == GAMEOBJECT_TYPE_BUTTON)
        {
            if (go->getLootState() == GO_READY)
                go->UseDoorOrButton(uiWithRestoreTime, bUseAlternativeState);
            else if (go->getLootState() == GO_ACTIVATED)
                go->ResetDoorOrButton();
        }
        else
            sLog->outError("SD2: Script call DoUseDoorOrButton, but gameobject entry %u is type %u.", go->GetEntry(), go->GetGoType());
    }
}
// Respawns a despawned gameobject and schedules its next despawn; ignores
// types that should be handled elsewhere and objects already spawned.
void InstanceScript::DoRespawnGameObject(uint64 uiGuid, uint32 uiTimeToDespawn)
{
    if (GameObject* go = instance->GetGameObject(uiGuid))
    {
        //not expect any of these should ever be handled
        if (go->GetGoType() == GAMEOBJECT_TYPE_FISHINGNODE || go->GetGoType() == GAMEOBJECT_TYPE_DOOR ||
            go->GetGoType() == GAMEOBJECT_TYPE_BUTTON || go->GetGoType() == GAMEOBJECT_TYPE_TRAP)
            return;
        if (go->isSpawned())
            return;
        go->SetRespawnTime(uiTimeToDespawn);
    }
}
// Broadcasts a world-state update to every player currently in the instance.
void InstanceScript::DoUpdateWorldState(uint32 uiStateId, uint32 uiStateData)
{
    Map::PlayerList const& lPlayers = instance->GetPlayers();
    if (!lPlayers.isEmpty())
    {
        for (Map::PlayerList::const_iterator itr = lPlayers.begin(); itr != lPlayers.end(); ++itr)
            if (Player* player = itr->getSource())
                player->SendUpdateWorldState(uiStateId, uiStateData);
    }
    else
        sLog->outDebug(LOG_FILTER_TSCR, "TSCR: DoUpdateWorldState attempt send data but no players in map.");
}
// Send Notify to all players in instance.
// Formats `format` with the variadic arguments and sends the resulting
// notification to every player's session.
void InstanceScript::DoSendNotifyToInstance(const char *format, ...)
{
    InstanceMap::PlayerList const &PlayerList = instance->GetPlayers();
    if (!PlayerList.isEmpty())
    {
        va_list ap;
        va_start(ap, format);
        for (Map::PlayerList::const_iterator i = PlayerList.begin(); i != PlayerList.end(); ++i)
        {
            if (Player* player = i->getSource())
                if (WorldSession* pSession = player->GetSession())
                {
                    // A va_list may only be traversed once; hand each consumer
                    // its own copy (re-using `ap` for several players is UB).
                    va_list apCopy;
                    va_copy(apCopy, ap);
                    pSession->SendNotification(format, apCopy);
                    va_end(apCopy);
                }
        }
        va_end(ap);
    }
}
// Update Achievement Criteria for all players in instance
void InstanceScript::DoUpdateAchievementCriteria(AchievementCriteriaTypes type, uint32 miscValue1 /*= 0*/, uint32 miscValue2 /*= 0*/, Unit* unit /*= NULL*/)
{
    Map::PlayerList const &PlayerList = instance->GetPlayers();
    if (!PlayerList.isEmpty())
        for (Map::PlayerList::const_iterator i = PlayerList.begin(); i != PlayerList.end(); ++i)
            if (Player* player = i->getSource())
                player->UpdateAchievementCriteria(type, miscValue1, miscValue2, unit);
}
// Start timed achievement for all players in instance
void InstanceScript::DoStartTimedAchievement(AchievementCriteriaTimedTypes type, uint32 entry)
{
    Map::PlayerList const &PlayerList = instance->GetPlayers();
    if (!PlayerList.isEmpty())
        for (Map::PlayerList::const_iterator i = PlayerList.begin(); i != PlayerList.end(); ++i)
            if (Player* player = i->getSource())
                player->GetAchievementMgr().StartTimedAchievement(type, entry);
}
// Stop timed achievement for all players in instance
void InstanceScript::DoStopTimedAchievement(AchievementCriteriaTimedTypes type, uint32 entry)
{
    Map::PlayerList const &PlayerList = instance->GetPlayers();
    if (!PlayerList.isEmpty())
        for (Map::PlayerList::const_iterator i = PlayerList.begin(); i != PlayerList.end(); ++i)
            if (Player* player = i->getSource())
                player->GetAchievementMgr().RemoveTimedAchievement(type, entry);
}
// Remove Auras due to Spell on all players in instance (pets included).
void InstanceScript::DoRemoveAurasDueToSpellOnPlayers(uint32 spell)
{
    Map::PlayerList const& PlayerList = instance->GetPlayers();
    if (!PlayerList.isEmpty())
    {
        for (Map::PlayerList::const_iterator itr = PlayerList.begin(); itr != PlayerList.end(); ++itr)
        {
            if (Player* player = itr->getSource())
            {
                player->RemoveAurasDueToSpell(spell);
                if (Pet* pet = player->GetPet())
                    pet->RemoveAurasDueToSpell(spell);
            }
        }
    }
}
// Cast spell on all players in instance (each player self-casts, triggered).
void InstanceScript::DoCastSpellOnPlayers(uint32 spell)
{
    Map::PlayerList const &PlayerList = instance->GetPlayers();
    if (!PlayerList.isEmpty())
        for (Map::PlayerList::const_iterator i = PlayerList.begin(); i != PlayerList.end(); ++i)
            if (Player* player = i->getSource())
                player->CastSpell(player, spell, true);
}
// Default implementation: instance scripts with scripted achievement criteria
// must override this. Logs an error and denies the criteria otherwise.
bool InstanceScript::CheckAchievementCriteriaMeet(uint32 criteria_id, Player const* /*source*/, Unit const* /*target*/ /*= NULL*/, uint32 /*miscvalue1*/ /*= 0*/)
{
    sLog->outError("Achievement system call InstanceScript::CheckAchievementCriteriaMeet but instance script for map %u not have implementation for achievement criteria %u",
        instance->GetId(), criteria_id);
    return false;
}
// Broadcasts an encounter-frame update packet to all players in the
// instance. The packet payload depends on the frame message type: add/remove
// (and type 2) carry a packed unit GUID plus one byte, types 3/4/6 carry two
// bytes, type 5 one byte, type 7 nothing extra.
void InstanceScript::SendEncounterUnit(uint32 type, Unit* unit /*= NULL*/, uint8 param1 /*= 0*/, uint8 param2 /*= 0*/)
{
    // size of this packet is at most 15 (usually less)
    WorldPacket data(SMSG_UPDATE_INSTANCE_ENCOUNTER_UNIT, 15);
    data << uint32(type);
    switch (type)
    {
        case ENCOUNTER_FRAME_ADD:
        case ENCOUNTER_FRAME_REMOVE:
        case 2:
            // NOTE(review): `unit` is dereferenced here — callers must not
            // pass NULL for these message types.
            data.append(unit->GetPackGUID());
            data << uint8(param1);
            break;
        case 3:
        case 4:
        case 6:
            data << uint8(param1);
            data << uint8(param2);
            break;
        case 5:
            data << uint8(param1);
            break;
        case 7:
        default:
            break;
    }
    instance->SendToPlayers(&data);
}
// Marks the dungeon encounter matching (type, creditEntry) as completed in
// the completed-encounter bitmask. If it is the final encounter of an LFG
// dungeon, rewards every eligible player in range of `source` (or everyone,
// when source is NULL).
void InstanceScript::UpdateEncounterState(EncounterCreditType type, uint32 creditEntry, Unit* source)
{
    DungeonEncounterList const* encounters = sObjectMgr->GetDungeonEncounterList(instance->GetId(), instance->GetDifficulty());
    if (!encounters)
        return;
    for (DungeonEncounterList::const_iterator itr = encounters->begin(); itr != encounters->end(); ++itr)
    {
        if ((*itr)->creditType == type && (*itr)->creditEntry == creditEntry)
        {
            completedEncounters |= 1 << (*itr)->dbcEntry->encounterIndex;
            sLog->outDebug(LOG_FILTER_TSCR, "Instance %s (instanceId %u) completed encounter %s", instance->GetMapName(), instance->GetInstanceId(), (*itr)->dbcEntry->encounterName[0]);
            if (uint32 dungeonId = (*itr)->lastEncounterDungeon)
            {
                Map::PlayerList const& players = instance->GetPlayers();
                if (!players.isEmpty())
                    for (Map::PlayerList::const_iterator i = players.begin(); i != players.end(); ++i)
                        if (Player* player = i->getSource())
                            if (!source || player->IsAtGroupRewardDistance(source))
                                sLFGMgr->RewardDungeonDoneFor(dungeonId, player);
            }
            return;
        }
    }
}
// Complete Achievement for all players in instance; logs an error and does
// nothing when the achievement id does not exist.
void InstanceScript::DoCompleteAchievement(uint32 achievement)
{
    AchievementEntry const* pAE = GetAchievementStore()->LookupEntry(achievement);
    Map::PlayerList const &PlayerList = instance->GetPlayers();
    if (!pAE)
    {
        sLog->outError("TSCR: DoCompleteAchievement called for not existing achievement %u", achievement);
        return;
    }
    if (!PlayerList.isEmpty())
        for (Map::PlayerList::const_iterator i = PlayerList.begin(); i != PlayerList.end(); ++i)
            if (Player *pPlayer = i->getSource())
                pPlayer->CompletedAchievement(pAE);
}
| gpl-2.0 |
lynx19890808/LynxFlyOpen | Math/LibMatrix.c | 1 | 7402 | /*********************************************************************************
* ɽè·É¿Ø£¨Lynx£©
* ²âÊÔ°æ
*
* Version : V1.0
* By : Lynx@ustc 84693469@qq.com
*
* For : Stm32f103VET6
* Mode : Thumb2
* Description : À´Ô´ÓÚÍøÂç²¢ÂÔÓи͝µÄ¾ØÕó¿â
*
*
* Date : 2013.XX.XX
*******************************************************************************/
#include <math.h>
#include <stdlib.h> //³£Óõĺ¯ÊýÈçmalloc()¡¢calloc()¡¢realloc()¡¢free()¡¢system()¡¢atoi()¡¢atol()¡¢rand()¡¢srand()¡¢exit()µÈµÈ¡£
#include "LibMatrix.h"
//--------------------
void matrix_init0(float* A, int m, int n)
// Zero-initialization routine: clears every element of A.
{
    // A = matrix to clear (m x n)
    // m = number of rows, n = number of columns
    int total = m * n;
    int k;
    for (k = 0; k < total; k++)
        A[k] = 0;
}
void matrix_copy(float* A, int m, int n, float* C)
// Element-wise copy routine: C receives the contents of A.
{
    // A = source matrix (m x n)
    // m = number of rows, n = number of columns
    // C = destination matrix (m x n)
    int total = m * n;
    int k;
    for (k = 0; k < total; k++)
        C[k] = A[k];
}
void matrix_eye(float* A, int n)
// Writes the n x n identity matrix into A.
{
    // A = output matrix (n x n)
    // n = number of rows and columns in A
    int r, c;
    for (r = 0; r < n; r++)
        for (c = 0; c < n; c++)
            A[n*r + c] = (r == c) ? 1 : 0;
}
void matrix_multiply(float* A, float* B, int m, int p, int n, float* C)
// Matrix product routine: C = A * B.
{
    // A = left operand (m x p)
    // B = right operand (p x n)
    // m = rows in A, p = columns in A = rows in B, n = columns in B
    // C = output matrix (m x n)
    int row, col, k;
    for (row = 0; row < m; row++)
    {
        for (col = 0; col < n; col++)
        {
            // accumulate the dot product of row `row` of A and column `col` of B
            float acc = 0;
            for (k = 0; k < p; k++)
                acc = acc + A[p*row + k] * B[n*k + col];
            C[n*row + col] = acc;
        }
    }
}
void matrix_multiply_k(float* A, float k, int m, int n, float* C)
// Scalar multiplication routine: C = k * A.
{
    // A = input matrix (m x n)
    // k = scalar factor
    // m = number of rows, n = number of columns
    // C = output matrix (m x n)
    int total = m * n;
    int idx;
    for (idx = 0; idx < total; idx++)
        C[idx] = A[idx] * k;
}
void matrix_addition(float* A, float* B, int m, int n, float* C)
// Element-wise sum routine: C = A + B.
{
    // A, B = input matrices (m x n)
    // m = number of rows, n = number of columns
    // C = output matrix (m x n)
    int total = m * n;
    int idx;
    for (idx = 0; idx < total; idx++)
        C[idx] = A[idx] + B[idx];
}
void matrix_minus(float* A, float* B, int m, int n, float* C)
// Element-wise difference routine: C = A - B.
// NOTE: functionally identical to matrix_subtraction below; kept for
// backward compatibility with existing callers.
{
    // A, B = input matrices (m x n)
    // m = number of rows, n = number of columns
    // C = output matrix (m x n)
    int total = m * n;
    int idx;
    for (idx = 0; idx < total; idx++)
        C[idx] = A[idx] - B[idx];
}
void matrix_negate(float* A, int m, int n, float* C)
// Element-wise negation routine: C = -A.
{
    // A = input matrix (m x n)
    // m = number of rows, n = number of columns
    // C = output matrix (m x n)
    int total = m * n;
    int idx;
    for (idx = 0; idx < total; idx++)
        C[idx] = -A[idx];
}
void matrix_subtraction(float* A, float* B, int m, int n, float* C)
// Element-wise difference routine: C = A - B.
// NOTE: duplicate of matrix_minus above; both are kept so existing callers
// of either name continue to link.
{
    // A, B = input matrices (m x n)
    // m = number of rows, n = number of columns
    // C = output matrix (m x n)
    int total = m * n;
    int idx;
    for (idx = 0; idx < total; idx++)
        C[idx] = A[idx] - B[idx];
}
void matrix_transpose(float* A, int m, int n, float* C)
// Matrix Transpose Routine
{
    // A = input matrix (m x n)
    // m = number of rows in A
    // n = number of columns in A
    // C = output matrix = the transpose of A (n x m)
    // Iterate over the OUTPUT matrix: row 'r' of C is column 'r' of A.
    for (int r = 0; r < n; r++)
    {
        for (int c = 0; c < m; c++)
            C[m*r + c] = A[n*c + r];
    }
}
int matrix_inversion(float* A, int n, float* AInverse)
// Matrix Inversion Routine
{
    // A = input matrix (n x n); restored to its original contents on return
    // n = dimension of A
    // AInverse = inverted matrix (n x n)
    // This function inverts a matrix based on the Gauss Jordan method with
    // partial pivoting. A is reduced in place during elimination and then
    // restored from the backup copy 'ac' (previously the backup was allocated
    // and filled but never used, so the caller's A was silently destroyed).
    // The function returns 1 on success, 0 on failure (singular matrix or
    // out of memory).
    int i, j, iPass, imx, icol, irow;
    float det, temp, pivot;
    float factor = 0;
    // Backup copy of A so the caller's matrix can be restored afterwards.
    float* ac = (float*)calloc(n*n, sizeof(float));
    if (ac == NULL)
        return 0;
    det = 1;
    for (i = 0; i < n; i++)
    {
        for (j = 0; j < n; j++)
        {
            AInverse[n*i+j] = 0;
            ac[n*i+j] = A[n*i+j];
        }
        AInverse[n*i+i] = 1; // start AInverse as the identity
    }
    // The current pivot row is iPass.
    // For each pass, first find the maximum element in the pivot column.
    for (iPass = 0; iPass < n; iPass++)
    {
        imx = iPass;
        for (irow = iPass; irow < n; irow++)
        {
            if (fabs(A[n*irow+iPass]) > fabs(A[n*imx+iPass])) imx = irow;
        }
        // Interchange the elements of row iPass and row imx in both A and AInverse.
        if (imx != iPass)
        {
            for (icol = 0; icol < n; icol++)
            {
                temp = AInverse[n*iPass+icol];
                AInverse[n*iPass+icol] = AInverse[n*imx+icol];
                AInverse[n*imx+icol] = temp;
                if (icol >= iPass)
                {
                    temp = A[n*iPass+icol];
                    A[n*iPass+icol] = A[n*imx+icol];
                    A[n*imx+icol] = temp;
                }
            }
        }
        // The current pivot is now A[iPass][iPass].
        // The determinant is the product of the pivot elements.
        pivot = A[n*iPass+iPass];
        det = det * pivot;
        if (det == 0)
        {
            // Singular matrix: restore the caller's A and report failure.
            for (i = 0; i < n; i++)
                for (j = 0; j < n; j++)
                    A[n*i+j] = ac[n*i+j];
            free(ac);
            return 0;
        }
        for (icol = 0; icol < n; icol++)
        {
            // Normalize the pivot row by dividing by the pivot element.
            AInverse[n*iPass+icol] = AInverse[n*iPass+icol] / pivot;
            if (icol >= iPass) A[n*iPass+icol] = A[n*iPass+icol] / pivot;
        }
        for (irow = 0; irow < n; irow++)
        {
            // Add a multiple of the pivot row to each row. The multiple factor
            // is chosen so that the element of A on the pivot column is 0.
            if (irow != iPass) factor = A[n*irow+iPass];
            for (icol = 0; icol < n; icol++)
            {
                if (irow != iPass)
                {
                    AInverse[n*irow+icol] -= factor * AInverse[n*iPass+icol];
                    A[n*irow+icol] -= factor * A[n*iPass+icol];
                }
            }
        }
    }
    // Restore the caller's A from the backup before returning.
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            A[n*i+j] = ac[n*i+j];
    free(ac);
    return 1;
}
//static void matrix_print(float* A, int m, int n)
//// Matrix print.
//{
// // A = input matrix (m x n)
// // m = number of rows in A
// // n = number of columns in A
// int i, j;
// for (i=0;i<m;i++)
// {
// printf("| ");
// for(j=0;j<n;j++)
// {
// printf("%7.3f ", A[n*i+j]);
// }
// printf("|\n");
// }
//}
//--------------------------------------------
| gpl-2.0 |
TwitchPlaysPokemon/vba-rr | src/win32/DirectInput.cpp | 1 | 30665 | //#define USE_GETASYNCKEYSTATE_FOR_KEYBOARD
#include "stdafx.h"

#include <cstdio>

#define DIRECTINPUT_VERSION 0x0500
#include "dinput.h"

#include "resource.h"
#include "Input.h"
#include "Reg.h"
#include "WinResUtil.h"
// master keyboard translation table
// Maps each DirectInput scan code (DIK_*) to the matching Windows virtual-key
// code and, where one exists, its printable ASCII character (0 otherwise).
// Used by the GetAsyncKeyState fallback path in readKeyboard() to synthesize
// a DIK-indexed state array without a DirectInput keyboard read.
static const struct {
int dik;
int vk;
int ascii;
} win_key_trans_table[] = {
// dinput key virtual key ascii
{ DIK_ESCAPE, VK_ESCAPE, 27 },
{ DIK_1, '1', '1' },
{ DIK_2, '2', '2' },
{ DIK_3, '3', '3' },
{ DIK_4, '4', '4' },
{ DIK_5, '5', '5' },
{ DIK_6, '6', '6' },
{ DIK_7, '7', '7' },
{ DIK_8, '8', '8' },
{ DIK_9, '9', '9' },
{ DIK_0, '0', '0' },
{ DIK_MINUS, VK_OEM_MINUS, '-' },
{ DIK_EQUALS, VK_OEM_PLUS, '=' },
{ DIK_BACK, VK_BACK, 8 },
{ DIK_TAB, VK_TAB, 9 },
{ DIK_Q, 'Q', 'Q' },
{ DIK_W, 'W', 'W' },
{ DIK_E, 'E', 'E' },
{ DIK_R, 'R', 'R' },
{ DIK_T, 'T', 'T' },
{ DIK_Y, 'Y', 'Y' },
{ DIK_U, 'U', 'U' },
{ DIK_I, 'I', 'I' },
{ DIK_O, 'O', 'O' },
{ DIK_P, 'P', 'P' },
{ DIK_LBRACKET, VK_OEM_4, '[' },
{ DIK_RBRACKET, VK_OEM_6, ']' },
{ DIK_RETURN, VK_RETURN, 13 },
{ DIK_LCONTROL, VK_LCONTROL, 0 },
{ DIK_A, 'A', 'A' },
{ DIK_S, 'S', 'S' },
{ DIK_D, 'D', 'D' },
{ DIK_F, 'F', 'F' },
{ DIK_G, 'G', 'G' },
{ DIK_H, 'H', 'H' },
{ DIK_J, 'J', 'J' },
{ DIK_K, 'K', 'K' },
{ DIK_L, 'L', 'L' },
{ DIK_SEMICOLON, VK_OEM_1, ';' },
{ DIK_APOSTROPHE, VK_OEM_7, '\'' },
{ DIK_GRAVE, VK_OEM_3, '`' },
{ DIK_LSHIFT, VK_LSHIFT, 0 },
{ DIK_BACKSLASH, VK_OEM_5, '\\' },
{ DIK_Z, 'Z', 'Z' },
{ DIK_X, 'X', 'X' },
{ DIK_C, 'C', 'C' },
{ DIK_V, 'V', 'V' },
{ DIK_B, 'B', 'B' },
{ DIK_N, 'N', 'N' },
{ DIK_M, 'M', 'M' },
{ DIK_COMMA, VK_OEM_COMMA, ',' },
{ DIK_PERIOD, VK_OEM_PERIOD, '.' },
{ DIK_SLASH, VK_OEM_2, '/' },
{ DIK_RSHIFT, VK_RSHIFT, 0 },
{ DIK_MULTIPLY, VK_MULTIPLY, '*' },
{ DIK_LMENU, VK_LMENU, 0 },
{ DIK_SPACE, VK_SPACE, ' ' },
{ DIK_CAPITAL, VK_CAPITAL, 0 },
{ DIK_F1, VK_F1, 0 },
{ DIK_F2, VK_F2, 0 },
{ DIK_F3, VK_F3, 0 },
{ DIK_F4, VK_F4, 0 },
{ DIK_F5, VK_F5, 0 },
{ DIK_F6, VK_F6, 0 },
{ DIK_F7, VK_F7, 0 },
{ DIK_F8, VK_F8, 0 },
{ DIK_F9, VK_F9, 0 },
{ DIK_F10, VK_F10, 0 },
{ DIK_NUMLOCK, VK_NUMLOCK, 0 },
{ DIK_SCROLL, VK_SCROLL, 0 },
{ DIK_NUMPAD7, VK_NUMPAD7, 0 },
{ DIK_NUMPAD8, VK_NUMPAD8, 0 },
{ DIK_NUMPAD9, VK_NUMPAD9, 0 },
{ DIK_SUBTRACT, VK_SUBTRACT, 0 },
{ DIK_NUMPAD4, VK_NUMPAD4, 0 },
{ DIK_NUMPAD5, VK_NUMPAD5, 0 },
{ DIK_NUMPAD6, VK_NUMPAD6, 0 },
{ DIK_ADD, VK_ADD, 0 },
{ DIK_NUMPAD1, VK_NUMPAD1, 0 },
{ DIK_NUMPAD2, VK_NUMPAD2, 0 },
{ DIK_NUMPAD3, VK_NUMPAD3, 0 },
{ DIK_NUMPAD0, VK_NUMPAD0, 0 },
{ DIK_DECIMAL, VK_DECIMAL, 0 },
{ DIK_F11, VK_F11, 0 },
{ DIK_F12, VK_F12, 0 },
{ DIK_F13, VK_F13, 0 },
{ DIK_F14, VK_F14, 0 },
{ DIK_F15, VK_F15, 0 },
{ DIK_NUMPADENTER, VK_RETURN, 0 },
{ DIK_RCONTROL, VK_RCONTROL, 0 },
{ DIK_DIVIDE, VK_DIVIDE, 0 },
{ DIK_SYSRQ, 0, 0 },
{ DIK_RMENU, VK_RMENU, 0 },
{ DIK_HOME, VK_HOME, 0 },
{ DIK_UP, VK_UP, 0 },
{ DIK_PRIOR, VK_PRIOR, 0 },
{ DIK_LEFT, VK_LEFT, 0 },
{ DIK_RIGHT, VK_RIGHT, 0 },
{ DIK_END, VK_END, 0 },
{ DIK_DOWN, VK_DOWN, 0 },
{ DIK_NEXT, VK_NEXT, 0 },
{ DIK_INSERT, VK_INSERT, 0 },
{ DIK_DELETE, VK_DELETE, 0 },
{ DIK_LWIN, VK_LWIN, 0 },
{ DIK_RWIN, VK_RWIN, 0 },
{ DIK_APPS, VK_APPS, 0 },
{ DIK_PAUSE, VK_PAUSE, 0 },
{ 0, VK_CANCEL, 0 },
// New keys introduced in Windows 2000. These have no MAME codes to
// preserve compatibility with old config files that may refer to them
// as e.g. FORWARD instead of e.g. KEYCODE_WEBFORWARD. They need table
// entries anyway because otherwise they aren't recognized when
// GetAsyncKeyState polling is used (as happens currently when MAME is
// paused). Some codes are missing because the mapping to vkey codes
// isn't clear, and MapVirtualKey is no help.
{ DIK_MUTE, VK_VOLUME_MUTE, 0 },
{ DIK_VOLUMEDOWN, VK_VOLUME_DOWN, 0 },
{ DIK_VOLUMEUP, VK_VOLUME_UP, 0 },
{ DIK_WEBHOME, VK_BROWSER_HOME, 0 },
{ DIK_WEBSEARCH, VK_BROWSER_SEARCH, 0 },
{ DIK_WEBFAVORITES, VK_BROWSER_FAVORITES, 0 },
{ DIK_WEBREFRESH, VK_BROWSER_REFRESH, 0 },
{ DIK_WEBSTOP, VK_BROWSER_STOP, 0 },
{ DIK_WEBFORWARD, VK_BROWSER_FORWARD, 0 },
{ DIK_WEBBACK, VK_BROWSER_BACK, 0 },
{ DIK_MAIL, VK_LAUNCH_MAIL, 0 },
{ DIK_MEDIASELECT, VK_LAUNCH_MEDIA_SELECT, 0 },
};
extern void directXMessage(const char *);
extern void winlog(const char *msg, ...);
#define POV_UP 1
#define POV_DOWN 2
#define POV_RIGHT 4
#define POV_LEFT 8
// DirectInput-backed implementation of the abstract Input interface.
// Owns the dynamically loaded DINPUT.DLL and the module-level device list.
class DirectInput : public Input
{
private:
HINSTANCE dinputDLL; // handle from LoadLibrary("DINPUT.DLL"), freed in dtor
public:
virtual void checkDevices(); // poll devices for the key-config dialog
DirectInput();
virtual ~DirectInput();
virtual bool initialize(); // load DLL, create + enumerate devices
virtual bool readDevices(); // refresh state of all needed devices
virtual u32 readDevice(int which, bool sensor); // build the button bitmask
virtual CString getKeyName(LONG_PTR key); // human-readable binding name
virtual void checkKeys(); // validate bindings, apply defaults
virtual void activate(); // re-acquire devices (e.g. on focus gain)
virtual void loadSettings(); // read bindings from the registry
virtual void saveSettings(); // write bindings to the registry
};
// Per-device bookkeeping. Slot 0 is always the system keyboard; slots 1..n
// are joysticks discovered during enumeration.
struct deviceInfo
{
LPDIRECTINPUTDEVICE device; // DirectInput device interface, released in dtor
BOOL isPolled; // device requires Poll() before GetDeviceState()
int nButtons;
int nAxes;
int nPovs;
BOOL first; // first read pending: used to capture a baseline state
struct
{
DWORD offset; // DIJOFS_* offset identifying the axis in DIJOYSTATE
LONG center; // resting value of the axis
LONG negative; // below this threshold the axis counts as "minus" pressed
LONG positive; // above this threshold the axis counts as "plus" pressed
} axis[8]; // only the first 8 axes are tracked
int needed; // some binding refers to this device, so it must be read
union
{
UCHAR data[256]; // keyboard: one byte per DIK code (0x80 = pressed)
DIJOYSTATE state; // joystick: last polled state
};
};
// Device currently being configured by the EnumObjects callbacks.
static deviceInfo * currentDevice = NULL;
// Number of valid entries in pDevices (slot 0 = keyboard).
static int numDevices = 1;
static deviceInfo * pDevices = NULL;
static LPDIRECTINPUT pDirectInput = NULL;
// Non-zero enables winlog() diagnostics (read from VBA.ini in initialize()).
static int joyDebug = 0;
// Running axis index while EnumAxesCallback walks one device's axes.
static int axisNumber = 0;
// Key bindings for up to 4 joypads, indexed by the KEY_* constants.
// Encoding: (device number << 8) | code; device 0 codes are DIK_* scan codes.
USHORT joypad[4][13] = {
{
DIK_LEFT, DIK_RIGHT,
DIK_UP, DIK_DOWN,
DIK_Z, DIK_X,
DIK_RETURN, DIK_BACK,
DIK_A, DIK_S,
DIK_SPACE, DIK_F12,
DIK_C
},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
// Bindings for the motion sensor directions (KEY_LEFT/RIGHT/UP/DOWN indices).
USHORT motion[4] = {
DIK_NUMPAD4, DIK_NUMPAD6, DIK_NUMPAD8, DIK_NUMPAD2
};
static int winReadKey(char *name, int num)
{
    // Read one joypad binding ("Joy<num>_<name>") from the registry.
    // name = function name ("Left", "A", ...); num = joypad number (0-3).
    // Returns the stored value, or -1 if the value is absent.
    char buffer[80];
    // snprintf instead of sprintf: the value name is caller-supplied, so
    // guarantee the fixed-size buffer can never overflow.
    snprintf(buffer, sizeof(buffer), "Joy%d_%s", num, name);
    return regQueryDwordValue(buffer, (DWORD)-1);
}
void winReadKeys()
{
    // Load every joypad binding and the motion-sensor bindings from the
    // registry; values that are missing (-1) leave the current binding alone.
    // Registry value name for each joypad function, in the original read order.
    static const struct { const char *name; int index; } padEntries[] = {
        { "Left",    KEY_LEFT },
        { "Right",   KEY_RIGHT },
        { "Up",      KEY_UP },
        { "Down",    KEY_DOWN },
        { "A",       KEY_BUTTON_A },
        { "B",       KEY_BUTTON_B },
        { "L",       KEY_BUTTON_L },
        { "R",       KEY_BUTTON_R },
        { "Start",   KEY_BUTTON_START },
        { "Select",  KEY_BUTTON_SELECT },
        { "Speed",   KEY_BUTTON_SPEED },
        { "Capture", KEY_BUTTON_CAPTURE },
        { "GS",      KEY_BUTTON_GS },
    };
    static const struct { const char *name; int index; } motionEntries[] = {
        { "Motion_Left",  KEY_LEFT },
        { "Motion_Right", KEY_RIGHT },
        { "Motion_Up",    KEY_UP },
        { "Motion_Down",  KEY_DOWN },
    };
    for (int pad = 0; pad < 4; pad++)
    {
        for (int e = 0; e < (int)(sizeof(padEntries)/sizeof(padEntries[0])); e++)
        {
            int key = winReadKey((char *)padEntries[e].name, pad);
            if (key != -1)
                joypad[pad][padEntries[e].index] = key;
        }
    }
    for (int e = 0; e < (int)(sizeof(motionEntries)/sizeof(motionEntries[0])); e++)
    {
        int key = regQueryDwordValue((char *)motionEntries[e].name, (DWORD)-1);
        if (key != -1)
            motion[motionEntries[e].index] = key;
    }
}
static void winSaveKey(char *name, int num, USHORT value)
{
    // Store one joypad binding as registry value "Joy<num>_<name>".
    char buffer[80];
    // snprintf instead of sprintf: the value name is caller-supplied, so
    // guarantee the fixed-size buffer can never overflow.
    snprintf(buffer, sizeof(buffer), "Joy%d_%s", num, name);
    regSetDwordValue(buffer, value);
}
void winSaveKeys()
{
    // Persist every joypad binding, a layout version stamp, and the
    // motion-sensor bindings to the registry.
    // Value names in the original write order.
    static const struct { const char *name; int index; } padEntries[] = {
        { "Left",    KEY_LEFT },
        { "Right",   KEY_RIGHT },
        { "Up",      KEY_UP },
        { "Speed",   KEY_BUTTON_SPEED },
        { "Capture", KEY_BUTTON_CAPTURE },
        { "GS",      KEY_BUTTON_GS },
        { "Down",    KEY_DOWN },
        { "A",       KEY_BUTTON_A },
        { "B",       KEY_BUTTON_B },
        { "L",       KEY_BUTTON_L },
        { "R",       KEY_BUTTON_R },
        { "Start",   KEY_BUTTON_START },
        { "Select",  KEY_BUTTON_SELECT },
    };
    for (int pad = 0; pad < 4; pad++)
    {
        for (int e = 0; e < (int)(sizeof(padEntries)/sizeof(padEntries[0])); e++)
            winSaveKey((char *)padEntries[e].name, pad,
                       joypad[pad][padEntries[e].index]);
    }
    regSetDwordValue("joyVersion", 1);
    regSetDwordValue("Motion_Left",
                     motion[KEY_LEFT]);
    regSetDwordValue("Motion_Right",
                     motion[KEY_RIGHT]);
    regSetDwordValue("Motion_Up",
                     motion[KEY_UP]);
    regSetDwordValue("Motion_Down",
                     motion[KEY_DOWN]);
}
// Per-axis setup callback for the joystick pointed to by 'currentDevice'.
// Forces the axis range to -32768..32767 (or reads back the driver's range
// if that fails), applies a dead zone, and records the center/threshold
// values used later by checkKey() and checkJoypads().
static BOOL CALLBACK EnumAxesCallback(const DIDEVICEOBJECTINSTANCE*pdidoi,
VOID*pContext)
{
DIPROPRANGE diprg;
diprg.diph.dwSize = sizeof(DIPROPRANGE);
diprg.diph.dwHeaderSize = sizeof(DIPROPHEADER);
diprg.diph.dwHow = DIPH_BYOFFSET;
diprg.diph.dwObj = pdidoi->dwOfs; // Specify the enumerated axis
diprg.lMin = -32768;
diprg.lMax = 32767;
// try to set the range
if (FAILED(currentDevice->device->SetProperty(DIPROP_RANGE, &diprg.diph)))
{
// Get the range for the axis
if (FAILED(currentDevice->device->
GetProperty(DIPROP_RANGE, &diprg.diph)))
{
return DIENUM_STOP;
}
}
// Apply a dead zone of 5000 (per DIPROP_DEADZONE docs the unit is
// hundredths of a percent, i.e. 50% -- NOTE(review): confirm such a large
// dead zone is intended).
DIPROPDWORD didz;
didz.diph.dwSize = sizeof(didz);
didz.diph.dwHeaderSize = sizeof(DIPROPHEADER);
didz.diph.dwHow = DIPH_BYOFFSET;
didz.diph.dwObj = pdidoi->dwOfs;
didz.dwData = 5000;
currentDevice->device->SetProperty(DIPROP_DEADZONE, &didz.diph);
// Trigger thresholds at the midpoint of each half-range.
LONG center = (diprg.lMin + diprg.lMax)/2;
LONG threshold = (diprg.lMax - center)/2;
// only 8 axis supported
if (axisNumber < 8)
{
currentDevice->axis[axisNumber].center = center;
currentDevice->axis[axisNumber].negative = center - threshold;
currentDevice->axis[axisNumber].positive = center + threshold;
currentDevice->axis[axisNumber].offset = pdidoi->dwOfs;
}
// Counts every enumerated axis, even past the 8 that are stored.
axisNumber++;
return DIENUM_CONTINUE;
}
// POV (hat) enumeration callback: nothing is recorded per hat -- the count
// comes from GetCapabilities() -- so just continue the enumeration.
static BOOL CALLBACK EnumPovsCallback(const DIDEVICEOBJECTINSTANCE*pdidoi,
VOID*pContext)
{
return DIENUM_CONTINUE;
}
// Second enumeration pass: create a DirectInput device for each attached
// joystick and record its capabilities in pDevices[numDevices].
static BOOL CALLBACK DIEnumDevicesCallback(LPCDIDEVICEINSTANCE pInst,
                                           LPVOID lpvContext)
{
    ZeroMemory(&pDevices[numDevices], sizeof(deviceInfo));
    HRESULT hRet = pDirectInput->CreateDevice(pInst->guidInstance,
                                              &pDevices[numDevices].device,
                                              NULL);
    if (hRet != DI_OK)
        return DIENUM_STOP;
    DIDEVCAPS caps;
    caps.dwSize = sizeof(DIDEVCAPS);
    hRet = pDevices[numDevices].device->GetCapabilities(&caps);
    if (hRet == DI_OK)
    {
        if (caps.dwFlags & DIDC_POLLEDDATAFORMAT ||
            caps.dwFlags & DIDC_POLLEDDEVICE)
            pDevices[numDevices].isPolled = TRUE;
        pDevices[numDevices].nButtons = caps.dwButtons;
        pDevices[numDevices].nAxes = caps.dwAxes;
        pDevices[numDevices].nPovs = caps.dwPOVs;
        // Seed defaults for all 8 supported axis slots (was only 6, which
        // left axes 6 and 7 zeroed if EnumAxesCallback never reached them).
        for (int i = 0; i < 8; i++)
        {
            pDevices[numDevices].axis[i].center = 0x8000;
            pDevices[numDevices].axis[i].negative = 0x4000;
            pDevices[numDevices].axis[i].positive = 0xc000;
        }
    }
    else if (joyDebug)
        winlog("Failed to get device capabilities %08x\n", hRet);
    if (joyDebug)
    {
        // don't translate. debug only
        winlog("******************************\n");
        winlog("Joystick %2d name : %s\n", numDevices, pInst->tszProductName);
    }
    numDevices++;
    return DIENUM_CONTINUE;
}
// First enumeration pass: only count the attached joysticks so initialize()
// can size the pDevices array before the second pass creates the devices.
BOOL CALLBACK DIEnumDevicesCallback2(LPCDIDEVICEINSTANCE pInst,
LPVOID lpvContext)
{
numDevices++;
return DIENUM_CONTINUE;
}
static int getPovState(DWORD value)
{
    // Decode a DirectInput POV (hat) reading into a POV_* direction bitmask.
    // 'value' is the hat angle in hundredths of a degree (0 = up, 9000 =
    // right, ...); a low word of 0xFFFF means the hat is centered.
    int state = 0;
    if (LOWORD(value) == 0xFFFF)
        return state;
    if (value < 9000 || value > 27000)
        state |= POV_UP;
    if (value > 0 && value < 18000)
        state |= POV_RIGHT;
    if (value > 9000 && value < 27000)
        state |= POV_DOWN;
    if (value > 18000)
        state |= POV_LEFT;
    return state;
}
static void checkKeys()
{
LONG_PTR dev = 0;
int i;
for (i = 0; i < numDevices; i++)
pDevices[i].needed = 0;
for (i = 0; i < 4; i++)
{
dev = joypad[i][KEY_LEFT] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_LEFT] = DIK_LEFT;
dev = joypad[i][KEY_RIGHT] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_RIGHT] = DIK_RIGHT;
dev = joypad[i][KEY_UP] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_UP] = DIK_UP;
dev = joypad[i][KEY_DOWN] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_DOWN] = DIK_DOWN;
dev = joypad[i][KEY_BUTTON_A] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_A] = DIK_Z;
dev = joypad[i][KEY_BUTTON_B] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_B] = DIK_X;
dev = joypad[i][KEY_BUTTON_L] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_L] = DIK_A;
dev = joypad[i][KEY_BUTTON_R] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_R] = DIK_S;
dev = joypad[i][KEY_BUTTON_START] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_START] = DIK_RETURN;
dev = joypad[i][KEY_BUTTON_SELECT] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_SELECT] = DIK_BACK;
dev = joypad[i][KEY_BUTTON_SPEED] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_SPEED] = DIK_SPACE;
dev = joypad[i][KEY_BUTTON_CAPTURE] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_CAPTURE] = DIK_F12;
dev = joypad[i][KEY_BUTTON_GS] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
joypad[i][KEY_BUTTON_GS] = DIK_C;
}
dev = motion[KEY_UP] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
motion[KEY_UP] = DIK_NUMPAD8;
dev = motion[KEY_DOWN] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
motion[KEY_DOWN] = DIK_NUMPAD2;
dev = motion[KEY_LEFT] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
motion[KEY_LEFT] = DIK_NUMPAD4;
dev = motion[KEY_RIGHT] >> 8;
if (dev < numDevices && dev >= 0)
pDevices[dev].needed = 1;
else
motion[KEY_RIGHT] = DIK_NUMPAD6;
}
#define KEYDOWN(buffer, key) (buffer[key] & 0x80)
// Poll whether a virtual key is currently down without a DirectInput read.
// Toggle keys (Caps/Num/Scroll Lock) report their toggle state instead of
// the physical key position; VK_PAUSE is always reported as not pressed.
static bool IsKeyDownAsync (WORD KeyIdent)
{
//if (KeyIdent == 0 || KeyIdent == VK_ESCAPE) // if it's the 'disabled' key, it's never pressed
// return false;
//if (!GUI.BackgroundInput && GUI.hWnd != GetForegroundWindow())
// return false;
// the pause key is special, need this to catch all presses of it
// Both GetKeyState and GetAsyncKeyState cannot catch it anyway,
// so this should be handled in WM_KEYDOWN message.
if (KeyIdent == VK_PAUSE)
{
return false;
// if(GetAsyncKeyState(VK_PAUSE)) // not &'ing this with 0x8000 is intentional and necessary
// return true;
}
if (KeyIdent == VK_CAPITAL || KeyIdent == VK_NUMLOCK || KeyIdent == VK_SCROLL)
// Bit 0 of GetKeyState is the toggle (lock) state.
return ((GetKeyState(KeyIdent) & 0x01) != 0);
else
// Bit 0x8000 of GetAsyncKeyState is "key is currently down".
return ((GetAsyncKeyState(KeyIdent) & 0x8000) != 0);
//return ((GetKeyState (KeyIdent) & 0x80) != 0);
}
// Refresh the keyboard state buffer (pDevices[0].data: one byte per DIK
// code, bit 0x80 = pressed). Returns false only when the DirectInput read
// fails even after re-acquiring the device.
static bool readKeyboard()
{
#ifndef USE_GETASYNCKEYSTATE_FOR_KEYBOARD
if (pDevices[0].needed)
{
HRESULT hret = pDevices[0].device->
GetDeviceState(256,
(LPVOID)pDevices[0].data);
if (hret == DIERR_INPUTLOST || hret == DIERR_NOTACQUIRED)
{
// Device was lost (e.g. focus change): re-acquire once and retry.
hret = pDevices[0].device->Acquire();
if (hret != DI_OK)
return false;
hret = pDevices[0].device->GetDeviceState(256, (LPVOID)pDevices[0].data);
}
return hret == DI_OK;
}
#else
// Alternative path: synthesize the DIK-indexed state array from
// GetAsyncKeyState via the DIK -> VK translation table.
for (int i = 0; i < sizeof(win_key_trans_table)/sizeof(win_key_trans_table[0]); i++) {
pDevices[0].data[win_key_trans_table[i].dik] = IsKeyDownAsync(win_key_trans_table[i].vk) ? 0x80 : 0;
}
#endif
return true;
}
// Refresh the cached DIJOYSTATE of joystick 'joy' (device index >= 1).
// Polled devices need an explicit Poll() before GetDeviceState(). Returns
// false only when the read fails even after re-acquiring the device.
static bool readJoystick(int joy)
{
if (pDevices[joy].needed)
{
if (pDevices[joy].isPolled)
((LPDIRECTINPUTDEVICE2)pDevices[joy].device)->Poll();
HRESULT hret = pDevices[joy].device->
GetDeviceState(sizeof(DIJOYSTATE),
(LPVOID)&pDevices[joy].state);
if (hret == DIERR_INPUTLOST || hret == DIERR_NOTACQUIRED)
{
// Device was lost: re-acquire, poll again if required, and retry.
hret = pDevices[joy].device->Acquire();
if (hret == DI_OK)
{
if (pDevices[joy].isPolled)
((LPDIRECTINPUTDEVICE2)pDevices[joy].device)->Poll();
hret = pDevices[joy].device->
GetDeviceState(sizeof(DIJOYSTATE),
(LPVOID)&pDevices[joy].state);
}
}
return hret == DI_OK;
}
// Devices no binding refers to are trivially "read".
return true;
}
// Key-configuration helper: detect the first newly pressed keyboard key
// since the previous call and post it to the focused window as a
// JOYCONFIG_MESSAGE (device number 0 = keyboard).
static void checkKeyboard()
{
// mham fix. Patch #1378104
UCHAR keystate[256];
HRESULT hret = pDevices[0].device->Acquire();
if (pDevices[0].first)
{
// First call only records a baseline snapshot; nothing is reported.
pDevices[0].device->GetDeviceState(256, (LPVOID)pDevices[0].data);
pDevices[0].first = FALSE;
return;
}
hret = pDevices[0].device->
GetDeviceState(256, (LPVOID)keystate);
if (hret == DIERR_INPUTLOST || hret == DIERR_NOTACQUIRED)
{
return;
}
if (hret == DI_OK)
{
// Report only the first key that transitioned to pressed.
for (int i = 0; i < 256; i++)
{
if (keystate[i] == pDevices[0].data[i])
continue;
if (KEYDOWN(keystate, i))
{
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, 0, i);
break;
}
}
}
// NOTE(review): keystate is cached even if GetDeviceState failed with a
// code other than the two handled above, copying an uninitialized buffer
// -- confirm whether that path can occur.
memcpy(pDevices[0].data, keystate, sizeof(UCHAR) * 256);
}
static void checkJoypads()
{
DIDEVICEOBJECTINSTANCE di;
ZeroMemory(&di, sizeof(DIDEVICEOBJECTINSTANCE));
di.dwSize = sizeof(DIDEVICEOBJECTINSTANCE);
int i = 0;
DIJOYSTATE joystick;
for (i = 1; i < numDevices; i++)
{
HRESULT hret = pDevices[i].device->Acquire();
if (pDevices[i].isPolled)
((LPDIRECTINPUTDEVICE2)pDevices[i].device)->Poll();
hret = pDevices[i].device->GetDeviceState(sizeof(joystick), &joystick);
int j;
if (pDevices[i].first)
{
memcpy(&pDevices[i].state, &joystick, sizeof(joystick));
pDevices[i].first = FALSE;
continue;
}
for (j = 0; j < pDevices[i].nButtons; j++)
{
if (((pDevices[i].state.rgbButtons[j] ^ joystick.rgbButtons[j])
& joystick.rgbButtons[j]) & 0x80)
{
HWND focus = GetFocus();
SendMessage(focus, JOYCONFIG_MESSAGE, i, j+128);
}
}
for (j = 0; j < pDevices[i].nAxes && j < 8; j++)
{
LONG value = pDevices[i].axis[j].center;
LONG old = 0;
switch (pDevices[i].axis[j].offset)
{
case DIJOFS_X:
value = joystick.lX;
old = pDevices[i].state.lX;
break;
case DIJOFS_Y:
value = joystick.lY;
old = pDevices[i].state.lY;
break;
case DIJOFS_Z:
value = joystick.lZ;
old = pDevices[i].state.lZ;
break;
case DIJOFS_RX:
value = joystick.lRx;
old = pDevices[i].state.lRx;
break;
case DIJOFS_RY:
value = joystick.lRy;
old = pDevices[i].state.lRy;
break;
case DIJOFS_RZ:
value = joystick.lRz;
old = pDevices[i].state.lRz;
break;
case DIJOFS_SLIDER(0):
value = joystick.rglSlider[0];
old = pDevices[i].state.rglSlider[0];
break;
case DIJOFS_SLIDER(1):
value = joystick.rglSlider[1];
old = pDevices[i].state.rglSlider[1];
break;
}
if (value != old)
{
if (value < pDevices[i].axis[j].negative)
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, i, (j<<1));
else if (value > pDevices[i].axis[j].positive)
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, i, (j<<1)+1);
}
}
for (j = 0; j < 4 && j < pDevices[i].nPovs; j++)
{
if (LOWORD(pDevices[i].state.rgdwPOV[j]) != LOWORD(joystick.rgdwPOV[j]))
{
int state = getPovState(joystick.rgdwPOV[j]);
if (state & POV_UP)
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, i, (j<<2)+0x20);
else if (state & POV_DOWN)
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, i, (j<<2)+0x21);
else if (state & POV_RIGHT)
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, i, (j<<2)+0x22);
else if (state & POV_LEFT)
SendMessage(GetFocus(), JOYCONFIG_MESSAGE, i, (j<<2)+0x23);
}
}
memcpy(&pDevices[i].state, &joystick, sizeof(joystick));
}
}
// Return whether the input encoded in 'key' is currently active.
// Encoding: high bits = device number, low 8 bits = code. Device 0 is the
// keyboard (code = DIK_* scan code). For joysticks: code < 16 is an axis
// half (code>>1 = axis index, bit 0 selects +/-), 16..47 is a POV hat
// direction, and >= 128 is button (code-128). Codes 48..127 fall through
// and report FALSE.
BOOL checkKey(LONG_PTR key)
{
LONG_PTR dev = (key >> 8);
LONG_PTR k = (key & 255);
if (dev == 0)
{
// Keyboard: test the 0x80 "pressed" bit in the cached state buffer.
return KEYDOWN(pDevices[0].data, k);
}
else if (dev >= numDevices)
{
// Binding refers to a device that is not present.
return FALSE;
}
else
{
if (k < 16)
{
// Axis half: compare the cached axis value against its threshold.
LONG_PTR axis = k >> 1;
LONG value = pDevices[dev].axis[axis].center;
switch (pDevices[dev].axis[axis].offset)
{
case DIJOFS_X:
value = pDevices[dev].state.lX;
break;
case DIJOFS_Y:
value = pDevices[dev].state.lY;
break;
case DIJOFS_Z:
value = pDevices[dev].state.lZ;
break;
case DIJOFS_RX:
value = pDevices[dev].state.lRx;
break;
case DIJOFS_RY:
value = pDevices[dev].state.lRy;
break;
case DIJOFS_RZ:
value = pDevices[dev].state.lRz;
break;
case DIJOFS_SLIDER(0):
value = pDevices[dev].state.rglSlider[0];
break;
case DIJOFS_SLIDER(1):
value = pDevices[dev].state.rglSlider[1];
break;
}
// Bit 0 selects the positive (1) or negative (0) half of the axis.
if (k & 1)
return value > pDevices[dev].axis[axis].positive;
return value < pDevices[dev].axis[axis].negative;
}
else if (k < 48)
{
// POV hat: bits 2-3 select the hat, bits 0-1 the direction.
LONG_PTR hat = (k >> 2) & 3;
int state = getPovState(pDevices[dev].state.rgdwPOV[hat]);
BOOL res = FALSE;
switch (k & 3)
{
case 0:
res = state & POV_UP;
break;
case 1:
res = state & POV_DOWN;
break;
case 2:
res = state & POV_RIGHT;
break;
case 3:
res = state & POV_LEFT;
break;
}
return res;
}
else if (k >= 128)
{
// Button: test the 0x80 "pressed" bit of the cached button state.
return pDevices[dev].state.rgbButtons[k-128] & 0x80;
}
}
return FALSE;
}
// All real setup happens in initialize(); just clear the DLL handle so the
// destructor can tell whether DINPUT.DLL was ever loaded.
DirectInput::DirectInput()
{
dinputDLL = NULL;
}
// Tear down in reverse order of initialize(): persist the bindings, release
// every created device, release the DirectInput interface, then unload the
// dynamically loaded DINPUT.DLL.
DirectInput::~DirectInput()
{
saveSettings();
if (pDirectInput != NULL)
{
if (pDevices)
{
for (int i = 0; i < numDevices; i++)
{
if (pDevices[i].device)
{
// Unacquire before Release so the device is cleanly relinquished.
pDevices[i].device->Unacquire();
pDevices[i].device->Release();
pDevices[i].device = NULL;
}
}
free(pDevices);
pDevices = NULL;
}
pDirectInput->Release();
pDirectInput = NULL;
}
if (dinputDLL)
{
/**/ ::FreeLibrary(dinputDLL);
dinputDLL = NULL;
}
}
// Load DINPUT.DLL dynamically, create the DirectInput interface, the system
// keyboard device (slot 0), and one device per attached joystick, then set
// data formats, enumerate axes/POVs and acquire everything.
// Returns false on any unrecoverable setup failure.
bool DirectInput::initialize()
{
joyDebug = GetPrivateProfileInt("config",
"joyDebug",
0,
"VBA.ini");
dinputDLL = /**/ ::LoadLibrary("DINPUT.DLL");
HRESULT (WINAPI *DInputCreate)(HINSTANCE, DWORD, LPDIRECTINPUT *, IUnknown *);
if (dinputDLL != NULL)
{
// Resolve the ANSI factory entry point by hand since the DLL is loaded
// at run time rather than linked against.
DInputCreate = (HRESULT (WINAPI *)(HINSTANCE, DWORD, LPDIRECTINPUT *, IUnknown *))
GetProcAddress(dinputDLL, "DirectInputCreateA");
if (DInputCreate == NULL)
{
directXMessage("DirectInputCreateA");
return false;
}
}
else
{
directXMessage("DINPUT.DLL");
return false;
}
HRESULT hret = DInputCreate(AfxGetInstanceHandle(),
DIRECTINPUT_VERSION,
&pDirectInput,
NULL);
if (hret != DI_OK)
{
// errorMessage(myLoadString(IDS_ERROR_DISP_CREATE), hret);
return false;
}
// First pass only counts the joysticks (DIEnumDevicesCallback2) so the
// device array can be sized; slot 0 is reserved for the keyboard.
hret = pDirectInput->EnumDevices(DIDEVTYPE_JOYSTICK,
DIEnumDevicesCallback2,
NULL,
DIEDFL_ATTACHEDONLY);
pDevices = (deviceInfo *)calloc(numDevices, sizeof(deviceInfo));
hret = pDirectInput->CreateDevice(GUID_SysKeyboard, &pDevices[0].device, NULL);
pDevices[0].isPolled = false;
pDevices[0].needed = true;
pDevices[0].first = true;
if (hret != DI_OK)
{
// errorMessage(myLoadString(IDS_ERROR_DISP_CREATEDEVICE), hret);
return false;
}
// Second pass re-counts from 1 while creating the actual devices.
// NOTE(review): this assumes the attached-device set does not change
// between the two EnumDevices calls; a device appearing in between would
// overrun the calloc'd array -- confirm acceptable.
numDevices = 1;
hret = pDirectInput->EnumDevices(DIDEVTYPE_JOYSTICK,
DIEnumDevicesCallback,
NULL,
DIEDFL_ATTACHEDONLY);
// hret = pDevices[0].device->SetCooperativeLevel(hWindow,
// DISCL_FOREGROUND|
// DISCL_NONEXCLUSIVE);
// NOTE(review): the check below reports IDS_ERROR_DISP_LEVEL but actually
// tests the EnumDevices result; the SetCooperativeLevel call it belonged
// to is commented out above.
if (hret != DI_OK)
{
// errorMessage(myLoadString(IDS_ERROR_DISP_LEVEL), hret);
return false;
}
hret = pDevices[0].device->SetDataFormat(&c_dfDIKeyboard);
if (hret != DI_OK)
{
// errorMessage(myLoadString(IDS_ERROR_DISP_DATAFORMAT), hret);
return false;
}
// Configure each joystick: joystick data format, then axis ranges and
// thresholds via the EnumObjects callbacks (which use currentDevice and
// axisNumber as implicit arguments).
for (int i = 1; i < numDevices; i++)
{
pDevices[i].device->SetDataFormat(&c_dfDIJoystick);
pDevices[i].needed = false;
pDevices[i].first = true;
currentDevice = &pDevices[i];
axisNumber = 0;
currentDevice->device->EnumObjects(EnumAxesCallback, NULL, DIDFT_AXIS);
currentDevice->device->EnumObjects(EnumPovsCallback, NULL, DIDFT_POV);
if (joyDebug)
{
// don't translate. debug only
winlog("Joystick %2d polled : %d\n", i, currentDevice->isPolled);
winlog("Joystick %2d buttons : %d\n", i, currentDevice->nButtons);
winlog("Joystick %2d povs : %d\n", i, currentDevice->nPovs);
winlog("Joystick %2d axes : %d\n", i, currentDevice->nAxes);
for (int j = 0; j < currentDevice->nAxes; j++)
{
winlog("Axis %2d offset : %08lx\n", j, currentDevice->axis[j].
offset);
winlog("Axis %2d center : %08lx\n", j, currentDevice->axis[j].
center);
winlog("Axis %2d negative : %08lx\n", j, currentDevice->axis[j].
negative);
winlog("Axis %2d positive : %08lx\n", j, currentDevice->axis[j].
positive);
}
}
currentDevice = NULL;
}
for (int i = 0; i < numDevices; i++)
pDevices[i].device->Acquire();
return true;
}
// Poll every device that checkKeys() marked as needed (device 0 is the
// keyboard, the rest are joysticks). Returns true only if all needed
// devices were read successfully.
bool DirectInput::readDevices()
{
    bool ok = true;
    for (int i = 0; i < numDevices; i++)
    {
        if (pDevices[i].needed)
        {
            // Was "|| ok", which made the result unconditionally true since
            // ok starts as true; use && so a failed read is actually reported.
            ok = (i > 0 ? readJoystick(i) : readKeyboard()) && ok;
        }
    }
    return ok;
}
// Cleared elsewhere while the emulator window is inactive; readDevice()
// skips all manual input while this is false.
bool inputActive = true; // used to disable all input when the window is inactive
// Translate the configured bindings of joypad 'i' into the emulator's
// BUTTON_MASK_* bitmask, based on the device states cached by readDevices().
// Outside SGB multiplayer the requested pad is ignored and the system
// default pad is used instead (see comment below).
u32 DirectInput::readDevice(int i, bool sensor)
{
// this old hack is evil
extern int systemGetDefaultJoypad();
extern int gbSgbMode, gbSgbMultiplayer;
if (!(gbSgbMode && gbSgbMultiplayer))
i = systemGetDefaultJoypad();
u32 res = 0;
// manual input
if (inputActive)
{
if (checkKey(joypad[i][KEY_BUTTON_A]))
res |= BUTTON_MASK_A;
if (checkKey(joypad[i][KEY_BUTTON_B]))
res |= BUTTON_MASK_B;
if (checkKey(joypad[i][KEY_BUTTON_SELECT]))
res |= BUTTON_MASK_SELECT;
if (checkKey(joypad[i][KEY_BUTTON_START]))
res |= BUTTON_MASK_START;
if (checkKey(joypad[i][KEY_RIGHT]))
res |= BUTTON_MASK_RIGHT;
if (checkKey(joypad[i][KEY_LEFT]))
res |= BUTTON_MASK_LEFT;
if (checkKey(joypad[i][KEY_UP]))
res |= BUTTON_MASK_UP;
if (checkKey(joypad[i][KEY_DOWN]))
res |= BUTTON_MASK_DOWN;
if (checkKey(joypad[i][KEY_BUTTON_R]))
res |= BUTTON_MASK_R;
if (checkKey(joypad[i][KEY_BUTTON_L]))
res |= BUTTON_MASK_L;
// unused
// Motion-sensor directions; opposite directions are mutually exclusive
// (else-if), matching a physical tilt.
if (checkKey(motion[KEY_LEFT]))
res |= BUTTON_MASK_LEFT_MOTION;
else if (checkKey(motion[KEY_RIGHT]))
res |= BUTTON_MASK_RIGHT_MOTION;
if (checkKey(motion[KEY_UP]))
res |= BUTTON_MASK_UP_MOTION;
else if (checkKey(motion[KEY_DOWN]))
res |= BUTTON_MASK_DOWN_MOTION;
}
u32 hackedButtons = 0;
if (inputActive)
{
// the "non-button" buttons (what a hack!)
if (checkKey(joypad[i][KEY_BUTTON_SPEED]))
hackedButtons |= BUTTON_MASK_SPEED;
if (checkKey(joypad[i][KEY_BUTTON_CAPTURE]))
hackedButtons |= BUTTON_MASK_CAPTURE;
if (checkKey(joypad[i][KEY_BUTTON_GS]))
hackedButtons |= BUTTON_MASK_GAMESHARK;
}
// Speed-up state forced by the core overrides the key binding.
extern bool systemIsSpedUp();
if (systemIsSpedUp())
hackedButtons |= BUTTON_MASK_SPEED;
return res | hackedButtons;
}
// Produce a human-readable name for an encoded binding.
// Encoding: high bits = device number (0 = keyboard), low 8 bits = code.
// For joysticks: code < 16 is an axis half (code>>1 = axis, bit 0 = +/-),
// 16..47 is a POV hat direction, and >= 128 is a button (code-128).
CString DirectInput::getKeyName(LONG_PTR key)
{
    LONG_PTR d = (key >> 8);
    LONG_PTR k = key & 255;
    DIDEVICEOBJECTINSTANCE di;
    ZeroMemory(&di, sizeof(DIDEVICEOBJECTINSTANCE));
    di.dwSize = sizeof(DIDEVICEOBJECTINSTANCE);
    CString winBuffer = winResLoadString(IDS_ERROR);
    if (d == 0)
    {
        // Keyboard: ask DirectInput for the key's display name.
        pDevices[0].device->GetObjectInfo(&di, (DWORD)key, DIPH_BYOFFSET);
        winBuffer = di.tszName;
    }
    else if (d < numDevices)
    {
        if (k < 16)
        {
            if (k < 4)
            {
                // The first two axes get localized Left/Right/Up/Down names.
                switch (k)
                {
                case 0:
                    winBuffer.Format(winResLoadString(IDS_JOY_LEFT), d);
                    break;
                case 1:
                    winBuffer.Format(winResLoadString(IDS_JOY_RIGHT), d);
                    break;
                case 2:
                    winBuffer.Format(winResLoadString(IDS_JOY_UP), d);
                    break;
                case 3:
                    winBuffer.Format(winResLoadString(IDS_JOY_DOWN), d);
                    break;
                }
            }
            else
            {
                pDevices[d].device->GetObjectInfo(&di,
                                                  pDevices[d].axis[k>>1].offset,
                                                  DIPH_BYOFFSET);
                if (k & 1)
                    winBuffer.Format("Joy %d %s +", d, di.tszName);
                else
                    winBuffer.Format("Joy %d %s -", d, di.tszName);
            }
        }
        else if (k < 48)
        {
            LONG_PTR hat = (k >> 2) & 3;
            pDevices[d].device->GetObjectInfo(&di,
                                              (DWORD)DIJOFS_POV(hat),
                                              DIPH_BYOFFSET);
            // String literals are const: 'dir' must not be a mutable char*
            // (that conversion is ill-formed in standard C++11 and later).
            const char * dir = "up";
            LONG_PTR dd = k & 3;
            if (dd == 1)
                dir = "down";
            else if (dd == 2)
                dir = "right";
            else if (dd == 3)
                dir = "left";
            winBuffer.Format("Joy %d %s %s", d, di.tszName, dir);
        }
        else
        {
            // NOTE(review): this branch assumes k >= 128 (a button); codes
            // 48..127 would index DIJOFS_BUTTON with a negative argument --
            // confirm such codes can never be stored as bindings.
            pDevices[d].device->GetObjectInfo(&di,
                                              (DWORD)DIJOFS_BUTTON(k-128),
                                              DIPH_BYOFFSET);
            winBuffer.Format(winResLoadString(IDS_JOY_BUTTON), d, di.tszName);
        }
    }
    else
    {
        // Joystick isn't plugged in. We can't decipher k, so just show its value.
        winBuffer.Format("Joy %d (%d)", d, k);
    }
    return winBuffer;
}
// Forward to the file-local checkKeys() (binding validation + defaults).
void DirectInput::checkKeys()
{
::checkKeys();
}
// Factory used by the rest of the app to obtain the DirectInput backend
// without depending on the concrete class.
Input *newDirectInput()
{
return new DirectInput;
}
// Key-configuration polling: scan joysticks and the keyboard for newly
// activated inputs and post them to the focused window.
void DirectInput::checkDevices()
{
checkJoypads();
checkKeyboard();
}
void DirectInput::activate()
{
    // (Re)acquire every created device, e.g. after the window regains focus.
    if (pDevices == NULL)
        return;
    for (int i = 0; i < numDevices; i++)
    {
        if (pDevices[i].device != NULL)
            pDevices[i].device->Acquire();
    }
}
// Load all key bindings from the registry (see winReadKeys()).
void DirectInput::loadSettings()
{
winReadKeys();
}
// Persist all key bindings to the registry (see winSaveKeys()).
void DirectInput::saveSettings()
{
winSaveKeys();
}
| gpl-2.0 |
ISTweak/android_kernel_nec_msm7x30 | fs/file_table.c | 1 | 10606 | /*
* linux/fs/file_table.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
/***********************************************************************/
/* Modified by */
/* (C) NEC CASIO Mobile Communications, Ltd. 2011 */
/***********************************************************************/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/ima.h>
#include <asm/atomic.h>
#include "internal.h"
/* sysctl tunables... */
struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;
/* Approximate count of allocated struct file objects; kept per-CPU to avoid
 * cache-line bouncing on every open/close. */
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
/* Deferred destructor scheduled by file_free(): runs after an RCU grace
 * period, when no lock-free reader can still reference the file, then drops
 * the credential reference and returns the object to the slab cache. */
static inline void file_free_rcu(struct rcu_head *head)
{
struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
put_cred(f->f_cred);
kmem_cache_free(filp_cachep, f);
}
/* Release a struct file: drop it from the global count, run the debug state
 * check, and hand the actual freeing to RCU (file_free_rcu) so concurrent
 * lock-free lookups stay safe. */
static inline void file_free(struct file *f)
{
percpu_counter_dec(&nr_files);
file_check_state(f);
call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
/*
 * Return the total number of open files in the system
 * (approximate: reads the per-CPU counter without summing all CPUs).
 */
static int get_nr_files(void)
{
return percpu_counter_read_positive(&nr_files);
}
/*
 * Return the maximum number of open files in the system
 * (the files-max sysctl limit).
 */
int get_max_files(void)
{
return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
/* Refresh the snapshot before the generic integer handler reports it. */
files_stat.nr_files = get_nr_files();
return proc_dointvec(table, write, buffer, lenp, ppos);
}
#else
/* Without sysctl+procfs support the handler is a stub. */
int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
#endif
/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
const struct cred *cred = current_cred();
static int old_max; /* highest count seen while over the limit; rate-limits the printk below */
struct file * f;
/*
 * Privileged users can go above max_files
 */
if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
/*
 * percpu_counters are inaccurate. Do an expensive check before
 * we go and fail.
 */
if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
goto over;
}
f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
if (f == NULL)
goto fail;
percpu_counter_inc(&nr_files);
f->f_cred = get_cred(cred);
if (security_file_alloc(f))
goto fail_sec;
INIT_LIST_HEAD(&f->f_u.fu_list);
atomic_long_set(&f->f_count, 1); /* the caller receives the initial reference */
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
return f;
over:
/* Ran out of filps - report that */
if (get_nr_files() > old_max) {
printk(KERN_INFO "VFS: file-max limit %d reached\n",
get_max_files());
old_max = get_nr_files();
}
goto fail;
fail_sec:
file_free(f); /* also undoes the nr_files increment taken above */
fail:
return NULL;
}
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair on which the new file will reside
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls reasons listed for init_file().  This is a
 * preferred interface to using init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
const struct file_operations *fop)
{
struct file *file;
file = get_empty_filp();
if (!file)
return NULL;
/* NOTE(review): f_path copies *path without taking new references —
 * presumably the caller's dentry/mnt references are handed over; confirm
 * against callers before changing. */
file->f_path = *path;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_mode = mode;
file->f_op = fop;
/*
 * These mounts don't really matter in practice
 * for r/o bind mounts.  They aren't userspace-
 * visible. We do this for consistency, and so
 * that we can do debugging checks at __fput()
 */
if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
file_take_write(file);
WARN_ON(mnt_clone_write(path->mnt));
}
ima_counts_get(file);
return file;
}
EXPORT_SYMBOL(alloc_file);
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
struct vfsmount *mnt = file->f_path.mnt;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
put_write_access(inode);
if (special_file(inode->i_mode))
return; /* special files never took mount write access (see alloc_file) */
if (file_check_writeable(file) != 0)
return; /* write access already released elsewhere (e.g. mark_files_ro) */
mnt_drop_write(mnt);
file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);
/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = dentry->d_inode;
might_sleep();
fsnotify_close(file);
/*
 * The function eventpoll_release() should be the first called
 * in the file cleanup chain.
 */
eventpoll_release(file);
locks_remove_flock(file);
/* Tear down async-notification state before calling ->release(). */
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op && file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
ima_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);
put_pid(file->f_owner.pid);
file_kill(file); /* unlink from the per-superblock file list */
if (file->f_mode & FMODE_WRITE)
drop_file_write_access(file);
file->f_path.dentry = NULL;
file->f_path.mnt = NULL;
file_free(file);
/* Drop the path references last, after nothing else needs them. */
dput(dentry);
mntput(mnt);
}
/* Drop one reference to @file; the last reference triggers __fput(). */
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count))
__fput(file);
}
EXPORT_SYMBOL(fput);
/*
 * Look up @fd in the current task's descriptor table and take a reference
 * on the file.  Returns NULL if the fd is not open or the file's refcount
 * already dropped to zero (it is being freed concurrently).
 */
struct file *fget(unsigned int fd)
{
struct file *file;
struct files_struct *files = current->files;
rcu_read_lock();
file = fcheck_files(files, fd);
if (file) {
if (!atomic_long_inc_not_zero(&file->f_count)) {
/* File object ref couldn't be taken */
rcu_read_unlock();
return NULL;
}
}
rcu_read_unlock();
return file;
}
EXPORT_SYMBOL(fget);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
struct file *file;
struct files_struct *files = current->files;
*fput_needed = 0;
if (likely((atomic_read(&files->count) == 1))) {
/* fd table not shared: the task's own reference keeps the file alive */
file = fcheck_files(files, fd);
} else {
rcu_read_lock();
file = fcheck_files(files, fd);
if (file) {
if (atomic_long_inc_not_zero(&file->f_count))
*fput_needed = 1; /* caller must drop this extra reference */
else
/* Didn't get the reference, someone's freed */
file = NULL;
}
rcu_read_unlock();
}
return file;
}
/*
 * Drop a reference taken on a file that was never fully set up.  Unlike
 * __fput() this skips ->release(), fsnotify, and path teardown.
 * NOTE(review): apparently intended for error paths before the file is
 * installed — confirm with callers.
 */
void put_filp(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
security_file_free(file);
file_kill(file);
file_free(file);
}
}
/* Move @file onto @list under files_lock; no-op when @list is NULL. */
void file_move(struct file *file, struct list_head *list)
{
if (!list)
return;
file_list_lock();
list_move(&file->f_u.fu_list, list);
file_list_unlock();
}
/* Unlink @file from whatever file list it is on, under files_lock. */
void file_kill(struct file *file)
{
/* NOTE(review): the list_empty() check runs unlocked; this is only safe
 * if concurrent list manipulation of this file cannot happen here. */
if (!list_empty(&file->f_u.fu_list)) {
file_list_lock();
list_del_init(&file->f_u.fu_list);
file_list_unlock();
}
}
/*
 * Returns 1 when the superblock can safely be remounted read-only:
 * no file is open for writing and no open file has a pending delete.
 */
int fs_may_remount_ro(struct super_block *sb)
{
struct file *file;
/* Check that no files are currently opened for writing. */
file_list_lock();
list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
struct inode *inode = file->f_path.dentry->d_inode;
/* File with pending delete? */
if (inode->i_nlink == 0)
goto too_bad;
/* Writeable file? */
if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
goto too_bad;
}
file_list_unlock();
return 1; /* Tis' cool bro. */
too_bad:
file_list_unlock();
return 0;
}
/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
struct file *f;
retry:
file_list_lock();
list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
struct vfsmount *mnt;
if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
continue;
if (!file_count(f))
continue;
if (!(f->f_mode & FMODE_WRITE))
continue;
spin_lock(&f->f_lock);
f->f_mode &= ~FMODE_WRITE;
spin_unlock(&f->f_lock);
if (file_check_writeable(f) != 0)
continue;
file_release_write(f);
mnt = mntget(f->f_path.mnt); /* pin the mount across the unlocked region */
file_list_unlock();
/*
 * This can sleep, so we can't hold
 * the file_list_lock() spinlock.
 */
mnt_drop_write(mnt);
mntput(mnt);
/* The list may have changed while unlocked: restart the scan. */
goto retry;
}
file_list_unlock();
}
/*
 * Boot-time initialization: create the 'filp' slab cache and derive the
 * default open-file limit from the amount of memory in the machine.
 */
void __init files_init(unsigned long mempages)
{
int n;
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
/*
 * One file with associated inode and dcache is very roughly 1K.
 * Per default don't use more than 10% of our memory for files.
 */
n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = n;
if (files_stat.max_files < NR_FILE)
files_stat.max_files = NR_FILE; /* never below the compile-time floor */
files_defer_init();
percpu_counter_init(&nr_files, 0);
}
| gpl-2.0 |
mfursov/ugene | src/corelibs/U2Gui/src/util/project/ProjectUpdater.cpp | 1 | 5153 | /**
* UGENE - Integrated Bioinformatics Tools.
* Copyright (C) 2008-2017 UniPro <ugene@unipro.ru>
* http://ugene.net
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <U2Core/DocumentModel.h>
#include <U2Core/U2DbiUtils.h>
#include <U2Core/U2ObjectDbi.h>
#include <U2Core/U2OpStatusUtils.h>
#include <U2Core/U2SafePoints.h>
#include <U2Gui/ObjectViewModel.h>
#include "DocumentFolders.h"
#include "ProjectUtils.h"
#include "ProjectUpdater.h"
namespace U2 {
// Background updater thread for shared-database documents.  The mutex is
// recursive because some locked methods call other locked methods.
ProjectUpdater::ProjectUpdater()
: QThread(), mutex(QMutex::Recursive), stopped(0)
{
moveToThread(this);
}
// Poll loop: refresh data every OBJECT_ACCESS_UPDATE_INTERVAL ms until stop().
void ProjectUpdater::run() {
#if (QT_VERSION < 0x050000) // Qt 4 build: plain flag read
while (0 == stopped) {
readData();
msleep(U2ObjectDbi::OBJECT_ACCESS_UPDATE_INTERVAL);
}
#else
// Qt 5+ build: read the stop flag with an explicit acquire load
while (0 == stopped.loadAcquire()) {
readData();
msleep(U2ObjectDbi::OBJECT_ACCESS_UPDATE_INTERVAL);
}
#endif
}
// Request termination; takes effect after the current sleep interval.
void ProjectUpdater::stop() {
stopped = 1;
}
// Mark the cached update for doc's database as stale; takeData() will not
// hand it out until readData() rebuilds it.
void ProjectUpdater::invalidate(const Document *doc) {
QMutexLocker lock(&mutex);
valid[doc->getDbiRef().dbiId] = false;
}
// Hand the latest folders update for doc to the caller, removing it from
// the cache.  Returns false if no fresh data is available.
bool ProjectUpdater::takeData(Document *doc, DocumentFoldersUpdate &result) {
QMutexLocker lock(&mutex);
CHECK(valid.value(doc->getDbiRef().dbiId, true), false);
CHECK(data.contains(doc->getDbiRef().dbiId), false);
result = data.take(doc->getDbiRef().dbiId);
return true;
}
// Start tracking doc in the refresh loop.
void ProjectUpdater::addDocument(Document *doc) {
QMutexLocker lock(&mutex);
docs << doc;
}
// Stop tracking doc and drop its cached update.
void ProjectUpdater::removeDocument(Document *doc) {
QMutexLocker lock(&mutex);
docs.removeAll(doc);
data.remove(doc->getDbiRef().dbiId);
}
// Snapshot the dbi refs of all tracked documents that are connected
// database documents and not state-locked.
QList<U2DbiRef> ProjectUpdater::getDbiRefs() {
QMutexLocker lock(&mutex);
QList<U2DbiRef> result;
foreach (Document *doc, docs) {
if (!ProjectUtils::isConnectedDatabaseDoc(doc) || doc->isStateLocked()) {
continue;
}
result << doc->getDbiRef();
}
return result;
}
// One refresh pass: for every tracked database, rebuild the folders update
// (slow, done without the lock) and publish it under the mutex.  The
// do/while re-runs the fetch if invalidate() raced with it.
void ProjectUpdater::readData() {
updateAccessedObjects();
QList<U2DbiRef> refs = getDbiRefs();
foreach (const U2DbiRef &dbiRef, refs) {
bool repeat = false;
do {
{
// Drop stale cached data and re-arm the valid flag before fetching.
QMutexLocker lock(&mutex);
if (!valid.value(dbiRef.dbiId, true)) {
data.take(dbiRef.dbiId);
valid[dbiRef.dbiId] = true;
}
}
U2OpStatus2Log os;
fetchObjectsInUse(dbiRef, os);
DocumentFoldersUpdate update(dbiRef, os); // may hit a remote DB; unlocked
if (!os.hasError()) {
QMutexLocker lock(&mutex);
if (valid.value(dbiRef.dbiId, true)) {
data[dbiRef.dbiId] = update;
repeat = false;
} else {
// repeat only if document is in the list
repeat = getDbiRefs().contains(dbiRef);
}
}
} while (repeat);
}
}
// Push the "objects in use" set for dbiRef into every tracked document
// backed by that database.
// NOTE(review): 'docs' is iterated here without holding 'mutex' while
// add/removeDocument may run on another thread — confirm the threading
// contract before relying on this.
void ProjectUpdater::fetchObjectsInUse(const U2DbiRef &dbiRef, U2OpStatus &os) {
DbiConnection connection(dbiRef, os);
SAFE_POINT(NULL != connection.dbi, "Invalid database connection", );
U2ObjectDbi *oDbi = connection.dbi->getObjectDbi();
SAFE_POINT(NULL != oDbi, "Invalid database connection", );
const QSet<U2DataId> usedObjects = oDbi->getAllObjectsInUse(os).toSet();
CHECK_OP(os, );
foreach (Document *doc, docs) {
if (doc->getDbiRef() == dbiRef) {
doc->setObjectsInUse(usedObjects);
}
}
}
// Touch the access time of every object shown in an active view so the
// database can tell which objects are currently in use.  Connections are
// pooled per dbiRef for the duration of the pass and freed at the end.
// FIX: a DbiConnection whose construction failed leaves con->dbi NULL;
// previously it was dereferenced unconditionally, crashing on a broken
// or dropped database connection.  Such connections are now skipped.
void ProjectUpdater::updateAccessedObjects() {
    const QList<GObjectViewWindow *> activeViews = GObjectViewUtils::getAllActiveViews();
    QMap<U2DbiRef, DbiConnection *> dbiRef2Connections; // when changing the code below, beware mem leaks
    U2OpStatus2Log os;
    foreach (GObjectViewWindow *view, activeViews) {
        foreach (GObject *object, view->getObjects()) {
            Document *doc = object->getDocument();
            if (NULL != doc && doc->isStateLocked()) {
                continue; // state-locked documents must not be touched
            }
            const U2EntityRef ref = object->getEntityRef();
            if (!dbiRef2Connections.contains(ref.dbiRef)) {
                dbiRef2Connections.insert(ref.dbiRef, new DbiConnection(ref.dbiRef, os));
            }
            DbiConnection *con = dbiRef2Connections.value(ref.dbiRef);
            if (NULL == con->dbi) {
                continue; // connection failed; error already logged via os
            }
            con->dbi->getObjectDbi()->updateObjectAccessTime(ref.entityId, os);
        }
    }
    qDeleteAll(dbiRef2Connections.values());
}
} // U2
| gpl-2.0 |
Ginfred/DeathCore | src/server/scripts/EasternKingdoms/ZulAman/boss_akilzon.cpp | 1 | 15854 | /*
* Copyright (C) 2013-2015 DeathCore <http://www.noffearrdeathproject.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
**/
#include "ScriptPCH.h"
#include "ObjectMgr.h"
#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "SpellScript.h"
#include "SpellAuraEffects.h"
#include "SpellAuras.h"
#include "MapManager.h"
#include "Spell.h"
#include "Vehicle.h"
#include "Cell.h"
#include "CellImpl.h"
#include "GridNotifiers.h"
#include "GridNotifiersImpl.h"
#include "CreatureTextMgr.h"
#include "Weather.h"
#include "zulaman.h"
// Creature-text group ids used by the Akil'zon script.
enum Yells
{
SAY_AGGRO = 0,
SAY_SUMMON_EAGLE = 1,
SAY_INTRO = 2,
SAY_BERSERK = 3,
SAY_SLAY = 4,
SAY_DEATH = 5,
ANN_STORM = 6
};
// Spell ids used by the encounter.
enum Spells
{
// Boss
SPELL_STATIC_DISRUPTION = 43622,
SPELL_STATIC_VISUAL = 45265,
SPELL_CALL_LIGHTNING = 43661,
SPELL_SUMMON_KIDNAPPER = 43621,
SPELL_ELECTRICAL_STORM = 43648,
SPELL_ELECTRICAL_STORM_VIS = 44007, // Cloud above.
SPELL_BERSERK = 45078,
SPELL_ELECTRICAL_OVERLOAD = 43658,
// Eagles & Kidnapper
SPELL_EAGLE_SWOOP = 44732,
SPELL_PLUCKED = 97318,
SPELL_ENTER_VEHICLE = 46598
};
// Summoned creature entries.
enum Mobs
{
MOB_SOARING_EAGLE = 24858,
MOB_AMANI_KIDNAPPER = 52648
};
// Coordinate bounds for eagle spawn positions.
enum EagleLocations
{
SE_LOC_X_MAX = 400,
SE_LOC_X_MIN = 335,
SE_LOC_Y_MAX = 1435,
SE_LOC_Y_MIN = 1370
};
// EventMap ids for the boss AI timers.
enum Events
{
EVENT_STATIC_DISRUPTION = 1,
EVENT_CALL_LIGHTNING,
EVENT_ELECTRICAL_STORM,
EVENT_SUMMON_EAGLES,
EVENT_SUMMON_KIDNAPPER,
EVENT_RELEASE_PLAYER,
EVENT_BERSERK
};
class boss_akilzon : public CreatureScript
{
public:
boss_akilzon() : CreatureScript("boss_akilzon") { }
CreatureAI* GetAI(Creature* creature) const
{
return new boss_akilzonAI(creature);
}
struct boss_akilzonAI : public BossAI
{
boss_akilzonAI(Creature* creature) : BossAI(creature, DATA_AKILZONEVENT), summons(me)
{
instance = creature->GetInstanceScript();
introDone = false;
}
InstanceScript* instance;
EventMap events;
SummonList summons;
bool introDone, isStorm;
Unit* stormTarget;
Unit* stormVehicle;
void Reset()
{
events.Reset();
summons.DespawnAll();
isStorm = false;
if (instance)
instance->SetData(DATA_AKILZONEVENT, NOT_STARTED);
SetWeather(WEATHER_STATE_FINE);
_Reset();
}
void MoveInLineOfSight(Unit* who)
{
if (!introDone && me->IsWithinDistInMap(who, 30) && who->GetTypeId() == TYPEID_PLAYER)
{
Talk(SAY_INTRO);
introDone = true;
}
}
void EnterCombat(Unit* /*who*/)
{
Talk(SAY_AGGRO);
if (instance)
{
instance->SetData(DATA_AKILZONEVENT, IN_PROGRESS);
instance->SendEncounterUnit(ENCOUNTER_FRAME_ENGAGE, me); // Add
}
events.ScheduleEvent(EVENT_STATIC_DISRUPTION, urand(10000, 20000));
events.ScheduleEvent(EVENT_ELECTRICAL_STORM, 60000);
events.ScheduleEvent(EVENT_SUMMON_EAGLES, 30000);
events.ScheduleEvent(EVENT_SUMMON_KIDNAPPER, urand(20000, 30000));
events.ScheduleEvent(EVENT_BERSERK, 10 * MINUTE * IN_MILLISECONDS);
_EnterCombat();
}
void JustSummoned(Creature* summon)
{
summons.Summon(summon);
summon->setActive(true);
if (me->isInCombat())
summon->AI()->DoZoneInCombat();
}
void KilledUnit(Unit* /*victim*/)
{
Talk(SAY_SLAY);
}
void JustDied(Unit* /*killer*/)
{
Talk(SAY_DEATH);
summons.DespawnAll();
if (instance)
{
instance->SetData(DATA_AKILZONEVENT, DONE);
instance->SendEncounterUnit(ENCOUNTER_FRAME_DISENGAGE, me); // Remove
}
_JustDied();
}
void EnterEvadeMode()
{
Reset();
SetCombatMovement(true);
me->GetMotionMaster()->MoveTargetedHome();
me->RemoveAllAuras();
if (instance)
{
instance->SetData(DATA_AKILZONEVENT, FAIL);
instance->SendEncounterUnit(ENCOUNTER_FRAME_DISENGAGE, me); // Remove
}
_EnterEvadeMode();
}
void SetWeather(uint32 weather)
{
Map* map = me->GetMap();
if (!map->IsDungeon())
return;
WorldPacket data(SMSG_WEATHER, 9);
data << uint32(weather) << float(0.5f) << uint8(0);
map->SendToPlayers(&data);
}
void UpdateAI(const uint32 diff)
{
if (!UpdateVictim() || me->HasUnitState(UNIT_STATE_CASTING))
return;
events.Update(diff);
while (uint32 eventId = events.ExecuteEvent())
{
switch(eventId)
{
case EVENT_BERSERK:
Talk(SAY_BERSERK);
DoCast(me, SPELL_BERSERK);
break;
case EVENT_STATIC_DISRUPTION:
if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
DoCast(target, SPELL_STATIC_DISRUPTION);
events.ScheduleEvent(EVENT_STATIC_DISRUPTION, urand(10000, 20000));
events.ScheduleEvent(EVENT_CALL_LIGHTNING, urand(2000, 5000));
break;
case EVENT_CALL_LIGHTNING:
DoCastVictim(SPELL_CALL_LIGHTNING);
break;
case EVENT_ELECTRICAL_STORM:
Talk(ANN_STORM);
SetWeather(WEATHER_STATE_HEAVY_RAIN);
SetCombatMovement(false);
isStorm = true;
if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 50.0f, true))
{
target->CastSpell(target, SPELL_ELECTRICAL_STORM_VIS, true); // Cloud visual.
target->SetUnitMovementFlags(MOVEMENTFLAG_DISABLE_GRAVITY);
target->SetCanFly(true);
target->MonsterMoveWithSpeed(target->GetPositionX(), target->GetPositionY(), target->GetPositionZ()+15, 2);
DoCast(target, SPELL_ELECTRICAL_STORM, false); // Real spell.
stormTarget = target;
}
events.ScheduleEvent(EVENT_ELECTRICAL_STORM, 60000);
events.ScheduleEvent(EVENT_RELEASE_PLAYER, 8100);
break;
case EVENT_SUMMON_EAGLES:
Talk(SAY_SUMMON_EAGLE);
float x, y, z;
me->GetPosition(x, y, z);
for (uint8 i = 0; i < 8; ++i)
{
if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
{
x = target->GetPositionX() + irand(-10, 10);
y = target->GetPositionY() + irand(-10, 10);
z = target->GetPositionZ() + urand(16, 20);
if (z > 95)
z = 95.0f - urand(0, 5);
}
Creature* creature = me->SummonCreature(MOB_SOARING_EAGLE, x, y, z, 0, TEMPSUMMON_CORPSE_DESPAWN, 0);
if (creature)
{
creature->AddThreat(me->GetVictim(), 1.0f);
creature->AI()->AttackStart(me->GetVictim());
}
}
events.ScheduleEvent(EVENT_SUMMON_EAGLES, urand(50000, 60000));
events.ScheduleEvent(EVENT_SUMMON_KIDNAPPER, urand(5000, 8000));
break;
case EVENT_SUMMON_KIDNAPPER:
me->CastSpell(me->GetPositionX(), me->GetPositionY(), me->GetPositionZ()+15, SPELL_SUMMON_KIDNAPPER, true);
break;
case EVENT_RELEASE_PLAYER:
SetWeather(WEATHER_STATE_FINE);
SetCombatMovement(true);
isStorm = false;
if (stormTarget)
{
stormTarget->SetUnitMovementFlags(MOVEMENTFLAG_NONE);
stormTarget->SetCanFly(false);
}
break;
}
}
if (!isStorm)
DoMeleeAttackIfReady();
}
};
};
// Soaring Eagle add: circles near players, then periodically dives at one
// (run speed 5.0) and casts Eagle Swoop when the dive completes.
class npc_akilzon_eagle : public CreatureScript
{
public:
npc_akilzon_eagle() : CreatureScript("npc_akilzon_eagle") { }
CreatureAI* GetAI(Creature* creature) const
{
return new npc_akilzon_eagleAI(creature);
}
struct npc_akilzon_eagleAI : public ScriptedAI
{
npc_akilzon_eagleAI(Creature* creature) : ScriptedAI(creature) { }
uint32 EagleSwoop_Timer; // 0 => ready to dive; otherwise keep circling
bool arrived; // true once the current movement order has finished
uint64 TargetGUID; // player being dived at (0 = none)
void Reset()
{
EagleSwoop_Timer = urand(5000, 10000);
arrived = true;
TargetGUID = 0;
me->SetUnitMovementFlags(MOVEMENTFLAG_DISABLE_GRAVITY);
}
void MovementInform(uint32, uint32)
{
arrived = true;
if (TargetGUID)
{
// Dive finished: deliver the swoop and return to cruise speed.
if (Unit* target = Unit::GetUnit(*me, TargetGUID))
DoCast(target, SPELL_EAGLE_SWOOP);
TargetGUID = 0;
me->SetSpeed(MOVE_RUN, 1.2f);
EagleSwoop_Timer = urand(5000, 10000);
}
}
void UpdateAI(const uint32 diff)
{
if (EagleSwoop_Timer <= diff)
EagleSwoop_Timer = 0;
else
EagleSwoop_Timer -= diff;
if (arrived)
{
if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
{
float x, y, z;
if (EagleSwoop_Timer)
{
// Still circling: pick a random point above and around the target.
x = target->GetPositionX() + irand(-10, 10);
y = target->GetPositionY() + irand(-10, 10);
z = target->GetPositionZ() + urand(10, 15);
if (z > 95)
z = 95.0f - urand(0, 5); // clamp flight height
}
else
{
// Timer elapsed: dive straight at the target at high speed.
target->GetContactPoint(me, x, y, z);
z += 2;
me->SetSpeed(MOVE_RUN, 5.0f);
TargetGUID = target->GetGUID();
}
me->GetMotionMaster()->MovePoint(0, x, y, z);
arrived = false;
}
}
}
};
};
// Amani Kidnapper: when more than one player is in the map, flies to a
// random player, applies SPELL_PLUCKED and seats the player on itself
// (SPELL_ENTER_VEHICLE), then roams randomly.
class npc_amani_kidnapper : public CreatureScript
{
public:
npc_amani_kidnapper() : CreatureScript("npc_amani_kidnapper") { }
CreatureAI* GetAI(Creature* creature) const
{
return new npc_amani_kidnapperAI(creature);
}
struct npc_amani_kidnapperAI : public ScriptedAI
{
npc_amani_kidnapperAI(Creature* creature) : ScriptedAI(creature) { }
bool arrived; // true until the grab flight is ordered
uint64 TargetGUID; // player being flown to (0 = none)
uint8 numb; // player count taken at combat start
void Reset()
{
arrived = true;
TargetGUID = 0;
me->SetUnitMovementFlags(MOVEMENTFLAG_DISABLE_GRAVITY);
numb = 0;
}
void EnterCombat(Unit* /*who*/)
{
// Count players so a lone player is never carried off.
Map::PlayerList const& players = me->GetMap()->GetPlayers();
for (Map::PlayerList::const_iterator itr = players.begin(); itr != players.end(); ++itr)
if (Player* player = itr->getSource())
++numb;
}
void MovementInform(uint32, uint32)
{
if (TargetGUID)
{
if (Unit* target = Unit::GetUnit(*me, TargetGUID))
{
me->AddAura(SPELL_PLUCKED, target);
target->CastSpell(me, SPELL_ENTER_VEHICLE, true);
}
TargetGUID = 0;
me->SetSpeed(MOVE_RUN, 1.2f);
me->GetMotionMaster()->MoveRandom(30.0f);
}
}
void UpdateAI(const uint32 diff)
{
// NOTE(review): 'arrived' is never set back to true after the grab, so
// each kidnapper picks up at most one player per spawn — confirm this
// is the intended mechanic.
if (arrived && numb > 1)
{
if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0))
{
float x, y, z;
target->GetContactPoint(me, x, y, z);
z += 3;
me->SetSpeed(MOVE_RUN, 5.0f);
TargetGUID = target->GetGUID();
me->GetMotionMaster()->MovePoint(0, x, y, z);
arrived = false;
}
}
else
DoMeleeAttackIfReady();
}
};
};
// Predicate for std::list::remove_if: true for objects whose exact 2D
// distance from _source is strictly less than _dist.
class ExactDistanceCheck
{
public:
ExactDistanceCheck(WorldObject* source, float dist) : _source(source), _dist(dist) {}
bool operator()(WorldObject* unit)
{
return _source->GetExactDist2d(unit) < _dist;
}
private:
WorldObject* _source;
float _dist;
};
// Electrical Storm damage target filter (spells 43657, 97300): removes the
// caster and every unit within 21 yards of the caster from the target list.
class spell_electrical_storm_dmg : public SpellScriptLoader // 43657, 97300
{
public:
spell_electrical_storm_dmg() : SpellScriptLoader("spell_electrical_storm_dmg") { }
class spell_electrical_storm_dmg_SpellScript : public SpellScript
{
PrepareSpellScript(spell_electrical_storm_dmg_SpellScript);
void TargetSelect(std::list<WorldObject*>& targets)
{
if (targets.empty())
return;
if (Unit* owner = GetCaster())
targets.remove(owner); // never hit the caster itself
// Set targets: drop everything closer than 21 yards to the caster.
targets.remove_if(ExactDistanceCheck(GetCaster(), 21.0f));
}
void Register()
{
OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_electrical_storm_dmg_SpellScript::TargetSelect, EFFECT_0, TARGET_UNIT_SRC_AREA_ALLY);
}
};
SpellScript* GetSpellScript() const
{
return new spell_electrical_storm_dmg_SpellScript();
}
};
// Script registration hook invoked by the core's script loader; the core
// takes ownership of the allocated script objects.
void AddSC_boss_akilzon()
{
new boss_akilzon();
new npc_akilzon_eagle();
new npc_amani_kidnapper();
new spell_electrical_storm_dmg();
}
MarcGroef/ros-vision | catkin_ws/src/bold/src/bold_vector.cpp | 1 | 2383 | #include "bold_vector.hpp"
namespace BOLD{
// Default constructor: zero-initialize all VECTOR_SIZE components.
BVector::BVector()
{
for(int i = 0; i < VECTOR_SIZE; ++i)
a[i] = 0;
}
// Construct from a C array; 'array' must hold at least VECTOR_SIZE doubles.
BVector::BVector(double* array)
{
for(int i = 0; i < VECTOR_SIZE; ++i)
a[i] = array[i];
}
// Construct from three explicit components.
BVector::BVector(double a1, double a2, double a3)
{
a[0] = a1;
a[1] = a2;
a[2] = a3;
}
// Overwrite all three components.
void BVector::set(double a1, double a2, double a3)
{
a[0] = a1;
a[1] = a2;
a[2] = a3;
}
// Copy the first three components from another vector.
void BVector::set(BVector b)
{
a[0] = b.getElement(0);
a[1] = b.getElement(1);
a[2] = b.getElement(2);
}
// Set a single component; note: 'index' is not range-checked here.
void BVector::setElement(int index, double value)
{
a[index]=value;
}
// Return the component at 'index', aborting on out-of-range access.
// FIX: the lower bound is now checked too — a negative index previously
// slipped past the single 'index >= VECTOR_SIZE' test and read out of
// bounds silently.
double BVector::getElement(int index)
{
  if (index < 0 || index >= VECTOR_SIZE)
  {
    std::cout << "BOLD::Vector::getElement() error: request index out of range..\n" ;
    std::abort();
  }
  return a[index];
}
// Dot product over all VECTOR_SIZE components.
double BVector::dot(BVector b)
{
double answer = 0;
for(int i = 0; i < VECTOR_SIZE; ++i)
answer += a[i] * b.getElement(i);
return answer;
}
// 3D cross product; aborts at runtime unless VECTOR_SIZE is exactly 3.
BVector BVector::cross(BVector b)
{
if(VECTOR_SIZE != 3)
{
std::cout << "BOLD::Vector::cross(BVector*) error: VECTOR_SIZE is not set to 3\n";
std::abort();
}
BVector ans(a[1] * b.getElement(2) - a[2] * b.getElement(1), a[2] * b.getElement(0) - a[0] * b.getElement(2), a[0] * b.getElement(1) - a[1] * b.getElement(0));
return ans;
}
// Euclidean length.  Note: always uses exactly the first three components,
// unlike dot() which loops over VECTOR_SIZE.
double BVector::abs()
{
return sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]) ;
}
// Length of the XY projection (the z component is ignored).
double BVector::abs2D()
{
return sqrt(a[0] * a[0] + a[1] * a[1]) ;
}
// Component-wise subtraction: returns this - b.
BVector BVector::minus(BVector b)
{
BVector c(a[0] - b.getElement(0), a[1] - b.getElement(1), a[2] - b.getElement(2));
return c;
}
// XY-only subtraction; the z component of the result is 0.
BVector BVector::minus2D(BVector b)
{
BVector c(a[0] - b.getElement(0), a[1] - b.getElement(1), 0);
return c;
}
// Component-wise addition: returns this + b.
BVector BVector::plus(BVector b)
{
BVector c(a[0] + b.getElement(0), a[1] + b.getElement(1), a[2] + b.getElement(2));
return c;
}
// XY-only addition; the z component of the result is 0.
BVector BVector::plus2D(BVector b)
{
BVector c(a[0] + b.getElement(0), a[1] + b.getElement(1), 0) ;
return c;
}
// Scalar division; no zero check (IEEE inf/NaN semantics apply).
BVector BVector::divByScalar(double b)
{
BVector c(a[0] / b, a[1] / b, a[2] / b);
return c;
}
// Scalar multiplication.
BVector BVector::timesScalar(double b)
{
BVector c(a[0] * b, a[1] * b, a[2] * b);
return c;
}
}
| gpl-2.0 |
sd44/TaobaoCppQtSDK | TaoApiCpp/request/TopatsSimbaCampkeywordeffectGetRequest.cpp | 1 | 1888 | #include <TaoApiCpp/request/TopatsSimbaCampkeywordeffectGetRequest.h>
// TOP API method name identifying this request.
QString TopatsSimbaCampkeywordeffectGetRequest::getApiMethodName() const {
return "taobao.topats.simba.campkeywordeffect.get";
}
QString TopatsSimbaCampkeywordeffectGetRequest::getCampaignId() const {
return campaignId;
}
// Setters store the value and mirror it into 'appParams', the map serialized
// into the API call's application-level parameters.
void TopatsSimbaCampkeywordeffectGetRequest::setCampaignId (qlonglong campaignId) {
this->campaignId = campaignId;
appParams.insert("campaign_id", QString::number(campaignId));
}
QString TopatsSimbaCampkeywordeffectGetRequest::getNick() const {
return nick;
}
// Stored and mirrored into appParams under "nick".
void TopatsSimbaCampkeywordeffectGetRequest::setNick (QString nick) {
this->nick = nick;
appParams.insert("nick", nick);
}
QString TopatsSimbaCampkeywordeffectGetRequest::getSearchType() const {
return searchType;
}
// Stored and mirrored into appParams under "search_type".
void TopatsSimbaCampkeywordeffectGetRequest::setSearchType (QString searchType) {
this->searchType = searchType;
appParams.insert("search_type", searchType);
}
QString TopatsSimbaCampkeywordeffectGetRequest::getSource() const {
return source;
}
// Stored and mirrored into appParams under "source".
void TopatsSimbaCampkeywordeffectGetRequest::setSource (QString source) {
this->source = source;
appParams.insert("source", source);
}
QString TopatsSimbaCampkeywordeffectGetRequest::getTimeSlot() const {
return timeSlot;
}
// Stored and mirrored into appParams under "time_slot".
void TopatsSimbaCampkeywordeffectGetRequest::setTimeSlot (QString timeSlot) {
this->timeSlot = timeSlot;
appParams.insert("time_slot", timeSlot);
}
// Execute the request and parse the reply into a response object.
// Ownership of the returned object passes to the caller.
TopatsSimbaCampkeywordeffectGetResponse *TopatsSimbaCampkeywordeffectGetRequest::getResponseClass(const QString &session,
const QString &accessToken)
{
TopatsSimbaCampkeywordeffectGetResponse *tmpResponse = new TopatsSimbaCampkeywordeffectGetResponse;
QString result = getWebReply(session, accessToken); // performs the network call
tmpResponse->setParser(TaoParser(result, taoApiInfo->format));
tmpResponse->parseResponse();
return tmpResponse;
}
| gpl-2.0 |
vijay03/optfs | fs/ext4bf/symlink.c | 1 | 1409 | /*
* linux/fs/ext4bf/symlink.c
*
* Only fast symlinks left here - the rest is done by generic code. AV, 1999
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/symlink.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext4bf symlink handling code
*/
#include <linux/fs.h>
#include "jbdbf.h"
#include <linux/namei.h>
#include "ext4bf.h"
#include "xattr.h"
/*
 * Fast symlink: the target path is stored inline in the inode's i_data
 * area, so no block needs to be read.
 */
static void *ext4bf_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct ext4bf_inode_info *ei = EXT4_I(dentry->d_inode);
nd_set_link(nd, (char *) ei->i_data);
return NULL; /* nothing for a put_link() counterpart to release */
}
/*
 * Page-based symlinks: the target is read through the generic page-cache
 * helpers (page_follow_link_light / page_put_link).
 */
const struct inode_operations ext4bf_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.setattr = ext4bf_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4bf_listxattr,
.removexattr = generic_removexattr,
#endif
};
/*
 * Fast symlinks: the target lives inside the inode (ext4bf_follow_link),
 * so no put_link hook is needed.
 */
const struct inode_operations ext4bf_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext4bf_follow_link,
.setattr = ext4bf_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4bf_listxattr,
.removexattr = generic_removexattr,
#endif
};
| gpl-2.0 |
broonie/regulator-2.6 | drivers/char/sysrq.c | 1 | 14223 | /* -*- linux-c -*-
*
* $Id: sysrq.c,v 1.15 1998/08/23 14:56:41 mj Exp $
*
* Linux Magic System Request Key Hacks
*
* (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
* based on ideas by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>
*
* (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* overhauled to use key registration
* based upon discusions in irc://irc.openprojects.net/#kernelnewbies
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/reboot.h>
#include <linux/sysrq.h>
#include <linux/kbd_kern.h>
#include <linux/proc_fs.h>
#include <linux/quotaops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for fsync_bdev() */
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/vt_kern.h>
#include <linux/workqueue.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/hrtimer.h>
#include <linux/oom.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
int __read_mostly __sysrq_enabled = 1;
/* Set by the "sysrq_always_enabled" boot parameter; overrides __sysrq_enabled. */
static int __read_mostly sysrq_always_enabled;
/* Nonzero when any sysrq handling is enabled. */
int sysrq_on(void)
{
return __sysrq_enabled || sysrq_always_enabled;
}
/*
 * A value of 1 means 'all', other nonzero values are an op mask:
 */
static inline int sysrq_on_mask(int mask)
{
return sysrq_always_enabled || __sysrq_enabled == 1 ||
(__sysrq_enabled & mask);
}
/* Early parsing of the "sysrq_always_enabled" kernel command-line flag. */
static int __init sysrq_always_enabled_setup(char *str)
{
sysrq_always_enabled = 1;
printk(KERN_INFO "debug: sysrq always enabled.\n");
return 1;
}
__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
/*
 * SysRq-0..8: set the console log level to the digit pressed.  The level
 * is temporarily forced to 7 so the confirmation line itself is visible.
 */
static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
{
int i;
i = key - '0';
console_loglevel = 7;
printk("Loglevel set to %d\n", i);
console_loglevel = i;
}
static struct sysrq_key_op sysrq_loglevel_op = {
.handler = sysrq_handle_loglevel,
.help_msg = "loglevel0-8",
.action_msg = "Changing Loglevel",
.enable_mask = SYSRQ_ENABLE_LOG,
};
#ifdef CONFIG_VT
/* SysRq-K: Secure Attention Key on the foreground console. */
static void sysrq_handle_SAK(int key, struct tty_struct *tty)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work); /* deferred: the real SAK work may sleep */
}
static struct sysrq_key_op sysrq_SAK_op = {
.handler = sysrq_handle_SAK,
.help_msg = "saK",
.action_msg = "SAK",
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
#define sysrq_SAK_op (*(struct sysrq_key_op *)0)
#endif
#ifdef CONFIG_VT
/* SysRq-R: put the foreground console keyboard back into the default
 * translation mode (unicode or xlate). */
static void sysrq_handle_unraw(int key, struct tty_struct *tty)
{
struct kbd_struct *kbd = &kbd_table[fg_console];
if (kbd)
kbd->kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
}
static struct sysrq_key_op sysrq_unraw_op = {
.handler = sysrq_handle_unraw,
.help_msg = "unRaw",
.action_msg = "Keyboard mode set to system default",
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
#define sysrq_unraw_op (*(struct sysrq_key_op *)0)
#endif /* CONFIG_VT */
#ifdef CONFIG_KEXEC
static void sysrq_handle_crashdump(int key, struct tty_struct *tty)
{
crash_kexec(get_irq_regs());
}
static struct sysrq_key_op sysrq_crashdump_op = {
.handler = sysrq_handle_crashdump,
.help_msg = "Crashdump",
.action_msg = "Trigger a crashdump",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
#define sysrq_crashdump_op (*(struct sysrq_key_op *)0)
#endif
/*
 * SysRq 'b': immediate reboot, without syncing or unmounting.  lockdep is
 * disabled first since we are about to pull the rug out from under it, and
 * interrupts are re-enabled so the restart path can make progress.
 */
static void sysrq_handle_reboot(int key, struct tty_struct *tty)
{
	lockdep_off();
	local_irq_enable();
	emergency_restart();
}
static struct sysrq_key_op sysrq_reboot_op = {
	.handler	= sysrq_handle_reboot,
	.help_msg	= "reBoot",
	.action_msg	= "Resetting",
	.enable_mask	= SYSRQ_ENABLE_BOOT,
};
/* SysRq 's': schedule an emergency sync of all mounted filesystems. */
static void sysrq_handle_sync(int key, struct tty_struct *tty)
{
	emergency_sync();
}
static struct sysrq_key_op sysrq_sync_op = {
	.handler	= sysrq_handle_sync,
	.help_msg	= "Sync",
	.action_msg	= "Emergency Sync",
	.enable_mask	= SYSRQ_ENABLE_SYNC,
};
/* SysRq 'q': dump clockevent devices and pending hrtimers. */
static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
{
	sysrq_timer_list_show();
}

static struct sysrq_key_op sysrq_show_timers_op = {
	.handler	= sysrq_handle_show_timers,
	.help_msg	= "show-all-timers(Q)",
	/* no enable_mask: always permitted when SysRq itself is enabled */
	.action_msg	= "Show clockevent devices & pending hrtimers (no others)",
};
/* SysRq 'u': schedule an emergency remount of all filesystems read-only. */
static void sysrq_handle_mountro(int key, struct tty_struct *tty)
{
	emergency_remount();
}
static struct sysrq_key_op sysrq_mountro_op = {
	.handler	= sysrq_handle_mountro,
	.help_msg	= "Unmount",
	.action_msg	= "Emergency Remount R/O",
	.enable_mask	= SYSRQ_ENABLE_REMOUNT,
};
#ifdef CONFIG_LOCKDEP
/* SysRq 'd': dump all locks currently held, via lockdep. */
static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
{
	debug_show_all_locks();
}

static struct sysrq_key_op sysrq_showlocks_op = {
	.handler	= sysrq_handle_showlocks,
	.help_msg	= "show-all-locks(D)",
	.action_msg	= "Show Locks Held",
};
#else
#define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
#endif
#ifdef CONFIG_SMP
/* Serializes the per-CPU backtrace output below. */
static DEFINE_SPINLOCK(show_lock);

/* Runs on each remote CPU (via IPI) and prints that CPU's backtrace. */
static void showacpu(void *dummy)
{
	unsigned long flags;

	/* Idle CPUs have no interesting backtrace. */
	if (idle_cpu(smp_processor_id()))
		return;

	spin_lock_irqsave(&show_lock, flags);
	printk(KERN_INFO "CPU%d:\n", smp_processor_id());
	show_stack(NULL, NULL);
	spin_unlock_irqrestore(&show_lock, flags);
}

/* Workqueue body: fan the backtrace request out to all other CPUs. */
static void sysrq_showregs_othercpus(struct work_struct *dummy)
{
	smp_call_function(showacpu, NULL, 0);
}

static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);

/*
 * SysRq 'l': show registers of this CPU immediately, then defer the other
 * CPUs to a workqueue, since smp_call_function() cannot be issued from
 * this (possibly interrupt) context.
 */
static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
{
	struct pt_regs *regs = get_irq_regs();

	if (regs) {
		printk(KERN_INFO "CPU%d:\n", smp_processor_id());
		show_regs(regs);
	}
	schedule_work(&sysrq_showallcpus);
}

static struct sysrq_key_op sysrq_showallcpus_op = {
	.handler	= sysrq_handle_showallcpus,
	.help_msg	= "aLlcpus",
	.action_msg	= "Show backtrace of all active CPUs",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
#endif
/* SysRq 'p': dump the registers at the point of interrupt, if any. */
static void sysrq_handle_showregs(int key, struct tty_struct *tty)
{
	struct pt_regs *regs = get_irq_regs();

	if (regs)
		show_regs(regs);
}
static struct sysrq_key_op sysrq_showregs_op = {
	.handler	= sysrq_handle_showregs,
	.help_msg	= "showPc",
	.action_msg	= "Show Regs",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
/* SysRq 't': dump the state and stack of every task. */
static void sysrq_handle_showstate(int key, struct tty_struct *tty)
{
	show_state();
}
static struct sysrq_key_op sysrq_showstate_op = {
	.handler	= sysrq_handle_showstate,
	.help_msg	= "showTasks",
	.action_msg	= "Show State",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
/* SysRq 'w': dump only tasks in uninterruptible (blocked) state. */
static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
{
	show_state_filter(TASK_UNINTERRUPTIBLE);
}
static struct sysrq_key_op sysrq_showstate_blocked_op = {
	.handler	= sysrq_handle_showstate_blocked,
	.help_msg	= "shoW-blocked-tasks",
	.action_msg	= "Show Blocked State",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
#ifdef CONFIG_TRACING
#include <linux/ftrace.h>

/* SysRq 'z': dump the ftrace ring buffer to the console. */
static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
	ftrace_dump();
}
static struct sysrq_key_op sysrq_ftrace_dump_op = {
	.handler	= sysrq_ftrace_dump,
	.help_msg	= "dumpZ-ftrace-buffer",
	.action_msg	= "Dump ftrace buffer",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
#else
#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
#endif
/* SysRq 'm': dump current memory usage information. */
static void sysrq_handle_showmem(int key, struct tty_struct *tty)
{
	show_mem();
}
static struct sysrq_key_op sysrq_showmem_op = {
	.handler	= sysrq_handle_showmem,
	.help_msg	= "showMem",
	.action_msg	= "Show Memory",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
/*
 * Signal sysrq helper function. Sends a signal to all user processes.
 * Kernel threads (no mm) and init are deliberately spared so the system
 * can survive the sweep.
 */
static void send_sig_all(int sig)
{
	struct task_struct *p;

	for_each_process(p) {
		if (p->mm && !is_global_init(p))
			/* Not swapper, init nor kernel thread */
			force_sig(sig, p);
	}
}
/*
 * SysRq 'e': SIGTERM every user process, then raise the loglevel so the
 * resulting kernel messages are visible on the console.
 */
static void sysrq_handle_term(int key, struct tty_struct *tty)
{
	send_sig_all(SIGTERM);
	console_loglevel = 8;
}
static struct sysrq_key_op sysrq_term_op = {
	.handler	= sysrq_handle_term,
	.help_msg	= "tErm",
	.action_msg	= "Terminate All Tasks",
	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
};
/* Workqueue body for SysRq 'f': run the OOM killer once. */
static void moom_callback(struct work_struct *ignored)
{
	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
}

static DECLARE_WORK(moom_work, moom_callback);

/*
 * SysRq 'f': manual OOM kill.  Deferred to a workqueue because the OOM
 * killer may sleep and this handler can run in atomic context.
 */
static void sysrq_handle_moom(int key, struct tty_struct *tty)
{
	schedule_work(&moom_work);
}
static struct sysrq_key_op sysrq_moom_op = {
	.handler	= sysrq_handle_moom,
	.help_msg	= "Full",
	.action_msg	= "Manual OOM execution",
	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
};
/* SysRq 'i': SIGKILL every user process, then raise the loglevel. */
static void sysrq_handle_kill(int key, struct tty_struct *tty)
{
	send_sig_all(SIGKILL);
	console_loglevel = 8;
}
static struct sysrq_key_op sysrq_kill_op = {
	.handler	= sysrq_handle_kill,
	.help_msg	= "kIll",
	.action_msg	= "Kill All Tasks",
	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
};
/* SysRq 'n': demote all real-time tasks to normal scheduling. */
static void sysrq_handle_unrt(int key, struct tty_struct *tty)
{
	normalize_rt_tasks();
}
static struct sysrq_key_op sysrq_unrt_op = {
	.handler	= sysrq_handle_unrt,
	.help_msg	= "Nice",
	.action_msg	= "Nice All RT Tasks",
	.enable_mask	= SYSRQ_ENABLE_RTNICE,
};
/* Key Operations table and lock */
static DEFINE_SPINLOCK(sysrq_key_table_lock);

/*
 * Dispatch table, indexed by sysrq_key_table_key2index(): slots 0-9 map the
 * digit keys, slots 10-35 map 'a'-'z'.  NULL slots are either reserved for
 * runtime registration via register_sysrq_key() or unused.
 */
static struct sysrq_key_op *sysrq_key_table[36] = {
	&sysrq_loglevel_op,		/* 0 */
	&sysrq_loglevel_op,		/* 1 */
	&sysrq_loglevel_op,		/* 2 */
	&sysrq_loglevel_op,		/* 3 */
	&sysrq_loglevel_op,		/* 4 */
	&sysrq_loglevel_op,		/* 5 */
	&sysrq_loglevel_op,		/* 6 */
	&sysrq_loglevel_op,		/* 7 */
	&sysrq_loglevel_op,		/* 8 */
	&sysrq_loglevel_op,		/* 9 */

	/*
	 * a: Don't use for system provided sysrqs, it is handled specially on
	 * sparc and will never arrive.
	 */
	NULL,				/* a */
	&sysrq_reboot_op,		/* b */
	&sysrq_crashdump_op,		/* c & ibm_emac driver debug */
	&sysrq_showlocks_op,		/* d */
	&sysrq_term_op,			/* e */
	&sysrq_moom_op,			/* f */
	/* g: May be registered by ppc for kgdb */
	NULL,				/* g */
	NULL,				/* h */
	&sysrq_kill_op,			/* i */
	NULL,				/* j */
	&sysrq_SAK_op,			/* k */
#ifdef CONFIG_SMP
	&sysrq_showallcpus_op,		/* l */
#else
	NULL,				/* l */
#endif
	&sysrq_showmem_op,		/* m */
	&sysrq_unrt_op,			/* n */
	/* o: This will often be registered as 'Off' at init time */
	NULL,				/* o */
	&sysrq_showregs_op,		/* p */
	&sysrq_show_timers_op,		/* q */
	&sysrq_unraw_op,		/* r */
	&sysrq_sync_op,			/* s */
	&sysrq_showstate_op,		/* t */
	&sysrq_mountro_op,		/* u */
	/* v: May be registered at init time by SMP VOYAGER */
	NULL,				/* v */
	&sysrq_showstate_blocked_op,	/* w */
	/* x: May be registered on ppc/powerpc for xmon */
	NULL,				/* x */
	/* y: May be registered on sparc64 for global register dump */
	NULL,				/* y */
	&sysrq_ftrace_dump_op,		/* z */
};
/*
 * Translate a key character into its sysrq_key_table slot:
 * '0'-'9' -> 0-9, 'a'-'z' -> 10-35, anything else -> -1.
 */
static int sysrq_key_table_key2index(int key)
{
	if (key >= '0' && key <= '9')
		return key - '0';
	if (key >= 'a' && key <= 'z')
		return key - 'a' + 10;
	return -1;
}
/*
 * Look up the operation currently bound to @key.  Exposed to modules.
 * Returns NULL for an invalid key or an empty table slot.  Caller must
 * hold sysrq_key_table_lock if the result is to stay valid.
 */
struct sysrq_key_op *__sysrq_get_key_op(int key)
{
	int idx = sysrq_key_table_key2index(key);

	if (idx == -1)
		return NULL;
	return sysrq_key_table[idx];
}
/*
 * Bind @op_p to @key in the dispatch table; silently ignores invalid keys.
 * Caller must hold sysrq_key_table_lock.
 */
static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
{
	int idx = sysrq_key_table_key2index(key);

	if (idx == -1)
		return;
	sysrq_key_table[idx] = op_p;
}
/*
 * This is the non-locking version of handle_sysrq. It must/can only be called
 * by sysrq key handlers, as they are inside of the lock
 */
void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
{
	struct sysrq_key_op *op_p;
	int orig_log_level;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&sysrq_key_table_lock, flags);
	/* Temporarily raise the loglevel so the banner reaches the console. */
	orig_log_level = console_loglevel;
	console_loglevel = 7;
	printk(KERN_INFO "SysRq : ");

	op_p = __sysrq_get_key_op(key);
	if (op_p) {
		/*
		 * Should we check for enabled operations (/proc/sysrq-trigger
		 * should not) and is the invoked operation enabled?
		 */
		if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
			printk("%s\n", op_p->action_msg);
			/* restore before invoking: handlers may print a lot */
			console_loglevel = orig_log_level;
			op_p->handler(key, tty);
		} else {
			printk("This sysrq operation is disabled.\n");
		}
	} else {
		printk("HELP : ");
		/* Only print the help msg once per handler */
		for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
			if (sysrq_key_table[i]) {
				int j;

				/* scan for the first slot sharing this op */
				for (j = 0; sysrq_key_table[i] !=
						sysrq_key_table[j]; j++)
					;
				if (j != i)
					/* duplicate op: help already shown */
					continue;
				printk("%s ", sysrq_key_table[i]->help_msg);
			}
		}
		printk("\n");
		console_loglevel = orig_log_level;
	}
	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
/*
 * This function is called by the keyboard handler when SysRq is pressed
 * and any other keycode arrives.
 */
void handle_sysrq(int key, struct tty_struct *tty)
{
	/* check_mask == 1: keyboard input honours the sysrq enable mask */
	if (sysrq_on())
		__handle_sysrq(key, tty, 1);
}
EXPORT_SYMBOL(handle_sysrq);
/*
 * Atomically replace the operation bound to @key.  The swap only succeeds
 * (return 0) when the slot currently holds exactly @remove_op_p; otherwise
 * the table is left untouched and -1 is returned.
 */
static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
                                struct sysrq_key_op *remove_op_p)
{
	unsigned long flags;
	int retval = -1;

	spin_lock_irqsave(&sysrq_key_table_lock, flags);
	if (__sysrq_get_key_op(key) == remove_op_p) {
		__sysrq_put_key_op(key, insert_op_p);
		retval = 0;
	}
	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
	return retval;
}
/* Install @op_p on @key; fails (-1) unless the slot is currently empty. */
int register_sysrq_key(int key, struct sysrq_key_op *op_p)
{
	return __sysrq_swap_key_ops(key, op_p, NULL);
}
EXPORT_SYMBOL(register_sysrq_key);
/* Remove @op_p from @key; fails (-1) unless the slot still holds @op_p. */
int unregister_sysrq_key(int key, struct sysrq_key_op *op_p)
{
	return __sysrq_swap_key_ops(key, NULL, op_p);
}
EXPORT_SYMBOL(unregister_sysrq_key);
#ifdef CONFIG_PROC_FS
/*
 * writing 'C' to /proc/sysrq-trigger is like sysrq-C
 */
static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	if (count) {
		char c;

		if (get_user(c, buf))
			return -EFAULT;
		/* check_mask == 0: /proc access bypasses the enable mask */
		__handle_sysrq(c, NULL, 0);
	}
	/* only the first byte is used, but report the whole write consumed */
	return count;
}

static const struct file_operations proc_sysrq_trigger_operations = {
	.write		= write_sysrq_trigger,
};

/* Create /proc/sysrq-trigger (root write-only) at boot. */
static int __init sysrq_init(void)
{
	proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations);
	return 0;
}
module_init(sysrq_init);
#endif
| gpl-2.0 |
stden/ejudge | compile_packet_5.c | 1 | 3678 | /* -*- c -*- */
/* $Id: compile_packet_5.c 7537 2013-11-06 11:49:33Z cher $ */
/* Copyright (C) 2005-2013 Alexander Chernov <cher@ejudge.ru> */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "ej_types.h"
#include "ej_limits.h"
#include "ej_byteorder.h"
#include "compile_packet.h"
#include "compile_packet_priv.h"
#include "pathutl.h"
#include "errlog.h"
#include "prepare.h"
#include "runlog.h"
#include "reuse_xalloc.h"
#include "reuse_logger.h"
#include "reuse_integral.h"
#include <stdlib.h>
#include <string.h>
#define FAIL_IF(c) if (c)do { errcode = __LINE__; goto failed; } while (0)
/*
 * Serialize a compile_reply_packet into its binary on-the-wire form.
 *
 * On success, returns 0 and stores a newly xcalloc()'d buffer in
 * *p_out_data with its size in *p_out_size (caller frees).  On any
 * validation failure, logs the failing __LINE__ via FAIL_IF/err() and
 * returns -1 without touching the output parameters.
 */
int
compile_reply_packet_write(const struct compile_reply_packet *in_data,
size_t *p_out_size, void **p_out_data)
{
	struct compile_reply_bin_packet *out_data = 0;
	unsigned char *out_ptr;
	int errcode = 0, out_size;

	/* Range-check every field before committing to an allocation. */
	FAIL_IF(in_data->judge_id < 0 || in_data->judge_id > EJ_MAX_JUDGE_ID);
	FAIL_IF(in_data->contest_id <= 0 || in_data->contest_id > EJ_MAX_CONTEST_ID);
	FAIL_IF(in_data->run_id < 0 || in_data->run_id > EJ_MAX_RUN_ID);
	FAIL_IF(in_data->status != RUN_OK && in_data->status != RUN_COMPILE_ERR && in_data->status != RUN_CHECK_FAILED && in_data->status != RUN_STYLE_ERR);
	FAIL_IF(in_data->ts1_us < 0 || in_data->ts1_us > USEC_MAX);
	FAIL_IF(in_data->ts2_us < 0 || in_data->ts2_us > USEC_MAX);
	FAIL_IF(in_data->ts3_us < 0 || in_data->ts3_us > USEC_MAX);
	FAIL_IF(in_data->run_block_len < 0 || in_data->run_block_len > EJ_MAX_COMPILE_RUN_BLOCK_LEN);

	/* Fixed header plus the (alignment-padded) variable run block. */
	out_size = sizeof(*out_data);
	out_size += pkt_bin_align(in_data->run_block_len);
	FAIL_IF(out_size < 0 || out_size > EJ_MAX_COMPILE_PACKET_SIZE);

	out_data = xcalloc(1, out_size);
	out_ptr = (unsigned char*) out_data + sizeof(*out_data);

	/* All multi-byte fields are converted to wire byte order. */
	out_data->packet_len = cvt_host_to_bin_32(out_size);
	out_data->version = cvt_host_to_bin_32(1);
	out_data->judge_id = cvt_host_to_bin_32(in_data->judge_id);
	out_data->contest_id = cvt_host_to_bin_32(in_data->contest_id);
	out_data->run_id = cvt_host_to_bin_32(in_data->run_id);
	out_data->status = cvt_host_to_bin_32(in_data->status);
	out_data->ts1 = cvt_host_to_bin_32(in_data->ts1);
	out_data->ts1_us = cvt_host_to_bin_32(in_data->ts1_us);
	out_data->ts2 = cvt_host_to_bin_32(in_data->ts2);
	out_data->ts2_us = cvt_host_to_bin_32(in_data->ts2_us);
	out_data->ts3 = cvt_host_to_bin_32(in_data->ts3);
	out_data->ts3_us = cvt_host_to_bin_32(in_data->ts3_us);
	out_data->use_uuid = cvt_host_to_bin_32(in_data->use_uuid);
	out_data->uuid[0] = cvt_host_to_bin_32(in_data->uuid[0]);
	out_data->uuid[1] = cvt_host_to_bin_32(in_data->uuid[1]);
	out_data->uuid[2] = cvt_host_to_bin_32(in_data->uuid[2]);
	out_data->uuid[3] = cvt_host_to_bin_32(in_data->uuid[3]);
	out_data->run_block_len = cvt_host_to_bin_32(in_data->run_block_len);
	if (in_data->run_block_len) {
		memcpy(out_ptr, in_data->run_block, in_data->run_block_len);
	}

	*p_out_size = (size_t) out_size;
	*p_out_data = out_data;
	return 0;

failed:
	err("compile_reply_packet_write: error %s, %d", "$Revision: 7537 $", errcode);
	xfree(out_data);
	return -1;
}
/*
* Local variables:
* compile-command: "make"
* c-font-lock-extra-types: ("\\sw+_t" "FILE")
* End:
*/
| gpl-2.0 |
koying/xbmc-vidonme | xbmc/cores/amlplayer/AMLUtils.cpp | 1 | 3224 | /*
* Copyright (C) 2011-2013 Team XBMC
* http://www.xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string>
/**
 * Write the string \a val to the sysfs node \a path.
 *
 * \return 0 when the file was opened and the whole string was written,
 *         -1 on open failure or short/failed write.
 *
 * Fix: the return value of write() was previously ignored, so a failed
 * or partial write was reported as success.
 */
int aml_set_sysfs_str(const char *path, const char *val)
{
  int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);
  if (fd >= 0)
  {
    size_t len = strlen(val);
    ssize_t written = write(fd, val, len);
    close(fd);
    return (written == (ssize_t)len) ? 0 : -1;
  }
  return -1;
}
/**
 * Read up to \a size - 1 bytes from the sysfs node \a path into \a valstr.
 *
 * \return 0 on success (valstr is NUL-terminated), -1 on failure (valstr
 *         is set to "fail", matching the original contract).
 *
 * Fixes: read() does not NUL-terminate, and the old code called
 * strlen() on the possibly-uninitialized buffer (undefined behaviour).
 * The terminator is now placed at the byte count actually read, and a
 * read() error is reported as failure instead of success.
 */
int aml_get_sysfs_str(const char *path, char *valstr, const int size)
{
  int fd = open(path, O_RDONLY);
  if (fd >= 0)
  {
    ssize_t len = read(fd, valstr, size - 1);
    close(fd);
    if (len >= 0)
    {
      valstr[len] = '\0';
      return 0;
    }
  }
  sprintf(valstr, "%s", "fail");
  return -1;
}
/**
 * Write the decimal representation of \a val to the sysfs node \a path.
 *
 * \return 0 when the whole value was written, -1 on open or write failure.
 *
 * Fixes: snprintf() replaces the unbounded sprintf(), and the write()
 * result is checked instead of being silently ignored.
 */
int aml_set_sysfs_int(const char *path, const int val)
{
  int fd = open(path, O_CREAT | O_RDWR | O_TRUNC, 0644);
  if (fd >= 0)
  {
    char bcmd[16];
    snprintf(bcmd, sizeof(bcmd), "%d", val);
    ssize_t written = write(fd, bcmd, strlen(bcmd));
    close(fd);
    return (written == (ssize_t)strlen(bcmd)) ? 0 : -1;
  }
  return -1;
}
/**
 * Read the sysfs node \a path and parse its content as a hexadecimal
 * integer (sysfs nodes used by this module report hex values).
 *
 * \return the parsed value, or 0 when the node cannot be opened/read.
 *
 * Fix: the old code read sizeof(bcmd) bytes and never NUL-terminated the
 * buffer, so strtol() could scan past it (undefined behaviour).  We now
 * read at most sizeof(bcmd)-1 bytes and terminate at the length actually
 * read.
 */
int aml_get_sysfs_int(const char *path)
{
  int val = 0;
  int fd = open(path, O_RDONLY);
  if (fd >= 0)
  {
    char bcmd[16];
    ssize_t len = read(fd, bcmd, sizeof(bcmd) - 1);
    if (len < 0)
      len = 0;
    bcmd[len] = '\0';
    val = strtol(bcmd, NULL, 16);
    close(fd);
  }
  return val;
}
/**
 * Detect (once) whether an AMLogic SoC is present by probing the HDMI TX
 * sysfs node.  The result is cached in a function-local static, so the
 * probe runs only on the first call.
 */
bool aml_present()
{
  static int has_aml = -1;
  if (has_aml < 0)
  {
    int probe = aml_get_sysfs_int("/sys/class/amhdmitx/amhdmitx0/disp_cap");
    has_aml = (probe != -1) ? 1 : 0;
  }
  return has_aml;
}
// Raise or lower the CPU frequency floor for hw audio playback.
// Detects the SoC generation once (via /proc/cpuinfo) and only applies the
// limit on M1-class parts; M3 SoCs do not need it.
void aml_cpufreq_limit(bool limit)
{
  static int audiotrack_cputype = -1;
  if (audiotrack_cputype == -1)
  {
    // default to m1 SoC
    audiotrack_cputype = 1;

    FILE *cpuinfo_fd = fopen("/proc/cpuinfo", "r");
    if (cpuinfo_fd)
    {
      char buffer[512];
      while (fgets(buffer, sizeof(buffer), cpuinfo_fd))
      {
        std::string stdbuffer(buffer);
        // any line mentioning MESON-M3 marks an M3-class SoC
        if (stdbuffer.find("MESON-M3") != std::string::npos)
        {
          audiotrack_cputype = 3;
          break;
        }
      }
      fclose(cpuinfo_fd);
    }
  }
  // On M1 SoCs, when playing hw decoded audio, we cannot drop below 600MHz
  // or risk hw audio dropouts. AML code does a 2X scaling based off
  // /sys/class/audiodsp/codec_mips but tests show that this is
  // seems risky so we just clamp to 600Mhz to be safe.
  if (audiotrack_cputype == 3)
    return;

  int cpufreq = 300000;
  if (limit)
    cpufreq = 600000;

  aml_set_sysfs_int("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", cpufreq);
}
/**
 * Toggle raw (passthrough) digital audio output on AMLogic hardware.
 * A no-op on platforms where no AMLogic SoC is detected.
 */
void aml_set_audio_passthrough(bool passthrough)
{
  if (!aml_present())
    return;
  aml_set_sysfs_int("/sys/class/audiodsp/digital_raw", passthrough ? 1 : 0);
}
| gpl-2.0 |
pandreetto/irdc4cube | src/parseconf.c | 1 | 4405 | #include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include "irdctype.h"
/*
 * Map a configuration keyword prefix (e.g. "vol+:", "stop:") to the
 * numeric remote-control button code used internally.
 * Returns 0 when the keyword is not recognised.
 */
int mapOption(const char* opt){
    static const struct { const char *prefix; int code; } button_map[] = {
        { "cancel:",    1 },
        { "1:",         2 },
        { "2:",         3 },
        { "3:",         4 },
        { "4:",         5 },
        { "5:",         6 },
        { "6:",         7 },
        { "7:",         8 },
        { "8:",         9 },
        { "9:",        10 },
        { "0:",        11 },
        { "tab:",      15 },
        { "epg:",      18 },
        { "pause:",    20 },
        { "rewind:",   23 },
        { "capture:",  25 },
        { "enter:",    28 },
        { "teletext:", 30 },
        { "preview:",  37 },
        { "list:",     38 },
        { "recall:",   46 },  /* must stay before the shorter "rec:" */
        { "favorite:", 47 },
        { "forward:",  49 },
        { "mute:",     50 },
        { "rec:",     102 },
        { "vol+:",    103 },
        { "ch+:",     104 },
        { "stop:",    107 },
        { "vol-:",    108 },
        { "ch-:",     109 },
    };
    size_t i;

    for (i = 0; i < sizeof(button_map) / sizeof(button_map[0]); i++){
        const char *p = button_map[i].prefix;
        if (strncmp(opt, p, strlen(p)) == 0)
            return button_map[i].code;
    }
    return 0;
}
/*
TODO
- check the patch for execve
- free structure procedure
- ret_code as an enumeration item
*/
/*
 * parse_conf() - read conf_filename and build the button -> command table.
 *
 * Each non-comment line ('#' starts a comment) has the form:
 *     <button-keyword> <executable> [arg1 arg2 ...]
 * Parsed entries are returned in *config_params (count in *n_params);
 * the array is owned by the caller on success.
 *
 * Returns IRDC_OK or an IRDC_* error code.
 *
 * Fixes over the previous revision:
 *  - config_raw is no longer leaked on error paths;
 *  - a read() that exactly filled the remaining buffer space is no longer
 *    silently dropped (old loop condition was len < left);
 *  - arg_list is reset for every line, so a line without arguments can no
 *    longer clobber the argument vector of the previous entry;
 *  - the malloc() of tmplist is now checked.
 * TODO: on error, strings already strdup()'d into tmplist still leak
 * (needs the "free structure procedure" noted above).
 */
int parse_conf(const char* conf_filename, int* n_params, config_item* config_params[]){

    size_t COUNT = 512;
    size_t MAXBUFLEN = 10000;
    char* LINESEP = "\n";
    char* CMDSEP = " \t";

    ssize_t len;
    char buf[512];
    char* config_raw = NULL;   /* NULL so free() at close_all is always safe */
    int left;
    int cmd_idx;
    int tmpOpt;

    char *line, *saveptr1;
    char *token, *saveptr2;

    config_item *tmplist = NULL;
    char** arg_list;

    int ret_code = IRDC_OK;

    FILE* conf_file = fopen(conf_filename, "r");
    if( conf_file==NULL ){
        return IRDC_NOCNFERR;
    }

    int conf_fd = fileno(conf_file);
    if( conf_fd<0 ){
        ret_code = IRDC_NOCNFERR;
        goto close_all;
    }

    memset(buf, 0, COUNT);

    config_raw = malloc(MAXBUFLEN);
    if( config_raw==NULL ){
        ret_code = IRDC_MEMERR;
        goto close_all;
    }
    memset(config_raw, 0, MAXBUFLEN);
    left = MAXBUFLEN-1;

    /* Slurp the whole file into config_raw (at most MAXBUFLEN-1 bytes). */
    len = read(conf_fd, buf, COUNT);
    while( len>0 && len<=left ){
        strncat(config_raw, buf, len);
        left = left - len;
        memset(buf, 0, COUNT);
        len = read(conf_fd, buf, COUNT);
    }

    if( len<0 ){
        ret_code = IRDC_CNFREADERR;
        goto close_all;
    }

    if( len>0 ){
        /* data still pending but the buffer is full: file too long */
        ret_code = IRDC_CNFLONGERR;
        goto close_all;
    }

    tmplist = malloc(NUMOFBUTTONS * sizeof(config_item));
    if( tmplist==NULL ){
        ret_code = IRDC_MEMERR;
        goto close_all;
    }
    *n_params = 0;

    line = strtok_r(config_raw, LINESEP, &saveptr1);
    while( line!=NULL ){
        if( line[0]!='#' ){
            cmd_idx = 0;
            arg_list = NULL;   /* reset per line: see fix list above */
            token = strtok_r(line, CMDSEP, &saveptr2);
            while( token!=NULL ){
                switch(cmd_idx){
                case 0:
                    /* first token: the button keyword */
                    tmpOpt = mapOption(token);
                    if (tmpOpt==0) goto next_line;   /* unknown: skip line */
                    tmplist[*n_params].sel = tmpOpt;
                    break;
                case 1:
                    /* second token: the executable to launch */
                    tmplist[*n_params].executable = strdup(token);
                    tmplist[*n_params].args = NULL;
                    break;
                case 2:
                    /* first argument: allocate the argv-style vector */
                    arg_list = malloc(NUMOFARGS * sizeof(char*));
                    if (arg_list==NULL){
                        ret_code = IRDC_MEMERR;
                        goto close_all;
                    }
                    tmplist[*n_params].args = arg_list;
                    arg_list[cmd_idx-2] = strdup(""); /* just a path */
                    /* fall through: this token also becomes argv[1] */
                default:
                    if (cmd_idx>NUMOFARGS-2){
                        ret_code = IRDC_TOOARGSERR;
                        goto close_all;
                    }
                    arg_list[cmd_idx-1] = strdup(token);
                }
                token = strtok_r(NULL, CMDSEP, &saveptr2);
                cmd_idx++;
            }
            if (arg_list!=NULL){
                arg_list[cmd_idx-1] = NULL;   /* NULL-terminate the argv */
            }
            if (cmd_idx>0){
                *n_params = (*n_params) + 1;
            }
        }
next_line:
        line = strtok_r(NULL, LINESEP, &saveptr1);
    }

    *config_params = tmplist;

close_all:
    free(config_raw);   /* free(NULL) is a no-op */
    fclose(conf_file);
    return ret_code;
}
| gpl-2.0 |
f12c/android_kernel_fujitsu_f11eif | drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c | 1 | 206975 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +19(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data-com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstraee 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-----------------------------------------------------------------------+
| Project : API APCI1710 | Compiler : gcc |
| Module name : INC_CPT.C | Version : 2.96 |
+-------------------------------+---------------------------------------+
| Project manager: Eric Stolz | Date : 02/12/2002 |
+-----------------------------------------------------------------------+
| Description : APCI-1710 incremental counter module |
| |
| |
+-----------------------------------------------------------------------+
| UPDATES |
+-----------------------------------------------------------------------+
| Date | Author | Description of updates |
+----------+-----------+------------------------------------------------+
| | | |
|----------|-----------|------------------------------------------------|
| 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
| | | available |
+-----------------------------------------------------------------------+
| 29/06/01 | Guinot C. | - 1100/0231 -> 0701/0232 |
| | | See i_APCI1710_DisableFrequencyMeasurement |
+-----------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Included files |
+----------------------------------------------------------------------------+
*/
#include "APCI1710_INCCPT.h"
/*
+----------------------------------------------------------------------------+
| int i_APCI1710_InsnConfigINCCPT(struct comedi_device *dev,struct comedi_subdevice *s,
struct comedi_insn *insn,unsigned int *data)
+----------------------------------------------------------------------------+
| Task : Configuration function for INC_CPT |
+----------------------------------------------------------------------------+
| Input Parameters : |
+----------------------------------------------------------------------------+
| Output Parameters : *data
+----------------------------------------------------------------------------+
| Return Value : |
+----------------------------------------------------------------------------+
*/
/*
 * Comedi INSN_CONFIG dispatcher for the incremental-counter module.
 * The sub-command is encoded in the chanspec channel field; the module
 * number travels in the chanspec aref field and data[] carries the
 * per-command arguments.  Returns insn->n on success, or the (negative)
 * error code of the invoked helper.
 */
int i_APCI1710_InsnConfigINCCPT(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned int ui_ConfigType;
	int i_ReturnValue = 0;

	ui_ConfigType = CR_CHAN(insn->chanspec);

	printk("\nINC_CPT");

	devpriv->tsk_Current = current;	/* Save the current process task structure */
	switch (ui_ConfigType) {
	case APCI1710_INCCPT_INITCOUNTER:
		/* data[0..4]: range, 1st/2nd counter modus and option */
		i_ReturnValue = i_APCI1710_InitCounter(dev,
			CR_AREF(insn->chanspec),
			(unsigned char) data[0],
			(unsigned char) data[1],
			(unsigned char) data[2], (unsigned char) data[3], (unsigned char) data[4]);
		break;

	case APCI1710_INCCPT_COUNTERAUTOTEST:
		/* data[0] returns the autotest result */
		i_ReturnValue = i_APCI1710_CounterAutoTest(dev,
			(unsigned char *) &data[0]);
		break;

	case APCI1710_INCCPT_INITINDEX:
		i_ReturnValue = i_APCI1710_InitIndex(dev,
			CR_AREF(insn->chanspec),
			(unsigned char) data[0],
			(unsigned char) data[1], (unsigned char) data[2], (unsigned char) data[3]);
		break;

	case APCI1710_INCCPT_INITREFERENCE:
		i_ReturnValue = i_APCI1710_InitReference(dev,
			CR_AREF(insn->chanspec), (unsigned char) data[0]);
		break;

	case APCI1710_INCCPT_INITEXTERNALSTROBE:
		i_ReturnValue = i_APCI1710_InitExternalStrobe(dev,
			CR_AREF(insn->chanspec),
			(unsigned char) data[0], (unsigned char) data[1]);
		break;

	case APCI1710_INCCPT_INITCOMPARELOGIC:
		i_ReturnValue = i_APCI1710_InitCompareLogic(dev,
			CR_AREF(insn->chanspec), (unsigned int) data[0]);
		break;

	case APCI1710_INCCPT_INITFREQUENCYMEASUREMENT:
		/* data[0] doubles as input (PCI clock) and output (timing) */
		i_ReturnValue = i_APCI1710_InitFrequencyMeasurement(dev,
			CR_AREF(insn->chanspec),
			(unsigned char) data[0],
			(unsigned char) data[1], (unsigned int) data[2], (unsigned int *) &data[0]);
		break;

	default:
		printk("Insn Config : Config Parameter Wrong\n");
	}

	if (i_ReturnValue >= 0)
		i_ReturnValue = insn->n;
	return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitCounter |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_CounterRange, |
| unsigned char_ b_FirstCounterModus, |
| unsigned char_ b_FirstCounterOption, |
| unsigned char_ b_SecondCounterModus, |
| unsigned char_ b_SecondCounterOption) |
+----------------------------------------------------------------------------+
| Task : Configure the counter operating mode from selected |
| module (b_ModulNbr). You must calling this function be |
| for you call any other function witch access of |
| counters. |
| |
| Counter range |
| ------------- |
| +------------------------------------+-----------------------------------+ |
| | Parameter Passed value | Description | |
| |------------------------------------+-----------------------------------| |
| |b_ModulNbr APCI1710_16BIT_COUNTER | The module is configured for | |
| | | two 16-bit counter. | |
| | | - b_FirstCounterModus and | |
| | | b_FirstCounterOption | |
| | | configure the first 16 bit | |
| | | counter. | |
| | | - b_SecondCounterModus and | |
| | | b_SecondCounterOption | |
| | | configure the second 16 bit | |
| | | counter. | |
| |------------------------------------+-----------------------------------| |
| |b_ModulNbr APCI1710_32BIT_COUNTER | The module is configured for one | |
| | | 32-bit counter. | |
| | | - b_FirstCounterModus and | |
| | | b_FirstCounterOption | |
| | | configure the 32 bit counter. | |
| | | - b_SecondCounterModus and | |
| | | b_SecondCounterOption | |
| | | are not used and have no | |
| | | importance. | |
| +------------------------------------+-----------------------------------+ |
| |
| Counter operating mode |
| ---------------------- |
| |
| +--------------------+-------------------------+-------------------------+ |
| | Parameter | Passed value | Description | |
| |--------------------+-------------------------+-------------------------| |
| |b_FirstCounterModus | APCI1710_QUADRUPLE_MODE | In the quadruple mode, | |
| | or | | the edge analysis | |
| |b_SecondCounterModus| | circuit generates a | |
| | | | counting pulse from | |
| | | | each edge of 2 signals | |
| | | | which are phase shifted | |
| | | | in relation to each | |
| | | | other. | |
| |--------------------+-------------------------+-------------------------| |
| |b_FirstCounterModus | APCI1710_DOUBLE_MODE | Functions in the same | |
| | or | | way as the quadruple | |
| |b_SecondCounterModus| | mode, except that only | |
| | | | two of the four edges | |
| | | | are analysed per | |
| | | | period | |
| |--------------------+-------------------------+-------------------------| |
| |b_FirstCounterModus | APCI1710_SIMPLE_MODE | Functions in the same | |
| | or | | way as the quadruple | |
| |b_SecondCounterModus| | mode, except that only | |
| | | | one of the four edges | |
| | | | is analysed per | |
| | | | period. | |
| |--------------------+-------------------------+-------------------------| |
| |b_FirstCounterModus | APCI1710_DIRECT_MODE | In the direct mode the | |
| | or | | both edge analysis | |
| |b_SecondCounterModus| | circuits are inactive. | |
| | | | The inputs A, B in the | |
| | | | 32-bit mode or A, B and | |
| | | | C, D in the 16-bit mode | |
| | | | represent, each, one | |
| | | | clock pulse gate circuit| |
| | | | There by frequency and | |
| | | | pulse duration | |
| | | | measurements can be | |
| | | | performed. | |
| +--------------------+-------------------------+-------------------------+ |
| |
| |
| IMPORTANT! |
| If you have configured the module for two 16-bit counter, a mixed |
| mode with a counter in quadruple/double/single mode |
| and the other counter in direct mode is not possible! |
| |
| |
| Counter operating option for quadruple/double/simple mode |
| --------------------------------------------------------- |
| |
| +----------------------+-------------------------+------------------------+|
| | Parameter | Passed value | Description ||
| |----------------------+-------------------------+------------------------||
| |b_FirstCounterOption | APCI1710_HYSTERESIS_ON | In both edge analysis ||
| | or | | circuits is available ||
| |b_SecondCounterOption | | one hysteresis circuit.||
| | | | It suppresses each ||
| | | | time the first counting||
| | | | pulse after a change ||
| | | | of rotation. ||
| |----------------------+-------------------------+------------------------||
| |b_FirstCounterOption | APCI1710_HYSTERESIS_OFF | The first counting ||
| | or | | pulse is not suppress ||
| |b_SecondCounterOption | | after a change of ||
| | | | rotation. ||
| +----------------------+-------------------------+------------------------+|
| |
| |
| IMPORTANT! |
| This option are only avaible if you have selected the direct mode. |
| |
| |
| Counter operating option for direct mode |
| ---------------------------------------- |
| |
| +----------------------+--------------------+----------------------------+ |
| | Parameter | Passed value | Description | |
| |----------------------+--------------------+----------------------------| |
| |b_FirstCounterOption | APCI1710_INCREMENT | The counter increment for | |
| | or | | each counting pulse | |
| |b_SecondCounterOption | | | |
| |----------------------+--------------------+----------------------------| |
| |b_FirstCounterOption | APCI1710_DECREMENT | The counter decrement for | |
| | or | | each counting pulse | |
| |b_SecondCounterOption | | | |
| +----------------------+--------------------+----------------------------+ |
| |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
| unsigned char_ b_ModulNbr : Module number to |
| configure (0 to 3) |
| unsigned char_ b_CounterRange : Selection form counter |
| range. |
| unsigned char_ b_FirstCounterModus : First counter operating |
| mode. |
| unsigned char_ b_FirstCounterOption : First counter option. |
| unsigned char_ b_SecondCounterModus : Second counter operating |
| mode. |
| unsigned char_ b_SecondCounterOption : Second counter option. |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The module is not a counter module |
| -3: The selected counter range is wrong. |
| -4: The selected first counter operating mode is wrong. |
| -5: The selected first counter operating option is wrong|
| -6: The selected second counter operating mode is wrong.|
| -7: The selected second counter operating option is |
| wrong. |
+----------------------------------------------------------------------------+
*/
/*
 * Configure the incremental counter of the selected module.
 *
 * Every parameter is validated first (in the same order as the error
 * codes -2..-7 documented above); the mode register is built and
 * written to the board only when all checks passed.  The second
 * counter modus/option are only meaningful — and only checked — for
 * the 16-bit counter range.
 *
 * Returns 0 on success or a negative error code (see header comment).
 */
int i_APCI1710_InitCounter(struct comedi_device *dev,
	unsigned char b_ModulNbr,
	unsigned char b_CounterRange,
	unsigned char b_FirstCounterModus,
	unsigned char b_FirstCounterOption,
	unsigned char b_SecondCounterModus, unsigned char b_SecondCounterOption)
{
	/* The selected module must be an incremental counter module */
	if ((devpriv->s_BoardInfos.
			dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) !=
		APCI1710_INCREMENTAL_COUNTER) {
		DPRINTK("The module is not a counter module\n");
		return -2;
	}

	/* Check the counter range selection */
	if (b_CounterRange != APCI1710_16BIT_COUNTER &&
		b_CounterRange != APCI1710_32BIT_COUNTER) {
		DPRINTK("The selected counter range is wrong\n");
		return -3;
	}

	/* Check the first counter operating mode */
	if (b_FirstCounterModus != APCI1710_QUADRUPLE_MODE &&
		b_FirstCounterModus != APCI1710_DOUBLE_MODE &&
		b_FirstCounterModus != APCI1710_SIMPLE_MODE &&
		b_FirstCounterModus != APCI1710_DIRECT_MODE) {
		DPRINTK("The selected first counter operating mode is wrong\n");
		return -4;
	}

	/*
	 * Check the first counter option.  Direct mode takes the
	 * increment/decrement options, the quadrature modes take the
	 * hysteresis on/off options.
	 */
	if (b_FirstCounterModus == APCI1710_DIRECT_MODE) {
		if (b_FirstCounterOption != APCI1710_INCREMENT &&
			b_FirstCounterOption != APCI1710_DECREMENT) {
			DPRINTK("The selected first counter operating option is wrong\n");
			return -5;
		}
	} else {
		if (b_FirstCounterOption != APCI1710_HYSTERESIS_ON &&
			b_FirstCounterOption != APCI1710_HYSTERESIS_OFF) {
			DPRINTK("The selected first counter operating option is wrong\n");
			return -5;
		}
	}

	/* The second counter exists only in the 16-bit counter range */
	if (b_CounterRange == APCI1710_16BIT_COUNTER) {
		/*
		 * Check the second counter operating mode: direct mode
		 * must be used for both counters or for neither.
		 */
		if (!((b_FirstCounterModus != APCI1710_DIRECT_MODE &&
					(b_SecondCounterModus ==
						APCI1710_QUADRUPLE_MODE ||
					 b_SecondCounterModus ==
						APCI1710_DOUBLE_MODE ||
					 b_SecondCounterModus ==
						APCI1710_SIMPLE_MODE)) ||
				(b_FirstCounterModus == APCI1710_DIRECT_MODE &&
					b_SecondCounterModus ==
						APCI1710_DIRECT_MODE))) {
			DPRINTK("The selected second counter operating mode is wrong\n");
			return -6;
		}

		/* Check the second counter option (same rule as the first) */
		if (!((b_SecondCounterModus == APCI1710_DIRECT_MODE &&
					(b_SecondCounterOption ==
						APCI1710_INCREMENT ||
					 b_SecondCounterOption ==
						APCI1710_DECREMENT)) ||
				(b_SecondCounterModus != APCI1710_DIRECT_MODE &&
					(b_SecondCounterOption ==
						APCI1710_HYSTERESIS_ON ||
					 b_SecondCounterOption ==
						APCI1710_HYSTERESIS_OFF)))) {
			DPRINTK("The selected second counter operating option is wrong\n");
			return -7;
		}
	}

	/* Build the shadow copy of mode register 1 */
	if (b_CounterRange == APCI1710_32BIT_COUNTER) {
		/* 32-bit range: first counter fields are used unmasked */
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister1 =
			b_CounterRange |
			b_FirstCounterModus | b_FirstCounterOption;
	} else {
		/*
		 * 16-bit range: the first and second counter share the
		 * register, so each contribution is masked to its bits.
		 */
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister1 =
			b_CounterRange |
			(b_FirstCounterModus & 0x5) |
			(b_FirstCounterOption & 0x20) |
			(b_SecondCounterModus & 0xA) |
			(b_SecondCounterOption & 0x40);

		/* Direct mode sets its own flag bit on top */
		if (b_FirstCounterModus == APCI1710_DIRECT_MODE) {
			devpriv->s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.s_ModeRegister.
				s_ByteModeRegister.b_ModeRegister1 |=
				APCI1710_DIRECT_MODE;
		}
	}

	/* Write the configuration to the board */
	outl(devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));

	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit = 1;

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_CounterAutoTest |
| (unsigned char_ b_BoardHandle, |
| unsigned char *_ pb_TestStatus) |
+----------------------------------------------------------------------------+
| Task : A test mode is intended for testing the component and |
| the connected periphery. All the 8-bit counter chains |
| are operated internally as down counters. |
| Independently from the external signals, |
| all the four 8-bit counter chains are decremented in |
| parallel by each negative clock pulse edge of CLKX. |
| |
| Counter auto test conclusion |
| ---------------------------- |
| +-----------------+-----------------------------+ |
| | pb_TestStatus | Error description | |
| | mask | | |
| |-----------------+-----------------------------| |
| | 0000 | No error detected | |
| |-----------------|-----------------------------| |
| | 0001 | Error detected of counter 0 | |
| |-----------------|-----------------------------| |
| | 0010 | Error detected of counter 1 | |
| |-----------------|-----------------------------| |
| | 0100 | Error detected of counter 2 | |
| |-----------------|-----------------------------| |
| | 1000 | Error detected of counter 3 | |
| +-----------------+-----------------------------+ |
+----------------------------------------------------------------------------+
| Input Parameters  : unsigned char_ b_BoardHandle : Handle of board APCI-1710  |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_TestStatus : Auto test conclusion. See table|
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
+----------------------------------------------------------------------------+
*/
/*
 * Run the hardware self-test of every incremental counter module.
 *
 * In test mode all four 8-bit counter chains of a module are decremented
 * in parallel, so the four bytes of the latched 32-bit value must all be
 * equal.  For every module whose bytes disagree, bit (1 << module) is
 * set in *pb_TestStatus (see the conclusion table above).
 *
 * Returns 0 on success, or -2 when no incremental counter module is
 * present on the board.
 *
 * Fix: the original joined the byte comparisons with '&&', so an error
 * was only reported when byte 0 differed from ALL three other bytes;
 * a single faulty chain (e.g. only byte 2 wrong) went undetected.
 * Any mismatch now flags the module ('||').
 */
int i_APCI1710_CounterAutoTest(struct comedi_device *dev, unsigned char *pb_TestStatus)
{
	unsigned char b_ModulCpt = 0;
	int i_ReturnValue = 0;
	unsigned int dw_LatchValue;

	*pb_TestStatus = 0;

	/********************************/
	/* Test if counter module found */
	/********************************/
	if ((devpriv->s_BoardInfos.
			dw_MolduleConfiguration[0] & 0xFFFF0000UL) ==
		APCI1710_INCREMENTAL_COUNTER
		|| (devpriv->s_BoardInfos.
			dw_MolduleConfiguration[1] & 0xFFFF0000UL) ==
		APCI1710_INCREMENTAL_COUNTER
		|| (devpriv->s_BoardInfos.
			dw_MolduleConfiguration[2] & 0xFFFF0000UL) ==
		APCI1710_INCREMENTAL_COUNTER
		|| (devpriv->s_BoardInfos.
			dw_MolduleConfiguration[3] & 0xFFFF0000UL) ==
		APCI1710_INCREMENTAL_COUNTER) {
		for (b_ModulCpt = 0; b_ModulCpt < 4; b_ModulCpt++) {
			/*******************************/
			/* Test if incremental counter */
			/*******************************/
			if ((devpriv->s_BoardInfos.
					dw_MolduleConfiguration[b_ModulCpt] &
					0xFFFF0000UL) ==
				APCI1710_INCREMENTAL_COUNTER) {
				/******************/
				/* Start the test */
				/******************/
				outl(3, devpriv->s_BoardInfos.
					ui_Address + 16 + (64 * b_ModulCpt));
				/*********************/
				/* Latch the counter */
				/*********************/
				outl(1, devpriv->s_BoardInfos.
					ui_Address + (64 * b_ModulCpt));
				/************************/
				/* Read the latch value */
				/************************/
				dw_LatchValue = inl(devpriv->s_BoardInfos.
					ui_Address + 4 + (64 * b_ModulCpt));
				/* All four bytes must be equal; any */
				/* mismatch marks this module faulty */
				if ((dw_LatchValue & 0xFF) !=
					((dw_LatchValue >> 8) & 0xFF)
					|| (dw_LatchValue & 0xFF) !=
					((dw_LatchValue >> 16) & 0xFF)
					|| (dw_LatchValue & 0xFF) !=
					((dw_LatchValue >> 24) & 0xFF)) {
					*pb_TestStatus =
						*pb_TestStatus | (1 <<
						b_ModulCpt);
				}
				/*****************/
				/* Stop the test */
				/*****************/
				outl(0, devpriv->s_BoardInfos.
					ui_Address + 16 + (64 * b_ModulCpt));
			}
		}
	} else {
		/***************************/
		/* No counter module found */
		/***************************/
		DPRINTK("No counter module found\n");
		i_ReturnValue = -2;
	}
	return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitIndex (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_ReferenceAction, |
| unsigned char_ b_IndexOperation, |
| unsigned char_ b_AutoMode, |
| unsigned char_ b_InterruptEnable) |
+----------------------------------------------------------------------------+
| Task : Initialise the index corresponding to the selected |
| module (b_ModulNbr). If a INDEX flag occur, you have |
| the possibility to clear the 32-Bit counter or to latch|
| the current 32-Bit value in to the first latch |
| register. The b_IndexOperation parameter give the |
| possibility to choice the INDEX action. |
| If you have enabled the automatic mode, each INDEX |
| action is cleared automatically, else you must read |
| the index status ("i_APCI1710_ReadIndexStatus") |
| after each INDEX action. |
| |
| |
| Index action |
| ------------ |
| |
| +------------------------+------------------------------------+ |
| | b_IndexOperation | Operation | |
| |------------------------+------------------------------------| |
| |APCI1710_LATCH_COUNTER | After a index signal, the counter | |
| | | value (32-Bit) is latched in to | |
| | | the first latch register | |
| |------------------------|------------------------------------| |
| |APCI1710_CLEAR_COUNTER | After a index signal, the counter | |
| | | value is cleared (32-Bit) | |
| +------------------------+------------------------------------+ |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_ReferenceAction : Determine if the reference |
| must set or no for the |
| acceptance from index |
| APCI1710_ENABLE : |
| Reference must be set for |
| accepted the index |
| APCI1710_DISABLE : |
| Reference have not |
| importance |
| unsigned char_ b_IndexOperation : Index operating mode. |
| See table. |
| unsigned char_ b_AutoMode : Enable or disable the |
| automatic index reset. |
| APCI1710_ENABLE : |
| Enable the automatic mode |
| APCI1710_DISABLE : |
| Disable the automatic mode |
| unsigned char_ b_InterruptEnable : Enable or disable the |
| interrupt. |
| APCI1710_ENABLE : |
| Enable the interrupt |
| APCI1710_DISABLE : |
| Disable the interrupt |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4 The reference action parameter is wrong |
| -5: The index operating mode parameter is wrong |
| -6: The auto mode parameter is wrong |
| -7: Interrupt parameter is wrong |
| -8: Interrupt function not initialised. |
| See function "i_APCI1710_SetBoardIntRoutineX" |
+----------------------------------------------------------------------------+
*/
/*
 * Initialise the index input of the selected module (b_ModulNbr).
 *
 * All parameters are validated first (same order as the error codes
 * -2..-7 in the header comment); the shadow mode-register bytes held
 * in devpriv are only modified once everything is valid.  No hardware
 * write is performed here — only the shadow registers are updated.
 *
 * NOTE(review): the APCI1710_INDEX_AUTO_MODE bit is SET when the
 * automatic mode is DISABLED — inverted logic, reproduced exactly as
 * in the original code.
 *
 * Returns 0 on success or a negative error code (see header comment).
 */
int i_APCI1710_InitIndex(struct comedi_device *dev,
	unsigned char b_ModulNbr,
	unsigned char b_ReferenceAction,
	unsigned char b_IndexOperation, unsigned char b_AutoMode, unsigned char b_InterruptEnable)
{
	/* Check the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been initialised first */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Check the reference action parameter */
	if (b_ReferenceAction != APCI1710_ENABLE &&
		b_ReferenceAction != APCI1710_DISABLE) {
		DPRINTK("The reference action parameter is wrong\n");
		return -4;
	}

	/* Check the index operating mode */
	if (b_IndexOperation != APCI1710_HIGH_EDGE_LATCH_COUNTER &&
		b_IndexOperation != APCI1710_LOW_EDGE_LATCH_COUNTER &&
		b_IndexOperation != APCI1710_HIGH_EDGE_CLEAR_COUNTER &&
		b_IndexOperation != APCI1710_LOW_EDGE_CLEAR_COUNTER &&
		b_IndexOperation != APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER &&
		b_IndexOperation != APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
		DPRINTK("The index operating mode parameter is wrong\n");
		return -5;
	}

	/* Check the auto mode parameter */
	if (b_AutoMode != APCI1710_ENABLE && b_AutoMode != APCI1710_DISABLE) {
		DPRINTK("The auto mode parameter is wrong\n");
		return -6;
	}

	/* Check the interrupt parameter */
	if (b_InterruptEnable != APCI1710_ENABLE &&
		b_InterruptEnable != APCI1710_DISABLE) {
		DPRINTK("Interrupt parameter is wrong\n");
		return -7;
	}

	/* Enable or disable the index action gating by the reference */
	if (b_ReferenceAction == APCI1710_ENABLE)
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister2 |=
			APCI1710_ENABLE_INDEX_ACTION;
	else
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister2 &=
			APCI1710_DISABLE_INDEX_ACTION;

	/* Set the index level (DQ26): low for the LOW_EDGE operations */
	if (b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER ||
		b_IndexOperation == APCI1710_LOW_EDGE_CLEAR_COUNTER ||
		b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER)
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister4 |=
			APCI1710_SET_LOW_INDEX_LEVEL;
	else
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister4 &=
			APCI1710_SET_HIGH_INDEX_LEVEL;

	/* Latch-and-clear flag (DQ27) */
	if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER ||
		b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister4 |=
			APCI1710_ENABLE_LATCH_AND_CLEAR;
	} else {
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister4 &=
			APCI1710_DISABLE_LATCH_AND_CLEAR;

		/* Not latch-and-clear: select either latch or clear */
		if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_COUNTER ||
			b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER)
			devpriv->s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.s_ModeRegister.
				s_ByteModeRegister.b_ModeRegister2 |=
				APCI1710_INDEX_LATCH_COUNTER;
		else
			devpriv->s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.s_ModeRegister.
				s_ByteModeRegister.b_ModeRegister2 &=
				(~APCI1710_INDEX_LATCH_COUNTER);
	}

	/* Auto-mode flag: set when the automatic mode is DISABLED */
	if (b_AutoMode == APCI1710_DISABLE)
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister2 |=
			APCI1710_INDEX_AUTO_MODE;
	else
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister2 &=
			(~APCI1710_INDEX_AUTO_MODE);

	/* Enable or disable the index interrupt */
	if (b_InterruptEnable == APCI1710_ENABLE)
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister3 |=
			APCI1710_ENABLE_INDEX_INT;
	else
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister3 &=
			APCI1710_DISABLE_INDEX_INT;

	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_IndexInit = 1;

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitReference |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_ReferenceLevel) |
+----------------------------------------------------------------------------+
| Task : Initialise the reference corresponding to the selected |
| module (b_ModulNbr). |
| |
| Reference level |
| --------------- |
| +--------------------+-------------------------+ |
| | b_ReferenceLevel | Operation | |
| +--------------------+-------------------------+ |
| | APCI1710_LOW | Reference occur if "0" | |
| |--------------------|-------------------------| |
| | APCI1710_HIGH | Reference occur if "1" | |
| +--------------------+-------------------------+ |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_ReferenceLevel : Reference level. |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number parameter is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Reference level parameter is wrong |
+----------------------------------------------------------------------------+
*/
/*
 * Initialise the reference input of the selected module (b_ModulNbr).
 *
 * Validates the module number, the counter-initialised flag and the
 * reference level (0 = low / 1 = high), updates the shadow of mode
 * register 2 accordingly and writes the full mode register block to
 * the board.
 *
 * Returns 0 on success, -2 for a wrong module number, -3 when the
 * counter was not initialised, -4 for a wrong reference level.
 */
int i_APCI1710_InitReference(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_ReferenceLevel)
{
	/* Check the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been initialised first */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Check the reference level parameter */
	if (b_ReferenceLevel != 0 && b_ReferenceLevel != 1) {
		DPRINTK("Reference level parameter is wrong\n");
		return -4;
	}

	/* Select the active reference level in mode register 2 */
	if (b_ReferenceLevel == 1)
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister2 |=
			APCI1710_REFERENCE_HIGH;
	else
		devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_ModeRegister.
			s_ByteModeRegister.b_ModeRegister2 &=
			APCI1710_REFERENCE_LOW;

	/* Write the configuration to the board */
	outl(devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));

	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_ReferenceInit = 1;

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitExternalStrobe |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_ExternalStrobe, |
| unsigned char_ b_ExternalStrobeLevel) |
+----------------------------------------------------------------------------+
| Task : Initialises the external strobe level corresponding to |
| the selected module (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_ExternalStrobe : External strobe selection |
| 0 : External strobe A |
| 1 : External strobe B |
| unsigned char_ b_ExternalStrobeLevel : External strobe level |
| APCI1710_LOW : |
| External latch occurs if "0" |
| APCI1710_HIGH : |
| External latch occurs if "1" |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised. |
| See function "i_APCI1710_InitCounter" |
| -4: External strobe selection is wrong |
| -5: External strobe level parameter is wrong |
+----------------------------------------------------------------------------+
*/
/*
 * Initialise the external strobe level of the selected module.
 *
 * b_ExternalStrobe selects strobe A (0) or strobe B (1);
 * b_ExternalStrobeLevel selects the active level.  A low active level
 * is only accepted when the firmware revision word of the module
 * (low 16 bits of the module configuration) is at least 0x3135.
 * Only the shadow of mode register 4 is updated here — nothing is
 * written to the hardware by this function.
 *
 * Returns 0 on success, -2 for a wrong module number, -3 when the
 * counter was not initialised, -4 for a wrong strobe selection and
 * -5 for a wrong/unsupported strobe level.
 */
int i_APCI1710_InitExternalStrobe(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_ExternalStrobe, unsigned char b_ExternalStrobeLevel)
{
	int i_LevelValid;

	/* Check the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been initialised first */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Check the external strobe selection (0 = A, 1 = B) */
	if (b_ExternalStrobe != 0 && b_ExternalStrobe != 1) {
		DPRINTK("External strobe selection is wrong\n");
		return -4;
	}

	/* High level is always allowed; low level needs fw rev >= 0x3135 */
	i_LevelValid = (b_ExternalStrobeLevel == APCI1710_HIGH) ||
		(b_ExternalStrobeLevel == APCI1710_LOW &&
		(devpriv->s_BoardInfos.
			dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >=
		0x3135);
	if (!i_LevelValid) {
		DPRINTK("External strobe level parameter is wrong\n");
		return -5;
	}

	/* Clear the strobe's level bit (0x10 or 0x20), then set it */
	/* from the inverted level selection                        */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_ModeRegister.
		s_ByteModeRegister.b_ModeRegister4 =
		(devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_ModeRegister.
		s_ByteModeRegister.b_ModeRegister4 &
		(0xFF - (0x10 << b_ExternalStrobe))) |
		((b_ExternalStrobeLevel ^ 1) << (4 + b_ExternalStrobe));

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitCompareLogic |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned int_ ui_CompareValue) |
+----------------------------------------------------------------------------+
| Task : Set the 32-Bit compare value. At that moment that the |
| incremental counter arrive to the compare value |
| (ui_CompareValue) a interrupt is generated. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned int_ ui_CompareValue : 32-Bit compare value |
+----------------------------------------------------------------------------+
| Output Parameters : -
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
/*
 * Program the 32-bit compare value of the selected module.
 *
 * Writes ui_CompareValue to the module's compare register
 * (base + 28 + 64 * module) and marks the compare logic as
 * initialised.
 *
 * Returns 0 on success, -2 for a wrong module number and -3 when the
 * counter was not initialised (see "i_APCI1710_InitCounter").
 */
int i_APCI1710_InitCompareLogic(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned int ui_CompareValue)
{
	/* Check the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been initialised first */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Write the compare value to the board */
	outl(ui_CompareValue, devpriv->s_BoardInfos.
		ui_Address + 28 + (64 * b_ModulNbr));

	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CompareLogicInit = 1;

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitFrequencyMeasurement |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_PCIInputClock, |
| unsigned char_ b_TimingUnity, |
| ULONG_ ul_TimingInterval, |
| PULONG_ pul_RealTimingInterval) |
+----------------------------------------------------------------------------+
| Task : Sets the time for the frequency measurement. |
| Configures the selected TOR incremental counter of the |
| selected module (b_ModulNbr). The ul_TimingInterval and|
| ul_TimingUnity determine the time base for the |
| measurement. The pul_RealTimingInterval returns the |
| real time value. You must call up this function before |
| you call up any other function which gives access to |
| the frequency measurement. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
| unsigned char_ b_PCIInputClock : Selection of the PCI bus |
| clock |
| - APCI1710_30MHZ : |
| The PC has a PCI bus clock |
| of 30 MHz |
| - APCI1710_33MHZ : |
| The PC has a PCI bus clock |
| of 33 MHz |
| unsigned char_ b_TimingUnity : Base time unit (0 to 2) |
| 0 : ns |
|                                               1 : µs                       |
| 2 : ms |
| ULONG_ ul_TimingInterval: Base time value. |
+----------------------------------------------------------------------------+
| Output Parameters : PULONG_ pul_RealTimingInterval : Real base time value. |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: The selected PCI input clock is wrong |
| -5: Timing unity selection is wrong |
| -6: Base timing selection is wrong |
| -7: 40MHz quartz not on board |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InitFrequencyMeasurement(struct comedi_device *dev,
	unsigned char b_ModulNbr,
	unsigned char b_PCIInputClock,
	unsigned char b_TimingUnity,
	unsigned int ul_TimingInterval, unsigned int *pul_RealTimingInterval)
{
	/*
	 * Valid base-timing window for each PCI clock (row: 30/33/40 MHz)
	 * and each timing unit (column: ns/us/ms); values taken verbatim
	 * from the original per-case range tests.
	 */
	static const unsigned int min_interval[3][3] = {
		{266, 1, 1},		/* 30 MHz */
		{242, 1, 1},		/* 33 MHz */
		{200, 1, 1}		/* 40 MHz */
	};
	static const unsigned int max_interval[3][3] = {
		{8738133UL, 8738, 8},	/* 30 MHz */
		{7943757UL, 7943, 7},	/* 33 MHz */
		{6553500UL, 6553, 6}	/* 40 MHz */
	};
	/* Timer ticks per interval unit, per MHz of PCI clock */
	static const double unit_factor[3] = { 0.00025, 0.25, 250.0 };
	unsigned int ul_TimerValue;
	unsigned int dw_Status;
	unsigned char b_ClockIndex;
	double d_ClockFactor;

	/**************************/
	/* Test the module number */
	/**************************/
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must first be set up with "i_APCI1710_InitCounter" */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
	    s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/**************************/
	/* Test the PCI bus clock */
	/**************************/
	if (b_PCIInputClock != APCI1710_30MHZ &&
	    b_PCIInputClock != APCI1710_33MHZ &&
	    b_PCIInputClock != APCI1710_40MHZ) {
		DPRINTK("The selected PCI input clock is wrong\n");
		return -4;
	}

	/* Test the timing unit (0: ns, 1: us, 2: ms) */
	if (b_TimingUnity > 2) {
		DPRINTK("Timing unity selection is wrong\n");
		return -5;
	}

	b_ClockIndex = (b_PCIInputClock == APCI1710_30MHZ) ? 0 :
		       (b_PCIInputClock == APCI1710_33MHZ) ? 1 : 2;

	/**********************************/
	/* Test the base timing selection */
	/**********************************/
	if (ul_TimingInterval < min_interval[b_ClockIndex][b_TimingUnity] ||
	    ul_TimingInterval > max_interval[b_ClockIndex][b_TimingUnity]) {
		DPRINTK("Base timing selection is wrong\n");
		return -6;
	}

	if (b_PCIInputClock == APCI1710_40MHZ) {
		/* The 40MHz quartz requires firmware >= Rev1.5 */
		if ((devpriv->s_BoardInfos.
		     dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) < 0x3135) {
			DPRINTK("40MHz quartz not on board\n");
			return -7;
		}
		/******************************/
		/* Test the quartz flag (DQ0) */
		/******************************/
		dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 36 +
				(64 * b_ModulNbr));
		if ((dw_Status & 1) != 1) {
			DPRINTK("40MHz quartz not on board\n");
			return -7;
		}
	}

	/****************************/
	/* Test the INC_CPT version */
	/****************************/
	if ((devpriv->s_BoardInfos.
	     dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) < 0x3131) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Enable or disable the 40MHz quartz (DQ30) in the mode register */
	if (b_PCIInputClock == APCI1710_40MHZ) {
		devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 |=
			APCI1710_ENABLE_40MHZ_FREQUENCY;
	} else {
		devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 &=
			APCI1710_DISABLE_40MHZ_FREQUENCY;
	}

	/*
	 * Calculate the division factor and the timer value.  All floating
	 * point work must stay inside fpu_begin()/fpu_end() because the
	 * kernel does not otherwise preserve FPU state.  The three original
	 * switch cases were identical apart from the unit factor, so they
	 * are folded into one computation.
	 */
	fpu_begin();
	d_ClockFactor = unit_factor[b_TimingUnity] * (double)b_PCIInputClock;

	/* Timer 0 factor, rounded to the nearest integer */
	ul_TimerValue = (unsigned int)(ul_TimingInterval * d_ClockFactor);
	if ((double)ul_TimingInterval * d_ClockFactor >=
	    (double)ul_TimerValue + 0.5)
		ul_TimerValue = ul_TimerValue + 1;

	/* Calculate the real timing, rounded to the nearest integer */
	*pul_RealTimingInterval =
		(unsigned int)(ul_TimerValue / d_ClockFactor);
	if ((double)ul_TimerValue / d_ClockFactor >=
	    (double)*pul_RealTimingInterval + 0.5)
		*pul_RealTimingInterval = *pul_RealTimingInterval + 1;

	/*
	 * The register is programmed with (ticks - 2), as in the original
	 * code — presumably a hardware reload offset; confirm against the
	 * APCI-1710 manual.
	 */
	ul_TimerValue = ul_TimerValue - 2;
	fpu_end();

	/*************************/
	/* Write the timer value */
	/*************************/
	outl(ul_TimerValue,
	     devpriv->s_BoardInfos.ui_Address + 32 + (64 * b_ModulNbr));

	/*******************************/
	/* Set the initialisation flag */
	/*******************************/
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_FrequencyMeasurementInit = 1;

	return 0;
}
/*########################################################################### */
/* INSN BITS */
/*########################################################################### */
/*
+----------------------------------------------------------------------------+
| Function Name :INT i_APCI1710_InsnBitsINCCPT(struct comedi_device *dev,struct comedi_subdevice *s,
struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Set & Clear Functions for INC_CPT |
+----------------------------------------------------------------------------+
| Input Parameters :
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :
+----------------------------------------------------------------------------+
*/
/*
 * comedi INSN_BITS entry point for the incremental counter: dispatches
 * the set/clear sub-operation selected by the chanspec channel field.
 */
int i_APCI1710_InsnBitsINCCPT(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	int i_ReturnValue = 0;
	unsigned int ui_BitsType = CR_CHAN(insn->chanspec);
	/* The AREF field carries the module number for every sub-operation */
	unsigned char b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);

	devpriv->tsk_Current = current;	/* Save the current process task structure */

	switch (ui_BitsType) {
	case APCI1710_INCCPT_CLEARCOUNTERVALUE:
		i_ReturnValue = i_APCI1710_ClearCounterValue(dev, b_ModulNbr);
		break;
	case APCI1710_INCCPT_CLEARALLCOUNTERVALUE:
		i_ReturnValue = i_APCI1710_ClearAllCounterValue(dev);
		break;
	case APCI1710_INCCPT_SETINPUTFILTER:
		i_ReturnValue = i_APCI1710_SetInputFilter(dev, b_ModulNbr,
			(unsigned char) data[0], (unsigned char) data[1]);
		break;
	case APCI1710_INCCPT_LATCHCOUNTER:
		i_ReturnValue = i_APCI1710_LatchCounter(dev, b_ModulNbr,
			(unsigned char) data[0]);
		break;
	case APCI1710_INCCPT_SETINDEXANDREFERENCESOURCE:
		i_ReturnValue = i_APCI1710_SetIndexAndReferenceSource(dev,
			b_ModulNbr, (unsigned char) data[0]);
		break;
	case APCI1710_INCCPT_SETDIGITALCHLON:
		i_ReturnValue = i_APCI1710_SetDigitalChlOn(dev, b_ModulNbr);
		break;
	case APCI1710_INCCPT_SETDIGITALCHLOFF:
		i_ReturnValue = i_APCI1710_SetDigitalChlOff(dev, b_ModulNbr);
		break;
	default:
		printk("Bits Config Parameter Wrong\n");
	}

	/* On success comedi expects the number of processed instructions */
	return (i_ReturnValue >= 0) ? (int) insn->n : i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_ClearCounterValue |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task : Clear the counter value from selected module |
| (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number parameter is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
/*
 * Clears the counter value of the selected module (b_ModulNbr).
 * Returns 0 on success, -2 for a bad module number, -3 when the counter
 * has not been initialised with "i_APCI1710_InitCounter".
 */
int i_APCI1710_ClearCounterValue(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Validate the module number (0 to 3) */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been set up first */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
	    s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Writing 1 to the module's clear register resets the counter */
	outl(1, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_ClearAllCounterValue |
| (unsigned char_ b_BoardHandle) |
+----------------------------------------------------------------------------+
| Task : Clear all counter value. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
+----------------------------------------------------------------------------+
*/
/*
 * Clears the counter value of every module configured as an incremental
 * counter.  Returns 0 on success or -2 when no counter module is found.
 */
int i_APCI1710_ClearAllCounterValue(struct comedi_device *dev)
{
	unsigned char b_ModulCpt;
	unsigned char b_Found = 0;

	/* Scan all four module slots, clearing each incremental counter */
	for (b_ModulCpt = 0; b_ModulCpt < 4; b_ModulCpt++) {
		if ((devpriv->s_BoardInfos.
		     dw_MolduleConfiguration[b_ModulCpt] & 0xFFFF0000UL) ==
		    APCI1710_INCREMENTAL_COUNTER) {
			b_Found = 1;
			/* Writing 1 to the clear register resets the counter */
			outl(1, devpriv->s_BoardInfos.
			     ui_Address + 16 + (64 * b_ModulCpt));
		}
	}

	if (!b_Found) {
		DPRINTK("No counter module found\n");
		return -2;
	}
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_SetInputFilter |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_Module, |
| unsigned char_ b_PCIInputClock, |
| unsigned char_ b_Filter) |
+----------------------------------------------------------------------------+
| Task : Disable or enable the software filter from selected |
| module (b_ModulNbr). b_Filter determine the filter time|
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
| unsigned char_ b_PCIInputClock : Selection of the PCI bus |
| clock |
| - APCI1710_30MHZ : |
| The PC has a PCI bus clock |
| of 30 MHz |
| - APCI1710_33MHZ : |
| The PC has a PCI bus clock |
| of 33 MHz |
| - APCI1710_40MHZ : |
| The APCI1710 has a 40MHz |
| quartz |
| unsigned char_ b_Filter : Filter selection |
| |
| 30 MHz |
| ------ |
| 0: Software filter not used |
| 1: Filter from 266ns (3.750000MHz) |
| 2: Filter from 400ns (2.500000MHz) |
| 3: Filter from 533ns (1.876170MHz) |
| 4: Filter from 666ns (1.501501MHz) |
| 5: Filter from 800ns (1.250000MHz) |
| 6: Filter from 933ns (1.071800MHz) |
| 7: Filter from 1066ns (0.938080MHz) |
| 8: Filter from 1200ns (0.833333MHz) |
| 9: Filter from 1333ns (0.750000MHz) |
| 10: Filter from 1466ns (0.682100MHz) |
| 11: Filter from 1600ns (0.625000MHz) |
| 12: Filter from 1733ns (0.577777MHz) |
| 13: Filter from 1866ns (0.535900MHz) |
| 14: Filter from 2000ns (0.500000MHz) |
| 15: Filter from 2133ns (0.468800MHz) |
| |
| 33 MHz |
| ------ |
| 0: Software filter not used |
| 1: Filter from 242ns (4.125000MHz) |
| 2: Filter from 363ns (2.754820MHz) |
| 3: Filter from 484ns (2.066115MHz) |
| 4: Filter from 605ns (1.652892MHz) |
| 5: Filter from 726ns (1.357741MHz) |
| 6: Filter from 847ns (1.180637MHz) |
| 7: Filter from 968ns (1.033055MHz) |
| 8: Filter from 1089ns (0.918273MHz) |
| 9: Filter from 1210ns (0.826446MHz) |
| 10: Filter from 1331ns (0.751314MHz) |
| 11: Filter from 1452ns (0.688705MHz) |
| 12: Filter from 1573ns (0.635727MHz) |
| 13: Filter from 1694ns (0.590318MHz) |
| 14: Filter from 1815ns (0.550964MHz) |
| 15: Filter from 1936ns (0.516528MHz) |
| |
| 40 MHz |
| ------ |
| 0: Software filter not used |
| 1: Filter from 200ns (5.000000MHz) |
| 2: Filter from 300ns (3.333333MHz) |
| 3: Filter from 400ns (2.500000MHz) |
| 4: Filter from 500ns (2.000000MHz) |
| 5: Filter from 600ns (1.666666MHz) |
| 6: Filter from 700ns (1.428500MHz) |
| 7: Filter from 800ns (1.250000MHz) |
| 8: Filter from 900ns (1.111111MHz) |
| 9: Filter from 1000ns (1.000000MHz) |
| 10: Filter from 1100ns (0.909090MHz) |
| 11: Filter from 1200ns (0.833333MHz) |
| 12: Filter from 1300ns (0.769200MHz) |
| 13: Filter from 1400ns (0.714200MHz) |
| 14: Filter from 1500ns (0.666666MHz) |
| 15: Filter from 1600ns (0.625000MHz) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: The module is not a counter module |
| -4: The selected PCI input clock is wrong |
| -5: The selected filter value is wrong |
| -6: 40MHz quartz not on board |
+----------------------------------------------------------------------------+
*/
/*
 * Enables or disables the software input filter of the selected module;
 * b_Filter (0..15) selects the filter time.  BUG FIX: a wrong PCI clock
 * previously set i_ReturnValue to +4 instead of the documented -4, which
 * the INSN_BITS dispatcher treated as success.
 */
int i_APCI1710_SetInputFilter(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_PCIInputClock, unsigned char b_Filter)
{
	unsigned int dw_Status;

	/**************************/
	/* Test the module number */
	/**************************/
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/*******************************/
	/* Test if incremental counter */
	/*******************************/
	if ((devpriv->s_BoardInfos.
	     dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) !=
	    APCI1710_INCREMENTAL_COUNTER) {
		DPRINTK("The module is not a counter module\n");
		return -3;
	}

	/* The input filter requires firmware >= Rev1.5 */
	if ((devpriv->s_BoardInfos.
	     dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) < 0x3135) {
		DPRINTK("The module is not a counter module\n");
		return -3;
	}

	/**************************/
	/* Test the PCI bus clock */
	/**************************/
	if (b_PCIInputClock != APCI1710_30MHZ &&
	    b_PCIInputClock != APCI1710_33MHZ &&
	    b_PCIInputClock != APCI1710_40MHZ) {
		DPRINTK("The selected PCI input clock is wrong\n");
		return -4;	/* was erroneously +4 */
	}

	/*************************/
	/* Test the filter value */
	/*************************/
	if (b_Filter >= 16) {
		DPRINTK("The selected filter value is wrong\n");
		return -5;
	}

	if (b_PCIInputClock == APCI1710_40MHZ) {
		/* Test the 40MHz quartz flag (DQ0) */
		dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 36 +
				(64 * b_ModulNbr));
		if ((dw_Status & 1) != 1) {
			DPRINTK("40MHz quartz not on board\n");
			return -6;
		}
		/* Enable the 40MHz quartz (DQ31) */
		devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 |=
			APCI1710_ENABLE_40MHZ_FILTER;
	} else {
		/* Disable the 40MHz quartz (DQ31) */
		devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 &=
			APCI1710_DISABLE_40MHZ_FILTER;
	}

	/************************/
	/* Set the filter value */
	/************************/
	/* Low 3 filter bits go into bits 7..5 of mode register 3 */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 =
		(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		 s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 & 0x1F) |
		((b_Filter & 0x7) << 5);
	/* Filter bit 3 goes into bit 0 of mode register 4 */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 =
		(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		 s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 & 0xFE) |
		((b_Filter & 0x8) >> 3);

	/***************************/
	/* Write the configuration */
	/***************************/
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
	     s_ModeRegister.dw_ModeRegister1_2_3_4,
	     devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_LatchCounter (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_LatchReg) |
+----------------------------------------------------------------------------+
| Task              : Latch the current value from selected module           |
| (b_ModulNbr) in to the selected latch register |
| (b_LatchReg). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_LatchReg : Selected latch register |
| 0 : for the first latch register |
| 1 : for the second latch register |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: The selected latch register parameter is wrong |
+----------------------------------------------------------------------------+
*/
/*
 * Latches the current value of the selected module (b_ModulNbr) into the
 * selected latch register (b_LatchReg: 0 or 1).  Returns 0 on success,
 * -2 for a bad module number, -3 when the counter is not initialised,
 * -4 for a bad latch register selection.
 */
int i_APCI1710_LatchCounter(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_LatchReg)
{
	/* Validate the module number (0 to 3) */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been set up with "i_APCI1710_InitCounter" */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
	    s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Only latch registers 0 and 1 exist */
	if (b_LatchReg >= 2) {
		DPRINTK("The selected latch register parameter is wrong\n");
		return -4;
	}

	/* Bit 0 latches into register 0, bit 4 into register 1 */
	outl(1 << (b_LatchReg * 4),
	     devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_SetIndexAndReferenceSource |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_SourceSelection) |
+----------------------------------------------------------------------------+
| Task : Determine the hardware source for the index and the |
| reference logic. Per default the index logic is |
| connected to the difference input C and the reference |
| logic is connected to the 24V input E |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_SourceSelection : APCI1710_SOURCE_0 : |
| The index logic is connected |
| to the difference input C and|
| the reference logic is |
| connected to the 24V input E.|
| This is the default |
| configuration. |
| APCI1710_SOURCE_1 : |
| The reference logic is |
| connected to the difference |
| input C and the index logic |
| is connected to the 24V |
| input E |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: The module is not a counter module. |
| -4: The source selection is wrong |
+----------------------------------------------------------------------------+
*/
/*
 * Selects the hardware source for the index and reference logic.
 * APCI1710_SOURCE_0 is the default wiring (index on differential input C,
 * reference on 24V input E); APCI1710_SOURCE_1 swaps them.  Only the
 * cached mode register is updated here; no hardware write is performed.
 */
int i_APCI1710_SetIndexAndReferenceSource(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_SourceSelection)
{
	/* Validate the module number (0 to 3) */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number is wrong\n");
		return -2;
	}

	/* The module must be configured as an incremental counter */
	if ((devpriv->s_BoardInfos.
	     dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) !=
	    APCI1710_INCREMENTAL_COUNTER) {
		DPRINTK("The module is not a counter module\n");
		return -3;
	}

	/* Source selection requires firmware >= Rev1.5 */
	if ((devpriv->s_BoardInfos.
	     dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) < 0x3135) {
		DPRINTK("The module is not a counter module\n");
		return -3;
	}

	/* Only the two defined sources are accepted */
	if (b_SourceSelection != APCI1710_SOURCE_0 &&
	    b_SourceSelection != APCI1710_SOURCE_1) {
		DPRINTK("The source selection is wrong\n");
		return -4;
	}

	if (b_SourceSelection == APCI1710_SOURCE_1) {
		/* Invert index and reference source (DQ25) */
		devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 |=
			APCI1710_INVERT_INDEX_RFERENCE;
	} else {
		/* Set the default configuration (DQ25) */
		devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister4 &=
			APCI1710_DEFAULT_INDEX_RFERENCE;
	}
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_SetDigitalChlOn |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task              : Sets the digital output H. Setting an output means     |
|                     setting an output high.                                |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_SetDigitalChlOn(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Drive the digital output H of module b_ModulNbr high.
	 * Returns 0 on success, -2 for a bad module number, -3 when
	 * the counter was never initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	/* Record the output state in bit 4 of mode register 3 */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 |= 0x10;
	/*********************/
	/* Set the output On */
	/*********************/
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_SetDigitalChlOff |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task              : Resets the digital output H. Resetting an output  |
|                     means setting an output low.                      |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_SetDigitalChlOff(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Drive the digital output H of module b_ModulNbr low.
	 * Returns 0 on success, -2 for a bad module number, -3 when
	 * the counter was never initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	/* Clear the output state bit (bit 4) of mode register 3 */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 &= 0xEF;
	/**********************/
	/* Set the output Off */
	/**********************/
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
	return 0;
}
/*########################################################################### */
/* INSN WRITE */
/*########################################################################### */
/*
+----------------------------------------------------------------------------+
| Function Name :INT i_APCI1710_InsnWriteINCCPT(struct comedi_device *dev,struct comedi_subdevice *s,
struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Enable Disable functions for INC_CPT |
+----------------------------------------------------------------------------+
| Input Parameters :
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnWriteINCCPT(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	/* Dispatcher for the INC_CPT enable/disable write operations.
	 * The operation is selected by CR_CHAN(insn->chanspec); the
	 * target module number travels in CR_AREF(insn->chanspec).
	 * Returns insn->n on success, a negative helper error otherwise. */
	unsigned int ui_WriteType;
	int i_ReturnValue = 0;
	ui_WriteType = CR_CHAN(insn->chanspec);
	devpriv->tsk_Current = current;	/* Save the current process task structure */
	switch (ui_WriteType) {
	case APCI1710_INCCPT_ENABLELATCHINTERRUPT:
		i_ReturnValue = i_APCI1710_EnableLatchInterrupt(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	case APCI1710_INCCPT_DISABLELATCHINTERRUPT:
		i_ReturnValue = i_APCI1710_DisableLatchInterrupt(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	case APCI1710_INCCPT_WRITE16BITCOUNTERVALUE:
		/* data[0]: selected 16-bit counter, data[1]: write value */
		i_ReturnValue = i_APCI1710_Write16BitCounterValue(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) data[0], (unsigned int) data[1]);
		break;
	case APCI1710_INCCPT_WRITE32BITCOUNTERVALUE:
		i_ReturnValue = i_APCI1710_Write32BitCounterValue(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned int) data[0]);
		break;
	case APCI1710_INCCPT_ENABLEINDEX:
		/* BUGFIX: the return value was previously discarded here,
		 * unlike every other case, so EnableIndex failures were
		 * silently reported to the caller as success. */
		i_ReturnValue = i_APCI1710_EnableIndex(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	case APCI1710_INCCPT_DISABLEINDEX:
		i_ReturnValue = i_APCI1710_DisableIndex(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	case APCI1710_INCCPT_ENABLECOMPARELOGIC:
		i_ReturnValue = i_APCI1710_EnableCompareLogic(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	case APCI1710_INCCPT_DISABLECOMPARELOGIC:
		i_ReturnValue = i_APCI1710_DisableCompareLogic(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	case APCI1710_INCCPT_ENABLEFREQUENCYMEASUREMENT:
		/* data[0]: APCI1710_ENABLE / APCI1710_DISABLE interrupt flag */
		i_ReturnValue = i_APCI1710_EnableFrequencyMeasurement(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
		break;
	case APCI1710_INCCPT_DISABLEFREQUENCYMEASUREMENT:
		i_ReturnValue = i_APCI1710_DisableFrequencyMeasurement(dev,
			(unsigned char) CR_AREF(insn->chanspec));
		break;
	default:
		printk("Write Config Parameter Wrong\n");
	}
	if (i_ReturnValue >= 0)
		i_ReturnValue = insn->n;
	return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_EnableLatchInterrupt |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task              : Enable the latch interrupt from the selected     |
|                     module (b_ModulNbr). Each software or hardware   |
|                     latch generates an interrupt.                    |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Interrupt routine not installed see function |
| "i_APCI1710_SetBoardIntRoutine" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_EnableLatchInterrupt(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Enable the latch interrupt of module b_ModulNbr.
	 * Returns 0 on success, -2 for a bad module number, -3 when
	 * the counter was never initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	/********************/
	/* Enable interrupt */
	/********************/
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.
		b_ModeRegister2 |= APCI1710_ENABLE_LATCH_INT;
	/***************************/
	/* Write the configuration */
	/***************************/
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_DisableLatchInterrupt |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task : Disable the latch interrupt from selected module |
| (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Interrupt routine not installed see function |
| "i_APCI1710_SetBoardIntRoutine" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_DisableLatchInterrupt(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	int i_ReturnValue = 0;
	/**************************/
	/* Test the module number */
	/**************************/
	if (b_ModulNbr < 4) {
		/*******************************/
		/* Test if counter initialised */
		/*******************************/
		if (devpriv->
			s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
			/***************************/
			/* Write the configuration */
			/***************************/
			/* Write the mode register with the latch-interrupt   */
			/* enable bit masked off in byte 1 while byte 0 is    */
			/* preserved.  NOTE(review): assumes                  */
			/* APCI1710_DISABLE_LATCH_INT is an AND-mask --       */
			/* confirm against the APCI-1710 register map.        */
			outl(devpriv->s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.
				s_ModeRegister.
				dw_ModeRegister1_2_3_4 &
				((APCI1710_DISABLE_LATCH_INT << 8) | 0xFF),
				devpriv->s_BoardInfos.ui_Address + 20 +
				(64 * b_ModulNbr));
			/* NOTE(review): a full 1-second busy wait after the  */
			/* register write -- presumably lets any pending      */
			/* latch interrupt drain before the shadow copy is    */
			/* updated; confirm whether such a long mdelay is     */
			/* really required here.                              */
			mdelay(1000);
			/*********************/
			/* Disable interrupt */
			/*********************/
			/* Update the driver's shadow copy of mode register 2 */
			devpriv->
				s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.
				s_ModeRegister.
				s_ByteModeRegister.
				b_ModeRegister2 = devpriv->
				s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.
				s_ModeRegister.
				s_ByteModeRegister.
				b_ModeRegister2 & APCI1710_DISABLE_LATCH_INT;
		} else {
			/****************************************/
			/* Counter not initialised see function */
			/* "i_APCI1710_InitCounter"             */
			/****************************************/
			DPRINTK("Counter not initialised\n");
			i_ReturnValue = -3;
		}
	} else {
		/*************************************************/
		/* The selected module number parameter is wrong */
		/*************************************************/
		DPRINTK("The selected module number parameter is wrong\n");
		i_ReturnValue = -2;
	}
	return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_Write16BitCounterValue |
| (unsigned char_ b_BoardHandle |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_SelectedCounter, |
| unsigned int_ ui_WriteValue) |
+----------------------------------------------------------------------------+
| Task : Write a 16-Bit value (ui_WriteValue) in to the selected|
| 16-Bit counter (b_SelectedCounter) from selected module|
| (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_SelectedCounter : Selected 16-Bit counter |
| (0 or 1) |
| unsigned int_ ui_WriteValue : 16-Bit write value |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: The selected 16-Bit counter parameter is wrong |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_Write16BitCounterValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_SelectedCounter, unsigned int ui_WriteValue)
{
	/* Write ui_WriteValue into 16-bit counter b_SelectedCounter
	 * (0 or 1) of module b_ModulNbr.  Returns 0 on success,
	 * -2 bad module, -3 counter not initialised, -4 bad counter. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	if (b_SelectedCounter >= 2) {
		/* The selected 16-Bit counter parameter is wrong */
		DPRINTK("The selected 16-Bit counter parameter is wrong\n");
		return -4;
	}
	/*******************/
	/* Write the value */
	/*******************/
	/* Counter 1 occupies the upper 16 bits of the 32-bit write */
	outl(ui_WriteValue << (16 * b_SelectedCounter),
		devpriv->s_BoardInfos.ui_Address + 8 +
		(b_SelectedCounter * 4) + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_Write32BitCounterValue |
| (unsigned char_ b_BoardHandle |
| unsigned char_ b_ModulNbr, |
| ULONG_ ul_WriteValue) |
+----------------------------------------------------------------------------+
| Task : Write a 32-Bit value (ui_WriteValue) in to the selected|
| module (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| ULONG_ ul_WriteValue : 32-Bit write value |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_Write32BitCounterValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned int ul_WriteValue)
{
	/* Write the 32-bit value ul_WriteValue into module b_ModulNbr.
	 * Returns 0 on success, -2 for a bad module number, -3 when
	 * the counter was never initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	/*******************/
	/* Write the value */
	/*******************/
	outl(ul_WriteValue,
		devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_EnableIndex (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task : Enable the INDEX actions |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Index not initialised see function |
| "i_APCI1710_InitIndex" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_EnableIndex(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Enable the INDEX actions of module b_ModulNbr.
	 * Returns 0 on success, -2 bad module number, -3 counter not
	 * initialised, -4 index not initialised. */
	int i_ReturnValue = 0;
	/**************************/
	/* Test the module number */
	/**************************/
	if (b_ModulNbr < 4) {
		/*******************************/
		/* Test if counter initialised */
		/*******************************/
		if (devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
			/*****************************/
			/* Test if index initialised */
			/*****************************/
			if (devpriv->s_ModuleInfo[b_ModulNbr].
				s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
				/* Set the index-enable bit in the shadow copy */
				devpriv->s_ModuleInfo[b_ModulNbr].
					s_SiemensCounterInfo.
					s_ModeRegister.
					s_ByteModeRegister.
					b_ModeRegister2 |= APCI1710_ENABLE_INDEX;
				/* Dummy read of the interrupt/latch register;
				 * the result was previously stored in an unused
				 * local (ul_InterruptLatchReg) and is now
				 * intentionally discarded.  NOTE(review):
				 * presumably the read itself acknowledges a
				 * pending latch -- confirm against the
				 * APCI-1710 register map before removing it. */
				inl(devpriv->s_BoardInfos.ui_Address +
					24 + (64 * b_ModulNbr));
				/* Write the configuration to the module */
				outl(devpriv->s_ModuleInfo[b_ModulNbr].
					s_SiemensCounterInfo.
					s_ModeRegister.
					dw_ModeRegister1_2_3_4,
					devpriv->s_BoardInfos.ui_Address + 20 +
					(64 * b_ModulNbr));
			} else {
				/*************************************************************/
				/* Index not initialised see function "i_APCI1710_InitIndex" */
				/*************************************************************/
				DPRINTK("Index not initialised \n");
				i_ReturnValue = -4;
			}
		} else {
			/****************************************/
			/* Counter not initialised see function */
			/* "i_APCI1710_InitCounter"             */
			/****************************************/
			DPRINTK("Counter not initialised\n");
			i_ReturnValue = -3;
		}
	} else {
		/*************************************************/
		/* The selected module number parameter is wrong */
		/*************************************************/
		DPRINTK("The selected module number parameter is wrong\n");
		i_ReturnValue = -2;
	}
	return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_DisableIndex (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task : Disable the INDEX actions |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Index not initialised see function |
| "i_APCI1710_InitIndex" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_DisableIndex(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Disable the INDEX actions of module b_ModulNbr.
	 * Returns 0 on success, -2 bad module number, -3 counter not
	 * initialised, -4 index not initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	if (!devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_IndexInit) {
		/* Index not initialised, see "i_APCI1710_InitIndex" */
		DPRINTK("Index not initialised \n");
		return -4;
	}
	/* Clear the index-enable bit in the mode register shadow copy */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.
		b_ModeRegister2 &= APCI1710_DISABLE_INDEX;
	/* Write the configuration back to the module */
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_EnableCompareLogic |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task              : Enable the 32-bit compare logic. When the        |
|                     incremental counter reaches the compare value    |
|                     an interrupt is generated.                       |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : -
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Compare logic not initialised. |
| See function "i_APCI1710_InitCompareLogic" |
| -5: Interrupt function not initialised. |
| See function "i_APCI1710_SetBoardIntRoutineX" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_EnableCompareLogic(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Enable the 32-bit compare logic of module b_ModulNbr.
	 * Returns 0 on success, -2 bad module number, -3 counter not
	 * initialised, -4 compare logic not initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CompareLogicInit != 1) {
		/* Compare logic not initialised, see "i_APCI1710_InitCompareLogic" */
		DPRINTK("Compare logic not initialised\n");
		return -4;
	}
	/* Set the compare-interrupt enable bit in the shadow copy */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.
		b_ModeRegister3 |= APCI1710_ENABLE_COMPARE_INT;
	/***************************/
	/* Write the configuration */
	/***************************/
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_DisableCompareLogic |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task : Disable the 32-Bit compare logic.
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : -
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Compare logic not initialised. |
| See function "i_APCI1710_InitCompareLogic" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_DisableCompareLogic(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/* Disable the 32-bit compare logic of module b_ModulNbr.
	 * Returns 0 on success, -2 bad module number, -3 counter not
	 * initialised, -4 compare logic not initialised. */
	if (b_ModulNbr >= 4) {
		/* The selected module number parameter is wrong */
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CounterInit != 1) {
		/* Counter not initialised, see "i_APCI1710_InitCounter" */
		DPRINTK("Counter not initialised\n");
		return -3;
	}
	if (devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_InitFlag.b_CompareLogicInit != 1) {
		/* Compare logic not initialised, see "i_APCI1710_InitCompareLogic" */
		DPRINTK("Compare logic not initialised\n");
		return -4;
	}
	/* Clear the compare-interrupt enable bit in the shadow copy */
	devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.
		b_ModeRegister3 &= APCI1710_DISABLE_COMPARE_INT;
	/***************************/
	/* Write the configuration */
	/***************************/
	outl(devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_EnableFrequencyMeasurement |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_InterruptEnable) |
+----------------------------------------------------------------------------+
| Task : Enables the frequency measurement function |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
| unsigned char_ b_InterruptEnable: Enable or disable the |
| interrupt. |
| APCI1710_ENABLE: |
| Enable the interrupt |
| APCI1710_DISABLE: |
| Disable the interrupt |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Frequency measurement logic not initialised. |
| See function "i_APCI1710_InitFrequencyMeasurement" |
| -5: Interrupt parameter is wrong |
| -6: Interrupt function not initialised. |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_EnableFrequencyMeasurement(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_InterruptEnable)
{
	/*
	 * Enable the frequency measurement function of module b_ModulNbr and
	 * enable (APCI1710_ENABLE) or disable (APCI1710_DISABLE) its interrupt.
	 * Returns 0 on success, -2 wrong module number, -3 counter not
	 * initialised, -4 frequency measurement not initialised, -5 wrong
	 * interrupt parameter.
	 */

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test if frequency measurement initialised */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_FrequencyMeasurementInit != 1) {
		DPRINTK("Frequency measurement logic not initialised\n");
		return -4;
	}

	/* Test the interrupt mode */
	if ((b_InterruptEnable != APCI1710_DISABLE) &&
		(b_InterruptEnable != APCI1710_ENABLE)) {
		DPRINTK("Interrupt parameter is wrong\n");
		return -5;
	}

	/* Enable the frequency measurement */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 |=
		APCI1710_ENABLE_FREQUENCY;

	/* Disable or enable the frequency interrupt (bit 3) */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 =
		(devpriv->s_ModuleInfo[b_ModulNbr].
			s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 &
			APCI1710_DISABLE_FREQUENCY_INT) |
		(b_InterruptEnable << 3);

	/* Write the configuration */
	outl(devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));

	/* Remember that the measurement is running */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_InitFlag.b_FrequencyMeasurementEnable = 1;

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_DisableFrequencyMeasurement |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr) |
+----------------------------------------------------------------------------+
| Task : Disables the frequency measurement function |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Frequency measurement logic not initialised. |
| See function "i_APCI1710_InitFrequencyMeasurement" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_DisableFrequencyMeasurement(struct comedi_device *dev, unsigned char b_ModulNbr)
{
	/*
	 * Disable the frequency measurement function of module b_ModulNbr.
	 * Returns 0 on success, -2 wrong module number, -3 counter not
	 * initialised, -4 frequency measurement not initialised.
	 */

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test if frequency measurement initialised */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_FrequencyMeasurementInit != 1) {
		DPRINTK("Frequency measurement logic not initialised\n");
		return -4;
	}

	/*
	 * Disable the frequency measurement; the frequency interrupt is
	 * cleared as well (fix CG 29/06/01, 1100/0231 -> 0701/0232).
	 */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.s_ByteModeRegister.b_ModeRegister3 &=
		APCI1710_DISABLE_FREQUENCY & APCI1710_DISABLE_FREQUENCY_INT;

	/* Write the configuration */
	outl(devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_ModeRegister.dw_ModeRegister1_2_3_4,
		devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));

	/* Remember that the measurement is stopped */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.
		s_InitFlag.b_FrequencyMeasurementEnable = 0;

	return 0;
}
/*########################################################################### */
/* INSN READ */
/*########################################################################### */
/*
+----------------------------------------------------------------------------+
| Function Name :INT i_APCI1710_InsnWriteINCCPT(struct comedi_device *dev,struct comedi_subdevice *s,
struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Read and Get functions for INC_CPT |
+----------------------------------------------------------------------------+
| Input Parameters :
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnReadINCCPT(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	/*
	 * Dispatch the read/get functions for the INC_CPT subdevice.
	 * The read type is taken from CR_CHAN(insn->chanspec); the module
	 * number and further arguments come from CR_AREF/CR_RANGE.
	 * Returns insn->n on success or a negative error code.
	 */
	unsigned int ui_ReadType;
	int i_ReturnValue = 0;
	ui_ReadType = CR_CHAN(insn->chanspec);
	devpriv->tsk_Current = current;	/* Save the current process task structure */
	switch (ui_ReadType) {
	case APCI1710_INCCPT_READLATCHREGISTERSTATUS:
		i_ReturnValue = i_APCI1710_ReadLatchRegisterStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_RANGE(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_READLATCHREGISTERVALUE:
		i_ReturnValue = i_APCI1710_ReadLatchRegisterValue(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_RANGE(insn->chanspec), (unsigned int *) &data[0]);
		printk("Latch Register Value %d\n", data[0]);
		break;
	case APCI1710_INCCPT_READ16BITCOUNTERVALUE:
		i_ReturnValue = i_APCI1710_Read16BitCounterValue(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_RANGE(insn->chanspec), (unsigned int *) &data[0]);
		break;
	case APCI1710_INCCPT_READ32BITCOUNTERVALUE:
		i_ReturnValue = i_APCI1710_Read32BitCounterValue(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned int *) &data[0]);
		break;
	case APCI1710_INCCPT_GETINDEXSTATUS:
		i_ReturnValue = i_APCI1710_GetIndexStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_GETREFERENCESTATUS:
		i_ReturnValue = i_APCI1710_GetReferenceStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_GETUASSTATUS:
		i_ReturnValue = i_APCI1710_GetUASStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_GETCBSTATUS:
		i_ReturnValue = i_APCI1710_GetCBStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_GET16BITCBSTATUS:
		i_ReturnValue = i_APCI1710_Get16BitCBStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char *) &data[0], (unsigned char *) &data[1]);
		break;
	case APCI1710_INCCPT_GETUDSTATUS:
		i_ReturnValue = i_APCI1710_GetUDStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_GETINTERRUPTUDLATCHEDSTATUS:
		i_ReturnValue = i_APCI1710_GetInterruptUDLatchedStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
		break;
	case APCI1710_INCCPT_READFREQUENCYMEASUREMENT:
		i_ReturnValue = i_APCI1710_ReadFrequencyMeasurement(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char *) &data[0],
			(unsigned char *) &data[1], (unsigned int *) &data[2]);
		break;
	case APCI1710_INCCPT_READINTERRUPT:
		/* Hand back the oldest saved interrupt parameters from the FIFO */
		data[0] = devpriv->s_InterruptParameters.
			s_FIFOInterruptParameters[devpriv->
			s_InterruptParameters.ui_Read].b_OldModuleMask;
		data[1] = devpriv->s_InterruptParameters.
			s_FIFOInterruptParameters[devpriv->
			s_InterruptParameters.ui_Read].ul_OldInterruptMask;
		data[2] = devpriv->s_InterruptParameters.
			s_FIFOInterruptParameters[devpriv->
			s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
		/***************************/
		/* Increment the read FIFO */
		/***************************/
		devpriv->
			s_InterruptParameters.
			ui_Read = (devpriv->s_InterruptParameters.
			ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
		break;
	default:
		printk("ReadType Parameter wrong\n");
		/* BUGFIX: previously fell through with i_ReturnValue == 0, so an
		 * invalid read type was reported to the caller as success. */
		i_ReturnValue = -EINVAL;
		break;
	}
	if (i_ReturnValue >= 0)
		i_ReturnValue = insn->n;
	return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_ReadLatchRegisterStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_LatchReg, |
| unsigned char *_ pb_LatchStatus) |
+----------------------------------------------------------------------------+
| Task : Read the latch register status from selected module |
| (b_ModulNbr) and selected latch register (b_LatchReg). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_LatchReg : Selected latch register |
| 0 : for the first latch register |
| 1 : for the second latch register |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_LatchStatus : Latch register status. |
| 0 : No latch occur |
| 1 : A software latch occur |
| 2 : A hardware latch occur |
| 3 : A software and hardware |
| latch occur |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: The selected latch register parameter is wrong |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_ReadLatchRegisterStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_LatchReg, unsigned char *pb_LatchStatus)
{
	/*
	 * Read the latch register status of latch register b_LatchReg (0 or 1)
	 * on module b_ModulNbr into *pb_LatchStatus (0: none, 1: software,
	 * 2: hardware, 3: both). Returns 0 on success, -2 wrong module,
	 * -3 counter not initialised, -4 wrong latch register.
	 */
	unsigned int dw_LatchReg;

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test the latch register parameter */
	if (b_LatchReg >= 2) {
		DPRINTK("The selected latch register parameter is wrong\n");
		return -4;
	}

	/* Each latch register owns a 2-bit status field, 4 bits apart */
	dw_LatchReg = inl(devpriv->s_BoardInfos.
		ui_Address + (64 * b_ModulNbr));
	*pb_LatchStatus =
		(unsigned char) ((dw_LatchReg >> (b_LatchReg * 4)) & 0x3);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_ReadLatchRegisterValue |
| (unsigned char_ b_BoardHandle,|
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_LatchReg, |
| PULONG_ pul_LatchValue) |
+----------------------------------------------------------------------------+
| Task : Read the latch register value from selected module |
| (b_ModulNbr) and selected latch register (b_LatchReg). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_LatchReg : Selected latch register |
| 0 : for the first latch register |
| 1 : for the second latch register |
+----------------------------------------------------------------------------+
| Output Parameters : PULONG_ pul_LatchValue : Latch register value |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: The selected latch register parameter is wrong |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_ReadLatchRegisterValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_LatchReg, unsigned int *pul_LatchValue)
{
	/*
	 * Read the value of latch register b_LatchReg (0 or 1) on module
	 * b_ModulNbr into *pul_LatchValue. Returns 0 on success, -2 wrong
	 * module, -3 counter not initialised, -4 wrong latch register.
	 */

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test the latch register parameter */
	if (b_LatchReg >= 2) {
		DPRINTK("The selected latch register parameter is wrong\n");
		return -4;
	}

	/* Latch register 0 lives at offset 4, register 1 at offset 8 */
	*pul_LatchValue = inl(devpriv->s_BoardInfos.
		ui_Address + ((b_LatchReg + 1) * 4) + (64 * b_ModulNbr));

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_Read16BitCounterValue |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_SelectedCounter, |
| unsigned int *_ pui_CounterValue) |
+----------------------------------------------------------------------------+
| Task : Latch the selected 16-Bit counter (b_SelectedCounter) |
| from selected module (b_ModulNbr) in to the first |
| latch register and return the latched value. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| unsigned char_ b_SelectedCounter : Selected 16-Bit counter |
| (0 or 1) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned int *_ pui_CounterValue : 16-Bit counter value |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: The selected 16-Bit counter parameter is wrong |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_Read16BitCounterValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_SelectedCounter, unsigned int *pui_CounterValue)
{
	/*
	 * Latch the selected 16-bit counter (0 or 1) of module b_ModulNbr into
	 * the first latch register and return the latched value in
	 * *pui_CounterValue. Returns 0 on success, -2 wrong module, -3 counter
	 * not initialised, -4 wrong 16-bit counter selection.
	 */
	unsigned int dw_LatchValue = 0;

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test the counter selection */
	if (b_SelectedCounter >= 2) {
		DPRINTK("The selected 16-Bit counter parameter is wrong\n");
		return -4;
	}

	/* Latch the counter */
	outl(1, devpriv->s_BoardInfos.
		ui_Address + (64 * b_ModulNbr));

	/* Read the latch value; counter 0 is the low word, counter 1 the high */
	dw_LatchValue = inl(devpriv->s_BoardInfos.
		ui_Address + 4 + (64 * b_ModulNbr));
	*pui_CounterValue =
		(unsigned int) ((dw_LatchValue >> (16 * b_SelectedCounter)) &
		0xFFFFU);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_Read32BitCounterValue |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| PULONG_ pul_CounterValue) |
+----------------------------------------------------------------------------+
| Task : Latch the 32-Bit counter from selected module |
| (b_ModulNbr) in to the first latch register and return |
| the latched value. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : PULONG_ pul_CounterValue : 32-Bit counter value |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_Read32BitCounterValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned int *pul_CounterValue)
{
	/*
	 * Latch the 32-bit counter of module b_ModulNbr into the first latch
	 * register and return the latched value in *pul_CounterValue.
	 * Returns 0 on success, -2 wrong module, -3 counter not initialised.
	 */

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Latch the counter */
	outl(1, devpriv->s_BoardInfos.
		ui_Address + (64 * b_ModulNbr));

	/* Read the latch value */
	*pul_CounterValue = inl(devpriv->s_BoardInfos.
		ui_Address + 4 + (64 * b_ModulNbr));

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetIndexStatus (unsigned char_ b_BoardHandle,|
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_IndexStatus)|
+----------------------------------------------------------------------------+
| Task : Return the index status |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_IndexStatus : 0 : No INDEX occur |
| 1 : A INDEX occur |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Index not initialised see function |
| "i_APCI1710_InitIndex" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_GetIndexStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_IndexStatus)
{
	/*
	 * Return the index status of module b_ModulNbr in *pb_IndexStatus
	 * (0: no INDEX occurred, 1: an INDEX occurred). Returns 0 on success,
	 * -2 wrong module, -3 counter not initialised, -4 index not
	 * initialised (see "i_APCI1710_InitIndex").
	 */
	unsigned int dw_StatusReg = 0;

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test if index initialised */
	if (!devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
		DPRINTK("Index not initialised\n");
		return -4;
	}

	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 12 + (64 * b_ModulNbr));
	*pb_IndexStatus = (unsigned char) (dw_StatusReg & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetReferenceStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_ReferenceStatus) |
+----------------------------------------------------------------------------+
| Task : Return the reference status |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_ReferenceStatus : 0 : No REFERENCE occur |
| 1 : A REFERENCE occur |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Reference not initialised see function |
| "i_APCI1710_InitReference" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_GetReferenceStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_ReferenceStatus)
{
	/*
	 * Return the reference status of module b_ModulNbr in
	 * *pb_ReferenceStatus (0: no REFERENCE occurred, 1: a REFERENCE
	 * occurred). Returns 0 on success, -2 wrong module, -3 counter not
	 * initialised, -4 reference not initialised
	 * (see "i_APCI1710_InitReference").
	 */
	unsigned int dw_StatusReg = 0;

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Test if reference initialised */
	if (!devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_ReferenceInit) {
		DPRINTK("Reference not initialised\n");
		return -4;
	}

	/* The hardware bit is active-low: invert it for the caller */
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 24 + (64 * b_ModulNbr));
	*pb_ReferenceStatus = (unsigned char) (~dw_StatusReg & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetUASStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_UASStatus) |
+----------------------------------------------------------------------------+
| Task : Return the error signal (UAS) status |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_UASStatus : 0 : UAS is low "0" |
| 1 : UAS is high "1" |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_GetUASStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_UASStatus)
{
	/*
	 * Return the error signal (UAS) status of module b_ModulNbr in
	 * *pb_UASStatus (0: UAS low, 1: UAS high). Returns 0 on success,
	 * -2 wrong module, -3 counter not initialised.
	 */
	unsigned int dw_StatusReg = 0;

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* UAS is bit 1 of the status register at offset 24 */
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 24 + (64 * b_ModulNbr));
	*pb_UASStatus = (unsigned char) ((dw_StatusReg >> 1) & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetCBStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_CBStatus) |
+----------------------------------------------------------------------------+
| Task : Return the counter overflow status |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_CBStatus : 0 : Counter no overflow |
| 1 : Counter overflow |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_GetCBStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_CBStatus)
{
	/*
	 * Return the counter overflow status of module b_ModulNbr in
	 * *pb_CBStatus (0: no overflow, 1: overflow). Returns 0 on success,
	 * -2 wrong module, -3 counter not initialised.
	 */
	unsigned int dw_StatusReg = 0;

	/* Test the module number */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* Test if counter initialised (see "i_APCI1710_InitCounter") */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Overflow flag is bit 0 of the status register at offset 16 */
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 16 + (64 * b_ModulNbr));
	*pb_CBStatus = (unsigned char) (dw_StatusReg & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_Get16BitCBStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_CBStatusCounter0, |
| unsigned char *_ pb_CBStatusCounter1) |
+----------------------------------------------------------------------------+
| Task : Returns the counter overflow (counter initialised to |
| 2*16-bit) status from selected incremental counter |
| module |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_CBStatusCounter0 : 0 : No overflow occur for |
| the first 16-bit |
| counter |
| 1 : Overflow occur for the|
| first 16-bit counter |
| unsigned char *_ pb_CBStatusCounter1 : 0 : No overflow occur for |
| the second 16-bit |
| counter |
| 1 : Overflow occur for the|
| second 16-bit counter |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Counter not initialised to 2*16-bit mode. |
| See function "i_APCI1710_InitCounter" |
| -5: Firmware revision error |
+----------------------------------------------------------------------------+
*/
/*
 * Return the overflow status of both 16-bit sub-counters of a module
 * that was initialised in 2*16-bit mode.
 *
 * @dev:                 comedi device
 * @b_ModulNbr:          module number (0..3)
 * @pb_CBStatusCounter0: out: overflow flag of the first 16-bit counter
 * @pb_CBStatusCounter1: out: overflow flag of the second 16-bit counter
 *
 * Returns 0 on success, -2 bad module, -3 counter not initialised,
 * -4 counter not in 2*16-bit mode, -5 firmware too old (< 0x3136).
 */
int i_APCI1710_Get16BitCBStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_CBStatusCounter0, unsigned char *pb_CBStatusCounter1)
{
	unsigned int dw_StatusReg;

	/* Only modules 0..3 exist on this board. */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been set up first. */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Mode register bit 4 marks the 2*16-bit configuration. */
	if ((devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_ModeRegister.s_ByteModeRegister.
		b_ModeRegister1 & 0x10) != 0x10) {
		DPRINTK("Counter not initialised\n");
		return -4;
	}

	/* This feature needs firmware revision 0x3136 or newer. */
	if ((devpriv->s_BoardInfos.
		dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) < 0x3136) {
		/* Firmware revision error */
		return -5;
	}

	/* Bit 0 = second counter overflow, bit 1 = first counter overflow. */
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 16 + (64 * b_ModulNbr));
	*pb_CBStatusCounter1 = (unsigned char) ((dw_StatusReg >> 0) & 1);
	*pb_CBStatusCounter0 = (unsigned char) ((dw_StatusReg >> 1) & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetUDStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_UDStatus) |
+----------------------------------------------------------------------------+
| Task : Return the counter progress status |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
| selected mode down |
| 1 : Counter progress in the |
| selected mode up |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
+----------------------------------------------------------------------------+
*/
/*
 * Return the counter progress direction for the selected module.
 *
 * @dev:         comedi device
 * @b_ModulNbr:  module number (0..3)
 * @pb_UDStatus: out: 0 = counting down, 1 = counting up
 *
 * Returns 0 on success, -2 for a bad module number, -3 when the counter
 * was never initialised via i_APCI1710_InitCounter().
 */
int i_APCI1710_GetUDStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_UDStatus)
{
	unsigned int dw_StatusReg;

	/* Only modules 0..3 exist on this board. */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been set up first. */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* Bit 2 of the register at offset 24 carries the up/down flag. */
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 24 + (64 * b_ModulNbr));
	*pb_UDStatus = (unsigned char) ((dw_StatusReg >> 2) & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetInterruptUDLatchedStatus |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_UDStatus) |
+----------------------------------------------------------------------------+
| Task : Return the counter progress latched status after a |
| index interrupt occur. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
| selected mode down |
| 1 : Counter progress in the |
| selected mode up |
| 2 : No index interrupt occur |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: No counter module found |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Interrupt function not initialised. |
| See function "i_APCI1710_SetBoardIntRoutineX" |
+----------------------------------------------------------------------------+
*/
/*
 * Return the counter progress direction latched at the last index
 * interrupt, then clear the "index interrupt occurred" flag.
 *
 * @dev:         comedi device
 * @b_ModulNbr:  module number (0..3)
 * @pb_UDStatus: out: 0 = down, 1 = up, 2 = no index interrupt occurred
 *
 * Returns 0 on success, -2 for a bad module number, -3 when the counter
 * was never initialised via i_APCI1710_InitCounter().
 */
int i_APCI1710_GetInterruptUDLatchedStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char *pb_UDStatus)
{
	unsigned int dw_StatusReg;

	/* Only modules 0..3 exist on this board. */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been set up first. */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* No latched value when no index interrupt has fired. */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_IndexInterruptOccur != 1) {
		*pb_UDStatus = 2;
		return 0;
	}

	/* Consume the interrupt flag and read the latched direction bit. */
	devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_IndexInterruptOccur = 0;
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 12 + (64 * b_ModulNbr));
	*pb_UDStatus = (unsigned char) ((dw_StatusReg >> 1) & 1);

	return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_ReadFrequencyMeasurement |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char *_ pb_Status, |
| PULONG_ pul_ReadValue) |
+----------------------------------------------------------------------------+
| Task : Returns the status (pb_Status) and the number of |
| increments in the set time. |
| See function " i_APCI1710_InitFrequencyMeasurement " |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Number of the module to be |
| configured (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_Status : Returns the frequency |
| measurement status |
| 0 : Counting cycle not |
| started. |
| 1 : Counting cycle started. |
| 2 : Counting cycle stopped. |
| The measurement cycle is |
| completed. |
| unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
| selected mode down |
| 1 : Counter progress in the |
| selected mode up |
| PULONG_ pul_ReadValue : Return the number of |
| increments in the defined |
| time base. |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: The selected module number is wrong |
| -3: Counter not initialised see function |
| "i_APCI1710_InitCounter" |
| -4: Frequency measurement logic not initialised. |
| See function "i_APCI1710_InitFrequencyMeasurement" |
+----------------------------------------------------------------------------+
*/
/*
 * Read the frequency measurement status and the number of increments
 * counted in the configured time base.
 *
 * @dev:           comedi device
 * @b_ModulNbr:    module number (0..3)
 * @pb_Status:     out: 0 = cycle not started, 1 = started, 2 = completed
 * @pb_UDStatus:   out: direction bits latched with the result (0..3)
 * @pul_ReadValue: out: increment count (valid only when *pb_Status == 2)
 *
 * When counting down the hardware delivers the value as a complement;
 * the non-zero halves (2*16-bit mode) or the whole 32-bit word are
 * converted back to a positive increment count before returning.
 *
 * Returns 0 on success, -2 bad module, -3 counter not initialised,
 * -4 frequency measurement logic not initialised.
 */
int i_APCI1710_ReadFrequencyMeasurement(struct comedi_device *dev,
	unsigned char b_ModulNbr,
	unsigned char *pb_Status, unsigned char *pb_UDStatus, unsigned int *pul_ReadValue)
{
	unsigned int ui_16BitValue;
	unsigned int dw_StatusReg;

	/* Only modules 0..3 exist on this board. */
	if (b_ModulNbr >= 4) {
		DPRINTK("The selected module number parameter is wrong\n");
		return -2;
	}

	/* The counter must have been set up first. */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_CounterInit != 1) {
		DPRINTK("Counter not initialised\n");
		return -3;
	}

	/* i_APCI1710_InitFrequencyMeasurement() must have run. */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_FrequencyMeasurementInit != 1) {
		DPRINTK("Frequency measurement logic not initialised\n");
		return -4;
	}

	/* Measurement not enabled: report "counting cycle not started". */
	if (devpriv->s_ModuleInfo[b_ModulNbr].
		s_SiemensCounterInfo.s_InitFlag.b_FrequencyMeasurementEnable != 1) {
		*pb_Status = 0;
		*pb_UDStatus = 0;
		return 0;
	}

	/* Read the measurement status register. */
	dw_StatusReg = inl(devpriv->s_BoardInfos.
		ui_Address + 32 + (64 * b_ModulNbr));

	/* Bit 0 clear: the cycle is still running. */
	if (!(dw_StatusReg & 1)) {
		*pb_Status = 1;
		*pb_UDStatus = 0;
		return 0;
	}

	/* Cycle completed: latch direction bits and fetch the raw value. */
	*pb_Status = 2;
	*pb_UDStatus = (unsigned char) ((dw_StatusReg >> 1) & 3);
	*pul_ReadValue = inl(devpriv->s_BoardInfos.
		ui_Address + 28 + (64 * b_ModulNbr));

	if (*pb_UDStatus == 0) {
		/* Both halves counted down (or single counter counted down). */
		if ((devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.
			s_ModeRegister.s_ByteModeRegister.
			b_ModeRegister1 & APCI1710_16BIT_COUNTER) ==
			APCI1710_16BIT_COUNTER) {
			/* Complement the low 16-bit half if it pulsed. */
			if ((*pul_ReadValue & 0xFFFFU) != 0) {
				ui_16BitValue =
					(unsigned int) *pul_ReadValue & 0xFFFFU;
				*pul_ReadValue =
					(*pul_ReadValue & 0xFFFF0000UL) |
					(0xFFFFU - ui_16BitValue);
			}
			/* Complement the high 16-bit half if it pulsed. */
			if ((*pul_ReadValue & 0xFFFF0000UL) != 0) {
				ui_16BitValue = (unsigned int)
					((*pul_ReadValue >> 16) & 0xFFFFU);
				*pul_ReadValue =
					(*pul_ReadValue & 0xFFFFUL) |
					((0xFFFFU - ui_16BitValue) << 16);
			}
		} else {
			if (*pul_ReadValue != 0)
				*pul_ReadValue =
					0xFFFFFFFFUL - *pul_ReadValue;
		}
	} else if (*pb_UDStatus == 1) {
		/* Only the second (high) 16-bit counter counted down. */
		if ((*pul_ReadValue & 0xFFFF0000UL) != 0) {
			ui_16BitValue = (unsigned int)
				((*pul_ReadValue >> 16) & 0xFFFFU);
			*pul_ReadValue =
				(*pul_ReadValue & 0xFFFFUL) |
				((0xFFFFU - ui_16BitValue) << 16);
		}
	} else if (*pb_UDStatus == 2) {
		/* Only the first (low) 16-bit counter counted down. */
		if ((*pul_ReadValue & 0xFFFFU) != 0) {
			ui_16BitValue =
				(unsigned int) *pul_ReadValue & 0xFFFFU;
			*pul_ReadValue =
				(*pul_ReadValue & 0xFFFF0000UL) |
				(0xFFFFU - ui_16BitValue);
		}
	}

	return 0;
}
| gpl-2.0 |
profglavcho/mt6577-kernel-3.10.65 | drivers/misc/mediatek/thermal/mt6580/mtk_ts_pa.c | 1 | 15461 | #include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
#include <linux/aee.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <linux/spinlock.h>
#include "mach/mtk_thermal_monitor.h"
#include "mach/mt_typedefs.h"
#include "mach/mt_thermal.h"
#include "mach/mtk_mdm_monitor.h"
extern struct proc_dir_entry * mtk_thermal_get_proc_drv_therm_dir_entry(void);
static unsigned int interval = 0; /* seconds, 0 : no auto polling */
static unsigned int trip_temp[10] = {85000,80000,70000,60000,50000,40000,30000,20000,10000,5000};
static int g_THERMAL_TRIP[10] = {0,0,0,0,0,0,0,0,0,0};
static unsigned int cl_dev_sysrst_state = 0;
static struct thermal_zone_device *thz_dev;
static struct thermal_cooling_device *cl_dev_sysrst;
static int mtktspa_debug_log = 0;
static int kernelmode = 0;
static int num_trip=0;
static char g_bind0[20]="mtktspa-sysrst";
static char g_bind1[20]={0};
static char g_bind2[20]={0};
static char g_bind3[20]={0};
static char g_bind4[20]={0};
static char g_bind5[20]={0};
static char g_bind6[20]={0};
static char g_bind7[20]={0};
static char g_bind8[20]={0};
static char g_bind9[20]={0};
/**
* If curr_temp >= polling_trip_temp1, use interval
* else if cur_temp >= polling_trip_temp2 && curr_temp < polling_trip_temp1, use interval*polling_factor1
* else, use interval*polling_factor2
*/
static int polling_trip_temp1 = 40000;
static int polling_trip_temp2 = 20000;
static int polling_factor1 = 5000;
static int polling_factor2 = 10000;
#define mtktspa_TEMP_CRIT 85000 /* 85.000 degree Celsius */
#define mtktspa_dprintk(fmt, args...) \
do { \
if (mtktspa_debug_log) { \
pr_notice("Power/PA_Thermal" fmt, ##args); \
} \
} while(0)
/*
struct md_info{
char *attribute;
int value;
char *unit;
int invalid_value;
int index;
};
struct md_info g_pinfo_list[] =
{{"TXPWR_MD1", -127, "db", -127, 0},
{"TXPWR_MD2", -127, "db", -127, 1},
{"RFTEMP_2G_MD1", -32767, "¢XC", -32767, 2},
{"RFTEMP_2G_MD2", -32767, "¢XC", -32767, 3},
{"RFTEMP_3G_MD1", -32767, "¢XC", -32767, 4},
{"RFTEMP_3G_MD2", -32767, "¢XC", -32767, 5}};
*/
static DEFINE_MUTEX(TSPA_lock);
/*
 * Fetch the PA (modem RF) temperature from the modem monitor.
 *
 * Scans the modem info table for the first valid RFTEMP_2G_MD1 or
 * RFTEMP_3G_MD1 entry and returns its value (millidegree C per the
 * surrounding trip tables — TODO confirm against mtk_mdm_monitor).
 * Returns -127000 when no valid reading is available yet.
 */
static int mtktspa_get_hw_temp(void)
{
	struct md_info *p_info;
	int size;
	int idx;
	int temp;

	mutex_lock(&TSPA_lock);
	mtk_mdm_get_md_info(&p_info, &size);

	for (idx = 0; idx < size; idx++) {
		mtktspa_dprintk("PA temperature: name:%s, vaule:%d, invalid_value=%d \n", p_info[idx].attribute, p_info[idx].value, p_info[idx].invalid_value);
		if (!strcmp(p_info[idx].attribute, "RFTEMP_2G_MD1")) {
			mtktspa_dprintk("PA temperature: RFTEMP_2G_MD1\n");
			if (p_info[idx].value != p_info[idx].invalid_value)
				break;
		} else if (!strcmp(p_info[idx].attribute, "RFTEMP_3G_MD1")) {
			mtktspa_dprintk("PA temperature: RFTEMP_3G_MD1\n");
			if (p_info[idx].value != p_info[idx].invalid_value)
				break;
		}
	}

	/* No usable entry found: report the "not ready" sentinel. */
	if (idx == size) {
		mtktspa_dprintk("PA temperature: not ready\n");
		mutex_unlock(&TSPA_lock);
		return -127000;
	}

	mtktspa_dprintk("PA temperature: %d\n", p_info[idx].value);
	/* Flag clearly implausible readings in the kernel log. */
	if ((p_info[idx].value > 100000) || (p_info[idx].value < -30000))
		printk("[Power/PA_Thermal] PA T=%d\n", p_info[idx].value);
	temp = p_info[idx].value;
	mutex_unlock(&TSPA_lock);
	return temp;
}
/*
 * thermal_zone get_temp callback: report the PA temperature and adapt
 * the zone polling period to how hot it currently is.
 */
static int mtktspa_get_temp(struct thermal_zone_device *thermal,
	unsigned long *t)
{
	int cur_temp;

	*t = mtktspa_get_hw_temp();
	cur_temp = (int) *t;

	/* Poll fast when hot, progressively slower when cool. */
	if (cur_temp >= polling_trip_temp1)
		thermal->polling_delay = interval * 1000;
	else if (cur_temp < polling_trip_temp2)
		thermal->polling_delay = interval * polling_factor2;
	else
		thermal->polling_delay = interval * polling_factor1;

	return 0;
}
/*
 * thermal_zone bind callback: attach a cooling device to the trip point
 * whose configured name (g_bind0..g_bind9) matches the device type.
 * Unknown device types are silently ignored (returns 0), matching the
 * framework's expectation for "not ours".
 */
static int mtktspa_bind(struct thermal_zone_device *thermal,
	struct thermal_cooling_device *cdev)
{
	const char *bind_name[10] = {
		g_bind0, g_bind1, g_bind2, g_bind3, g_bind4,
		g_bind5, g_bind6, g_bind7, g_bind8, g_bind9
	};
	int table_val;

	/* First matching slot wins, exactly as the original if/else chain. */
	for (table_val = 0; table_val < 10; table_val++)
		if (!strcmp(cdev->type, bind_name[table_val]))
			break;
	if (table_val == 10)
		return 0;

	mtktspa_dprintk("[mtktspa_bind] %s\n", cdev->type);

	if (mtk_thermal_zone_bind_cooling_device(thermal, table_val, cdev)) {
		mtktspa_dprintk("[mtktspa_bind] error binding cooling dev\n");
		return -EINVAL;
	}

	mtktspa_dprintk("[mtktspa_bind] binding OK\n");
	return 0;
}
/*
 * thermal_zone unbind callback: detach a cooling device from the trip
 * point whose configured name matches the device type. Mirrors
 * mtktspa_bind(); unknown device types are ignored (returns 0).
 */
static int mtktspa_unbind(struct thermal_zone_device *thermal,
	struct thermal_cooling_device *cdev)
{
	const char *bind_name[10] = {
		g_bind0, g_bind1, g_bind2, g_bind3, g_bind4,
		g_bind5, g_bind6, g_bind7, g_bind8, g_bind9
	};
	int table_val;

	/* First matching slot wins, exactly as the original if/else chain. */
	for (table_val = 0; table_val < 10; table_val++)
		if (!strcmp(cdev->type, bind_name[table_val]))
			break;
	if (table_val == 10)
		return 0;

	mtktspa_dprintk("[mtktspa_unbind] %s\n", cdev->type);

	if (thermal_zone_unbind_cooling_device(thermal, table_val, cdev)) {
		mtktspa_dprintk("[mtktspa_unbind] error unbinding cooling dev\n");
		return -EINVAL;
	}

	mtktspa_dprintk("[mtktspa_unbind] unbinding OK\n");
	return 0;
}
/* thermal_zone get_mode callback: report the mode cached by set_mode(). */
static int mtktspa_get_mode(struct thermal_zone_device *thermal,
	enum thermal_device_mode *mode)
{
	if (kernelmode)
		*mode = THERMAL_DEVICE_ENABLED;
	else
		*mode = THERMAL_DEVICE_DISABLED;
	return 0;
}
/* thermal_zone set_mode callback: cache the mode for get_mode(). */
static int mtktspa_set_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode mode)
{
kernelmode = mode;
return 0;
}
/* thermal_zone callback: trip type from the table set via mtktspa_write(). */
static int mtktspa_get_trip_type(struct thermal_zone_device *thermal, int trip,
enum thermal_trip_type *type)
{
*type = g_THERMAL_TRIP[trip];
return 0;
}
/* thermal_zone callback: trip temperature (mC) from the configured table. */
static int mtktspa_get_trip_temp(struct thermal_zone_device *thermal, int trip,
unsigned long *temp)
{
*temp = trip_temp[trip];
return 0;
}
/* thermal_zone callback: fixed critical temperature (85 C). */
static int mtktspa_get_crit_temp(struct thermal_zone_device *thermal,
unsigned long *temperature)
{
*temperature = mtktspa_TEMP_CRIT;
return 0;
}
/* bind callback functions to thermalzone */
/* Ops table handed to mtk_thermal_zone_device_register() below. */
static struct thermal_zone_device_ops mtktspa_dev_ops = {
.bind = mtktspa_bind,
.unbind = mtktspa_unbind,
.get_temp = mtktspa_get_temp,
.get_mode = mtktspa_get_mode,
.set_mode = mtktspa_set_mode,
.get_trip_type = mtktspa_get_trip_type,
.get_trip_temp = mtktspa_get_trip_temp,
.get_crit_temp = mtktspa_get_crit_temp,
};
/*
* cooling device callback functions (mtktspa_cooling_sysrst_ops)
* 1 : ON and 0 : OFF
*/
/* sysrst cooler: binary device, max state is 1 (= trigger reset). */
static int tspa_sysrst_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
*state = 1;
return 0;
}
/* sysrst cooler: report the state last written by set_cur_state(). */
static int tspa_sysrst_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
*state = cl_dev_sysrst_state;
return 0;
}
/*
 * sysrst cooler: state 1 deliberately crashes the kernel (BUG(), or a
 * NULL write on arm64) so the watchdog/abort path resets the system for
 * thermal protection. This is intentional last-resort behaviour.
 */
static int tspa_sysrst_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
cl_dev_sysrst_state = state;
if(cl_dev_sysrst_state == 1)
{
printk("Power/PA_Thermal: reset, reset, reset!!!");
printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@");
printk("*****************************************");
printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@");
#ifndef CONFIG_ARM64
BUG();
#else
*(unsigned int*) 0x0 = 0xdead; // To trigger data abort to reset the system for thermal protection.
#endif
}
return 0;
}
/* bind fan callbacks to fan device */
/* Ops for the "mtktspa-sysrst" cooling device registered in init. */
static struct thermal_cooling_device_ops mtktspa_cooling_sysrst_ops = {
.get_max_state = tspa_sysrst_get_max_state,
.get_cur_state = tspa_sysrst_get_cur_state,
.set_cur_state = tspa_sysrst_set_cur_state,
};
int mtktspa_register_thermal(void);
void mtktspa_unregister_thermal(void);
/*
 * seq_file show routine for /proc/driver/thermal/tzpa: dumps the trip
 * temperatures, trip types, bound cooler names and polling period (ms).
 */
static int mtktspa_read(struct seq_file *m, void *v)
{
seq_printf(m, "[ mtktspa_read] trip_0_temp=%d,trip_1_temp=%d,trip_2_temp=%d,trip_3_temp=%d,trip_4_temp=%d,\n\
trip_5_temp=%d,trip_6_temp=%d,trip_7_temp=%d,trip_8_temp=%d,trip_9_temp=%d,\n\
g_THERMAL_TRIP_0=%d,g_THERMAL_TRIP_1=%d,g_THERMAL_TRIP_2=%d,g_THERMAL_TRIP_3=%d,g_THERMAL_TRIP_4=%d,\n\
g_THERMAL_TRIP_5=%d,g_THERMAL_TRIP_6=%d,g_THERMAL_TRIP_7=%d,g_THERMAL_TRIP_8=%d,g_THERMAL_TRIP_9=%d,\n\
cooldev0=%s,cooldev1=%s,cooldev2=%s,cooldev3=%s,cooldev4=%s,\n\
cooldev5=%s,cooldev6=%s,cooldev7=%s,cooldev8=%s,cooldev9=%s,time_ms=%d\n",
trip_temp[0],trip_temp[1],trip_temp[2],trip_temp[3],trip_temp[4],
trip_temp[5],trip_temp[6],trip_temp[7],trip_temp[8],trip_temp[9],
g_THERMAL_TRIP[0],g_THERMAL_TRIP[1],g_THERMAL_TRIP[2],g_THERMAL_TRIP[3],g_THERMAL_TRIP[4],
g_THERMAL_TRIP[5],g_THERMAL_TRIP[6],g_THERMAL_TRIP[7],g_THERMAL_TRIP[8],g_THERMAL_TRIP[9],
g_bind0,g_bind1,g_bind2,g_bind3,g_bind4,g_bind5,g_bind6,g_bind7,g_bind8,g_bind9,
interval*1000);
return 0;
}
/*
 * /proc write handler: parses "<num_trip> then 10 x (<temp> <type>
 * <cooler-name>) then <polling-ms>" and re-registers the thermal zone
 * with the new trip configuration.
 *
 * Fixes vs. the original:
 *  - sscanf "%s" into 20-byte stack buffers from user-controlled input
 *    could overflow; every string conversion is now bounded ("%19s").
 *  - num_trip came from user space unchecked and indexes 10-element
 *    arrays; it is now range-validated before use.
 *  - a failed copy_from_user() returned 0 ("wrote nothing" — callers
 *    retry forever); it now returns -EFAULT.
 *  - bindN buffers are zero-initialized so the fixed 20-byte copy into
 *    g_bindN never publishes uninitialized stack bytes.
 */
static ssize_t mtktspa_write(struct file *file, const char __user *buffer, size_t count, loff_t *data)
{
	int len = 0, time_msec = 0;
	int trip[10] = {0};
	int t_type[10] = {0};
	int i;
	char bind0[20] = {0}, bind1[20] = {0}, bind2[20] = {0}, bind3[20] = {0}, bind4[20] = {0};
	char bind5[20] = {0}, bind6[20] = {0}, bind7[20] = {0}, bind8[20] = {0}, bind9[20] = {0};
	char desc[512];

	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
	if (copy_from_user(desc, buffer, len))
	{
		return -EFAULT;
	}
	desc[len] = '\0';

	/* "%19s" leaves room for the terminating NUL in the 20-byte buffers. */
	if (sscanf(desc, "%d %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d %d %19s %d",
		&num_trip, &trip[0],&t_type[0],bind0, &trip[1],&t_type[1],bind1,
		&trip[2],&t_type[2],bind2, &trip[3],&t_type[3],bind3,
		&trip[4],&t_type[4],bind4, &trip[5],&t_type[5],bind5,
		&trip[6],&t_type[6],bind6, &trip[7],&t_type[7],bind7,
		&trip[8],&t_type[8],bind8, &trip[9],&t_type[9],bind9,
		&time_msec) == 32)
	{
		/* num_trip indexes the 10-entry trip tables below. */
		if (num_trip < 0 || num_trip > 10)
		{
			mtktspa_dprintk("[mtktspa_write] bad argument\n");
			return -EINVAL;
		}

		mtktspa_dprintk("[mtktspa_write] mtktspa_unregister_thermal\n");
		mtktspa_unregister_thermal();

		for (i = 0; i < num_trip; i++)
			g_THERMAL_TRIP[i] = t_type[i];

		g_bind0[0]=g_bind1[0]=g_bind2[0]=g_bind3[0]=g_bind4[0]=g_bind5[0]=g_bind6[0]=g_bind7[0]=g_bind8[0]=g_bind9[0]='\0';
		for (i = 0; i < 20; i++)
		{
			g_bind0[i]=bind0[i];
			g_bind1[i]=bind1[i];
			g_bind2[i]=bind2[i];
			g_bind3[i]=bind3[i];
			g_bind4[i]=bind4[i];
			g_bind5[i]=bind5[i];
			g_bind6[i]=bind6[i];
			g_bind7[i]=bind7[i];
			g_bind8[i]=bind8[i];
			g_bind9[i]=bind9[i];
		}

		mtktspa_dprintk("[mtktspa_write] g_THERMAL_TRIP_0=%d,g_THERMAL_TRIP_1=%d,g_THERMAL_TRIP_2=%d,g_THERMAL_TRIP_3=%d,g_THERMAL_TRIP_4=%d,\
g_THERMAL_TRIP_5=%d,g_THERMAL_TRIP_6=%d,g_THERMAL_TRIP_7=%d,g_THERMAL_TRIP_8=%d,g_THERMAL_TRIP_9=%d,\n",
		g_THERMAL_TRIP[0],g_THERMAL_TRIP[1],g_THERMAL_TRIP[2],g_THERMAL_TRIP[3],g_THERMAL_TRIP[4],
		g_THERMAL_TRIP[5],g_THERMAL_TRIP[6],g_THERMAL_TRIP[7],g_THERMAL_TRIP[8],g_THERMAL_TRIP[9]);
		mtktspa_dprintk("[mtktspa_write] cooldev0=%s,cooldev1=%s,cooldev2=%s,cooldev3=%s,cooldev4=%s,\
cooldev5=%s,cooldev6=%s,cooldev7=%s,cooldev8=%s,cooldev9=%s\n",
		g_bind0,g_bind1,g_bind2,g_bind3,g_bind4,g_bind5,g_bind6,g_bind7,g_bind8,g_bind9);

		for (i = 0; i < num_trip; i++)
		{
			trip_temp[i] = trip[i];
		}
		interval = time_msec / 1000;

		mtktspa_dprintk("[mtktspa_write] trip_0_temp=%d,trip_1_temp=%d,trip_2_temp=%d,trip_3_temp=%d,trip_4_temp=%d,\
trip_5_temp=%d,trip_6_temp=%d,trip_7_temp=%d,trip_8_temp=%d,trip_9_temp=%d,time_ms=%d\n",
		trip_temp[0],trip_temp[1],trip_temp[2],trip_temp[3],trip_temp[4],
		trip_temp[5],trip_temp[6],trip_temp[7],trip_temp[8],trip_temp[9],interval*1000);

		mtktspa_dprintk("[mtktspa_write] mtktspa_register_thermal\n");
		mtktspa_register_thermal();
		return count;
	}
	else
	{
		mtktspa_dprintk("[mtktspa_write] bad argument\n");
	}
	return -EINVAL;
}
/* proc open hook: single-shot seq_file wrapper around mtktspa_read(). */
static int mtktspa_open(struct inode *inode, struct file *file)
{
return single_open(file, mtktspa_read, NULL);
}
/* /proc file hooks: reads via seq_file, writes reprogram the trip table. */
static const struct file_operations mtktspa_fops = {
.owner = THIS_MODULE,
.open = mtktspa_open,
.read = seq_read,
.llseek = seq_lseek,
.write = mtktspa_write,
.release = single_release,
};
/*
 * Register the "mtktspa-sysrst" cooling device. Always returns 0.
 * NOTE(review): the returned handle is not error-checked here —
 * confirm mtk_thermal_cooling_device_register() cannot fail, or add a
 * check before relying on cl_dev_sysrst.
 */
int mtktspa_register_cooler(void)
{
/* cooling devices */
cl_dev_sysrst = mtk_thermal_cooling_device_register("mtktspa-sysrst", NULL,
&mtktspa_cooling_sysrst_ops);
return 0;
}
/*
 * Register the "mtktspa" thermal zone with the current trip count and
 * polling period, and forward the period (seconds) to the modem monitor.
 * Always returns 0; the zone handle is stored in thz_dev.
 */
int mtktspa_register_thermal(void)
{
mtktspa_dprintk("[mtktspa_register_thermal] \n");
/* trips */
thz_dev = mtk_thermal_zone_device_register("mtktspa", num_trip, NULL,
&mtktspa_dev_ops, 0, 0, 0, interval*1000);
mtk_mdm_set_md1_signal_period(interval);
return 0;
}
/* Unregister the sysrst cooling device, if registered; idempotent. */
void mtktspa_unregister_cooler(void)
{
	if (!cl_dev_sysrst)
		return;

	mtk_thermal_cooling_device_unregister(cl_dev_sysrst);
	cl_dev_sysrst = NULL;
}
/* Unregister the thermal zone, if registered; idempotent. */
void mtktspa_unregister_thermal(void)
{
	mtktspa_dprintk("[mtktspa_unregister_thermal] \n");

	if (!thz_dev)
		return;

	mtk_thermal_zone_device_unregister(thz_dev);
	thz_dev = NULL;
}
/*
 * Module init: register the sysrst cooler and the thermal zone, then
 * expose the tunable trip table at /proc/driver/thermal/tzpa.
 */
static int __init mtktspa_init(void)
{
int err = 0;
struct proc_dir_entry *entry = NULL;
struct proc_dir_entry *mtktspa_dir = NULL;
mtktspa_dprintk("[%s]\n", __func__);
err = mtktspa_register_cooler();
if(err)
return err;
err = mtktspa_register_thermal();
if (err)
goto err_unreg;
mtktspa_dir = mtk_thermal_get_proc_drv_therm_dir_entry();
if (!mtktspa_dir)
{
mtktspa_dprintk("[%s]: mkdir /proc/driver/thermal failed\n", __func__);
}
else
{
entry = proc_create("tzpa", S_IRUGO | S_IWUSR | S_IWGRP, mtktspa_dir, &mtktspa_fops);
if (entry) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
/* uid 0 / gid 1000 (AID_SYSTEM) may write the trip table */
proc_set_user(entry, 0, 1000);
#else
entry->gid = 1000;
#endif
}
}
return 0;
err_unreg:
mtktspa_unregister_cooler();
return err;
}
/* Module exit: tear down in reverse order of mtktspa_init(). */
static void __exit mtktspa_exit(void)
{
mtktspa_dprintk("[mtktspa_exit] \n");
mtktspa_unregister_thermal();
mtktspa_unregister_cooler();
}
module_init(mtktspa_init);
module_exit(mtktspa_exit);
| gpl-2.0 |
kevingessner/git | builtin/fast-export.c | 1 | 18603 | /*
* "git fast-export" builtin command
*
* Copyright (C) 2007 Johannes E. Schindelin
*/
#include "builtin.h"
#include "cache.h"
#include "commit.h"
#include "object.h"
#include "tag.h"
#include "diff.h"
#include "diffcore.h"
#include "log-tree.h"
#include "revision.h"
#include "decorate.h"
#include "string-list.h"
#include "utf8.h"
#include "parse-options.h"
#include "quote.h"
/* Command-line synopsis shown by parse_options() on usage errors. */
static const char *fast_export_usage[] = {
"git fast-export [rev-list-opts]",
NULL
};
/* Emit a progress line every N objects; 0 disables (--progress=N). */
static int progress;
/* How to export signed tags (--signed-tags). */
static enum { ABORT, VERBATIM, WARN, STRIP } signed_tag_mode = ABORT;
/*
 * How to export tags whose tagged object is filtered out
 * (--tag-of-filtered-object).  The initializer used to be spelled
 * "ABORT" — an enumerator of the *unrelated* enum above — and only
 * worked because ABORT and ERROR both happen to be 0.  Use the
 * matching enumerator so the default survives any reordering.
 */
static enum { ERROR, DROP, REWRITE } tag_of_filtered_mode = ERROR;
static int fake_missing_tagger;
static int use_done_feature;
static int no_data;
static int full_tree;
/*
 * parse_options callback for --signed-tags=<mode>.  Maps the textual
 * argument onto signed_tag_mode; "ignore" is an alias for "verbatim",
 * and --no-signed-tags (unset) means "abort".  Returns 0 on success or
 * the result of error() for an unknown mode.
 */
static int parse_opt_signed_tag_mode(const struct option *opt,
				     const char *arg, int unset)
{
	if (unset || !strcmp(arg, "abort")) {
		signed_tag_mode = ABORT;
		return 0;
	}
	if (!strcmp(arg, "verbatim") || !strcmp(arg, "ignore")) {
		signed_tag_mode = VERBATIM;
		return 0;
	}
	if (!strcmp(arg, "warn")) {
		signed_tag_mode = WARN;
		return 0;
	}
	if (!strcmp(arg, "strip")) {
		signed_tag_mode = STRIP;
		return 0;
	}
	return error("Unknown signed-tag mode: %s", arg);
}
/*
 * Option callback for --tag-of-filtered-object=<mode>.  Accepts "abort"
 * (default), "drop" and "rewrite"; anything else is an error (-1).
 */
static int parse_opt_tag_of_filtered_mode(const struct option *opt,
					  const char *arg, int unset)
{
	if (unset || !strcmp(arg, "abort")) {
		tag_of_filtered_mode = ABORT;
		return 0;
	}
	if (!strcmp(arg, "drop")) {
		tag_of_filtered_mode = DROP;
		return 0;
	}
	if (!strcmp(arg, "rewrite")) {
		tag_of_filtered_mode = REWRITE;
		return 0;
	}
	return error("Unknown tag-of-filtered mode: %s", arg);
}
/* Object -> mark mapping, stored as a decoration whose "pointer" value
 * is really the integer mark (see mark_to_ptr()/ptr_to_mark()). */
static struct decoration idnums;
static uint32_t last_idnum;	/* highest mark handed out so far */

/*
 * Does 'commit' still have a parent that is interesting but has not
 * been emitted yet?  Such commits must be queued and flushed later by
 * handle_tail().
 */
static int has_unshown_parent(struct commit *commit)
{
	struct commit_list *parent;

	for (parent = commit->parents; parent; parent = parent->next)
		if (!(parent->item->object.flags & SHOWN) &&
		    !(parent->item->object.flags & UNINTERESTING))
			return 1;

	return 0;
}

/* Since intptr_t is C99, we do not use it here */
/* Encode an integer mark as a fake pointer for the decoration slot.
 * Deliberate NULL-pointer arithmetic: the "pointer" is never
 * dereferenced, only converted back by ptr_to_mark(). */
static inline uint32_t *mark_to_ptr(uint32_t mark)
{
	return ((uint32_t *)NULL) + mark;
}

/* Inverse of mark_to_ptr(): recover the integer mark. */
static inline uint32_t ptr_to_mark(void * mark)
{
	return (uint32_t *)mark - (uint32_t *)NULL;
}

/* Record that 'object' carries mark 'mark'. */
static inline void mark_object(struct object *object, uint32_t mark)
{
	add_decoration(&idnums, object, mark_to_ptr(mark));
}

/* Assign the next sequential mark to 'object'. */
static inline void mark_next_object(struct object *object)
{
	mark_object(object, ++last_idnum);
}

/* Mark previously assigned to 'object', or 0 if it has none. */
static int get_object_mark(struct object *object)
{
	void *decoration = lookup_decoration(&idnums, object);

	if (!decoration)
		return 0;
	return ptr_to_mark(decoration);
}
/*
 * Emit a "progress" stream command every <progress> objects
 * (--progress=<n>); a no-op when the option was not given.
 */
static void show_progress(void)
{
	static int counter = 0;

	if (!progress)
		return;

	counter++;
	if (counter % progress == 0)
		printf("progress %d objects\n", counter);
}
/*
 * Emit a "blob" command for the object named by 'sha1', assigning it
 * the next mark and writing its raw contents.  No-ops when --no-data
 * is in effect, when sha1 is the null sha1, or when the object was
 * already emitted (SHOWN).  Dies if the object cannot be read.
 */
static void handle_object(const unsigned char *sha1)
{
	unsigned long size;
	enum object_type type;
	char *buf;
	struct object *object;

	if (no_data)
		return;

	if (is_null_sha1(sha1))
		return;

	object = parse_object(sha1);
	if (!object)
		die ("Could not read blob %s", sha1_to_hex(sha1));

	if (object->flags & SHOWN)
		return;

	buf = read_sha1_file(sha1, &type, &size);
	if (!buf)
		die ("Could not read blob %s", sha1_to_hex(sha1));

	mark_next_object(object);

	printf("blob\nmark :%"PRIu32"\ndata %lu\n", last_idnum, size);
	/* fwrite(..., 0, 1, ...) would "fail"; skip the write for empty blobs */
	if (size && fwrite(buf, size, 1, stdout) != 1)
		die_errno ("Could not write blob '%s'", sha1_to_hex(sha1));
	printf("\n");

	show_progress();

	object->flags |= SHOWN;
	free(buf);
}
/*
 * qsort() comparator over struct diff_filepair* entries.  Orders
 * lexicographically by path, except that a longer path sorts before its
 * own prefix ('d/e' before 'd'), and rename entries sort after all
 * other entries with the same path.
 */
static int depth_first(const void *a_, const void *b_)
{
	const struct diff_filepair *a = *((const struct diff_filepair **)a_);
	const struct diff_filepair *b = *((const struct diff_filepair **)b_);
	const char *name_a = a->one ? a->one->path : a->two->path;
	const char *name_b = b->one ? b->one->path : b->two->path;
	int len_a = strlen(name_a);
	int len_b = strlen(name_b);
	int shorter = (len_a < len_b) ? len_a : len_b;
	int cmp;

	/* strcmp would sort 'd' before 'd/e'; we want 'd/e' before 'd' */
	cmp = memcmp(name_a, name_b, shorter);
	if (cmp)
		return cmp;

	/* same prefix: deeper (longer) path first */
	if (len_a != len_b)
		return len_b - len_a;

	/*
	 * Move 'R'ename entries last so that all references of the file
	 * appear in the output before it is renamed (e.g., when a file
	 * was copied and renamed in the same commit).
	 */
	return (a->status == 'R') - (b->status == 'R');
}
/*
 * Write 'path' to stdout, C-style quoting it only when fast-import's
 * grammar requires it (special characters present).
 */
static void print_path(const char *path)
{
	/* first call only probes whether quoting is needed */
	if (!quote_c_style(path, NULL, NULL, 0))
		printf("%s", path);
	else
		quote_c_style(path, NULL, stdout, 0);
}
/*
 * Diff-callback (rev->diffopt.format_callback): translate one queued
 * diff into fast-import file commands -- "D" for deletions, "C"/"R"
 * for copies/renames, "M" for modifications and additions.  Dies on an
 * unexpected diff status.
 */
static void show_filemodify(struct diff_queue_struct *q,
			    struct diff_options *options, void *data)
{
	int i;

	/*
	 * Handle files below a directory first, in case they are all deleted
	 * and the directory changes to a file or symlink.
	 */
	qsort(q->queue, q->nr, sizeof(q->queue[0]), depth_first);

	for (i = 0; i < q->nr; i++) {
		struct diff_filespec *ospec = q->queue[i]->one;
		struct diff_filespec *spec = q->queue[i]->two;

		switch (q->queue[i]->status) {
		case DIFF_STATUS_DELETED:
			printf("D ");
			print_path(spec->path);
			putchar('\n');
			break;

		case DIFF_STATUS_COPIED:
		case DIFF_STATUS_RENAMED:
			printf("%c ", q->queue[i]->status);
			print_path(ospec->path);
			putchar(' ');
			print_path(spec->path);
			putchar('\n');

			/* copy/rename with identical contents and mode needs
			 * no additional "M" command */
			if (!hashcmp(ospec->sha1, spec->sha1) &&
			    ospec->mode == spec->mode)
				break;
			/* fallthrough */

		case DIFF_STATUS_TYPE_CHANGED:
		case DIFF_STATUS_MODIFIED:
		case DIFF_STATUS_ADDED:
			/*
			 * Links refer to objects in another repositories;
			 * output the SHA-1 verbatim.
			 */
			if (no_data || S_ISGITLINK(spec->mode))
				printf("M %06o %s ", spec->mode,
				       sha1_to_hex(spec->sha1));
			else {
				/* refer to the blob by the mark handle_commit()
				 * exported just before this callback ran */
				struct object *object = lookup_object(spec->sha1);
				printf("M %06o :%d ", spec->mode,
				       get_object_mark(object));
			}
			print_path(spec->path);
			putchar('\n');
			break;

		default:
			die("Unexpected comparison status '%c' for %s, %s",
			    q->queue[i]->status,
			    ospec->path ? ospec->path : "none",
			    spec->path ? spec->path : "none");
		}
	}
}
/*
 * Look for an "encoding" header line in the commit-header region
 * [begin, end) (whole remainder of the string when end is NULL).
 * Returns a pointer to the encoding value, or git_commit_encoding when
 * the header is absent.
 *
 * CAUTION: the returned value is NUL-terminated *in place* -- this
 * writes a '\0' into the caller's (commit) buffer.
 */
static const char *find_encoding(const char *begin, const char *end)
{
	const char *needle = "\nencoding ";
	char *bol, *eol;

	bol = memmem(begin, end ? end - begin : strlen(begin),
		     needle, strlen(needle));
	if (!bol)
		return git_commit_encoding;
	bol += strlen(needle);
	eol = strchrnul(bol, '\n');
	*eol = '\0';	/* truncate at end of the header value (mutates buffer) */
	return bol;
}
/*
 * Emit one commit: export the blobs it touches, then print a "commit"
 * command with author/committer, any non-"encoding" extra headers (as
 * "extra " lines -- a local extension to the stream format), the
 * message (re-encoded to UTF-8 when needed), and from/merge lines for
 * its already-exported parents.  The file-level changes themselves are
 * produced via the show_filemodify() diff callback.
 */
static void handle_commit(struct commit *commit, struct rev_info *rev)
{
	int saved_output_format = rev->diffopt.output_format;
	const char *author, *author_end, *committer, *committer_end;
	const char *encoding, *message, *extra, *next;
	struct strbuf extras = STRBUF_INIT;
	char *reencoded = NULL;
	struct commit_list *p;
	int i;

	rev->diffopt.output_format = DIFF_FORMAT_CALLBACK;

	parse_commit(commit);
	author = strstr(commit->buffer, "\nauthor ");
	if (!author)
		die ("Could not find author in commit %s",
		     sha1_to_hex(commit->object.sha1));
	author++;	/* skip the leading '\n' */
	author_end = strchrnul(author, '\n');
	committer = strstr(author_end, "\ncommitter ");
	if (!committer)
		die ("Could not find committer in commit %s",
		     sha1_to_hex(commit->object.sha1));
	committer++;	/* skip the leading '\n' */
	committer_end = strchrnul(committer, '\n');
	/* blank line separates headers from the commit message */
	message = strstr(committer_end, "\n\n");
	encoding = find_encoding(committer_end, message);

	/* collect the remaining header lines (except "encoding") as
	 * "extra <header>\n" entries; skipped entirely if message is NULL */
	extra = committer_end + 1;
	while (extra < message) {
		next = strchrnul(extra, '\n');
		if (prefixcmp(extra, "encoding ")) {
			strbuf_addstr(&extras, "extra ");
			strbuf_add(&extras, extra, next - extra);
			strbuf_addstr(&extras, "\n");
		}
		extra = next + 1;
	}

	if (message)
		message += 2;	/* step over the "\n\n" separator */

	/* incremental diff against the first parent when it was exported
	 * and --full-tree is off; otherwise diff against the empty tree */
	if (commit->parents &&
	    get_object_mark(&commit->parents->item->object) != 0 &&
	    !full_tree) {
		parse_commit(commit->parents->item);
		diff_tree_sha1(commit->parents->item->tree->object.sha1,
			       commit->tree->object.sha1, "", &rev->diffopt);
	}
	else
		diff_root_tree_sha1(commit->tree->object.sha1,
				    "", &rev->diffopt);

	/* Export the referenced blobs, and remember the marks. */
	for (i = 0; i < diff_queued_diff.nr; i++)
		if (!S_ISGITLINK(diff_queued_diff.queue[i]->two->mode))
			handle_object(diff_queued_diff.queue[i]->two->sha1);

	mark_next_object(&commit->object);
	if (!is_encoding_utf8(encoding))
		reencoded = reencode_string(message, "UTF-8", encoding);
	/* root commits need an explicit reset of their ref first;
	 * commit->util holds the full ref name (see get_tags_and_duplicates) */
	if (!commit->parents)
		printf("reset %s\n", (const char*)commit->util);
	printf("commit %s\nmark :%"PRIu32"\n%.*s\n%.*s\n%sdata %u\n%s",
	       (const char *)commit->util, last_idnum,
	       (int)(author_end - author), author,
	       (int)(committer_end - committer), committer,
	       extras.buf,
	       (unsigned)(reencoded
			  ? strlen(reencoded) : message
			  ? strlen(message) : 0),
	       reencoded ? reencoded : message ? message : "");
	free(reencoded);
	strbuf_release(&extras);

	for (i = 0, p = commit->parents; p; p = p->next) {
		int mark = get_object_mark(&p->item->object);
		if (!mark)
			continue;	/* parent was filtered out / not exported */
		if (i == 0)
			printf("from :%d\n", mark);
		else
			printf("merge :%d\n", mark);
		i++;
	}

	if (full_tree)
		printf("deleteall\n");
	log_tree_diff_flush(rev);
	rev->diffopt.output_format = saved_output_format;

	printf("\n");

	show_progress();
}
/*
 * Flush the pending-commit stack: repeatedly emit the most recently
 * queued commit whose parents have all been shown, stopping at the
 * first one that still waits on an unshown parent.
 */
static void handle_tail(struct object_array *commits, struct rev_info *revs)
{
	while (commits->nr) {
		struct commit *commit =
			(struct commit *)commits->objects[commits->nr - 1].item;

		if (has_unshown_parent(commit))
			return;

		handle_commit(commit, revs);
		commits->nr--;
	}
}
/*
 * Emit a "tag" command for 'tag' under ref 'name'.  Handles: tags of
 * trees (warned about and skipped -- fast-import cannot express them),
 * missing taggers (--fake-missing-tagger), PGP signatures (per
 * --signed-tags), and tags whose target was filtered out of the export
 * (per --tag-of-filtered-object, possibly rewriting the tag onto the
 * nearest exported ancestor commit).
 */
static void handle_tag(const char *name, struct tag *tag)
{
	unsigned long size;
	enum object_type type;
	char *buf;
	const char *tagger, *tagger_end, *message;
	size_t message_size = 0;
	struct object *tagged;
	int tagged_mark;
	struct commit *p;

	/* Trees have no identifer in fast-export output, thus we have no way
	 * to output tags of trees, tags of tags of trees, etc. Simply omit
	 * such tags.
	 */
	tagged = tag->tagged;
	while (tagged->type == OBJ_TAG) {
		tagged = ((struct tag *)tagged)->tagged;
	}
	if (tagged->type == OBJ_TREE) {
		warning("Omitting tag %s,\nsince tags of trees (or tags of tags of trees, etc.) are not supported.",
			sha1_to_hex(tag->object.sha1));
		return;
	}

	buf = read_sha1_file(tag->object.sha1, &type, &size);
	if (!buf)
		die ("Could not read tag %s", sha1_to_hex(tag->object.sha1));
	/* blank line separates tag headers from the annotation message */
	message = memmem(buf, size, "\n\n", 2);
	if (message) {
		message += 2;
		message_size = strlen(message);
	}
	tagger = memmem(buf, message ? message - buf : size, "\ntagger ", 8);
	if (!tagger) {
		if (fake_missing_tagger)
			tagger = "tagger Unspecified Tagger "
				"<unspecified-tagger> 0 +0000";
		else
			tagger = "";	/* omit the tagger line entirely */
		tagger_end = tagger + strlen(tagger);
	} else {
		tagger++;	/* skip the leading '\n' */
		tagger_end = strchrnul(tagger, '\n');
	}

	/* handle signed tags */
	if (message) {
		const char *signature = strstr(message,
					       "\n-----BEGIN PGP SIGNATURE-----\n");
		if (signature)
			switch(signed_tag_mode) {
			case ABORT:
				die ("Encountered signed tag %s; use "
				     "--signed-tag=<mode> to handle it.",
				     sha1_to_hex(tag->object.sha1));
			case WARN:
				warning ("Exporting signed tag %s",
					 sha1_to_hex(tag->object.sha1));
				/* fallthru */
			case VERBATIM:
				break;
			case STRIP:
				/* cut the message just after the '\n'
				 * preceding the signature block */
				message_size = signature + 1 - message;
				break;
			}
	}

	/* handle tag->tagged having been filtered out due to paths specified */
	tagged = tag->tagged;
	tagged_mark = get_object_mark(tagged);
	if (!tagged_mark) {
		switch(tag_of_filtered_mode) {
		case ABORT:
			die ("Tag %s tags unexported object; use "
			     "--tag-of-filtered-object=<mode> to handle it.",
			     sha1_to_hex(tag->object.sha1));
		case DROP:
			/* Ignore this tag altogether */
			return;
		case REWRITE:
			if (tagged->type != OBJ_COMMIT) {
				die ("Tag %s tags unexported %s!",
				     sha1_to_hex(tag->object.sha1),
				     typename(tagged->type));
			}
			/* walk down single-parent TREESAME history until we
			 * hit an exported (interesting/changed) commit */
			p = (struct commit *)tagged;
			for (;;) {
				if (p->parents && p->parents->next)
					break;	/* merge: stop here */
				if (p->object.flags & UNINTERESTING)
					break;
				if (!(p->object.flags & TREESAME))
					break;
				if (!p->parents)
					die ("Can't find replacement commit for tag %s\n",
					     sha1_to_hex(tag->object.sha1));
				p = p->parents->item;
			}
			tagged_mark = get_object_mark(&p->object);
		}
	}

	if (!prefixcmp(name, "refs/tags/"))
		name += 10;	/* fast-import expects short tag names */
	printf("tag %s\nfrom :%d\n%.*s%sdata %d\n%.*s\n",
	       name, tagged_mark,
	       (int)(tagger_end - tagger), tagger,
	       tagger == tagger_end ? "" : "\n",
	       (int)message_size, (int)message_size, message ? message : "");
}
/*
 * Walk the pending objects (the refs given on the command line): peel
 * tags, export tagged blobs directly, record tags and duplicate ref
 * names in 'extra_refs' for later emission, and stash each commit's
 * primary ref name in commit->util (consumed by handle_commit()).
 */
static void get_tags_and_duplicates(struct object_array *pending,
				    struct string_list *extra_refs)
{
	struct tag *tag;
	int i;

	for (i = 0; i < pending->nr; i++) {
		struct object_array_entry *e = pending->objects + i;
		unsigned char sha1[20];
		/* self-initialization silences "may be used uninitialized"
		 * warnings; every path that reaches the bottom assigns it */
		struct commit *commit = commit;
		char *full_name;

		if (dwim_ref(e->name, strlen(e->name), sha1, &full_name) != 1)
			continue;

		switch (e->item->type) {
		case OBJ_COMMIT:
			commit = (struct commit *)e->item;
			break;
		case OBJ_TAG:
			tag = (struct tag *)e->item;

			/* handle nested tags */
			while (tag && tag->object.type == OBJ_TAG) {
				parse_object(tag->object.sha1);
				string_list_append(extra_refs, full_name)->util = tag;
				tag = (struct tag *)tag->tagged;
			}
			if (!tag)
				die ("Tag %s points nowhere?", e->name);
			switch(tag->object.type) {
			case OBJ_COMMIT:
				commit = (struct commit *)tag;
				break;
			case OBJ_BLOB:
				handle_object(tag->object.sha1);
				continue;
			default: /* OBJ_TAG (nested tags) is already handled */
				warning("Tag points to object of unexpected type %s, skipping.",
					typename(tag->object.type));
				continue;
			}
			break;
		default:
			warning("%s: Unexpected object of type %s, skipping.",
				e->name,
				typename(e->item->type));
			continue;
		}
		if (commit->util)
			/* more than one name for the same object */
			string_list_append(extra_refs, full_name)->util = commit;
		else
			commit->util = full_name;
	}
}
/*
 * Emit the refs recorded by get_tags_and_duplicates(): tag objects
 * become "tag" commands, extra names for already-exported commits
 * become "reset" commands.  Iterates the list back-to-front so nested
 * tags come out innermost-first.
 */
static void handle_tags_and_duplicates(struct string_list *extra_refs)
{
	int i = extra_refs->nr;

	while (i-- > 0) {
		const char *name = extra_refs->items[i].string;
		struct object *object = extra_refs->items[i].util;

		if (object->type == OBJ_TAG) {
			handle_tag(name, (struct tag *)object);
		} else if (object->type == OBJ_COMMIT) {
			/* create refs pointing to already seen commits */
			struct commit *commit = (struct commit *)object;
			printf("reset %s\nfrom :%d\n\n", name,
			       get_object_mark(&commit->object));
			show_progress();
		}
	}
}
static void export_marks(char *file)
{
unsigned int i;
uint32_t mark;
struct object_decoration *deco = idnums.hash;
FILE *f;
int e = 0;
f = fopen(file, "w");
if (!f)
die_errno("Unable to open marks file %s for writing.", file);
for (i = 0; i < idnums.size; i++) {
if (deco->base && deco->base->type == 1) {
mark = ptr_to_mark(deco->decoration);
if (fprintf(f, ":%"PRIu32" %s\n", mark,
sha1_to_hex(deco->base->sha1)) < 0) {
e = 1;
break;
}
}
deco++;
}
e |= ferror(f);
e |= fclose(f);
if (e)
error("Unable to write marks file %s.", file);
}
static void import_marks(char *input_file)
{
char line[512];
FILE *f = fopen(input_file, "r");
if (!f)
die_errno("cannot read '%s'", input_file);
while (fgets(line, sizeof(line), f)) {
uint32_t mark;
char *line_end, *mark_end;
unsigned char sha1[20];
struct object *object;
line_end = strchr(line, '\n');
if (line[0] != ':' || !line_end)
die("corrupt mark line: %s", line);
*line_end = '\0';
mark = strtoumax(line + 1, &mark_end, 10);
if (!mark || mark_end == line + 1
|| *mark_end != ' ' || get_sha1(mark_end + 1, sha1))
die("corrupt mark line: %s", line);
object = parse_object(sha1);
if (!object)
die ("Could not read blob %s", sha1_to_hex(sha1));
if (object->flags & SHOWN)
error("Object %s already has a mark", sha1);
mark_object(object, mark);
if (last_idnum < mark)
last_idnum = mark;
object->flags |= SHOWN;
}
fclose(f);
}
/*
 * Entry point for "git fast-export".  Parses rev-list arguments and
 * the command's own options, optionally imports prior marks, walks the
 * revision graph in topological order emitting a fast-import stream on
 * stdout, then flushes remaining tags/duplicate refs and optionally
 * exports the mark table.  Returns 0 on success (errors die()).
 */
int cmd_fast_export(int argc, const char **argv, const char *prefix)
{
	struct rev_info revs;
	struct object_array commits = OBJECT_ARRAY_INIT;
	struct string_list extra_refs = STRING_LIST_INIT_NODUP;
	struct commit *commit;
	char *export_filename = NULL, *import_filename = NULL;
	struct option options[] = {
		OPT_INTEGER(0, "progress", &progress,
			    "show progress after <n> objects"),
		OPT_CALLBACK(0, "signed-tags", &signed_tag_mode, "mode",
			     "select handling of signed tags",
			     parse_opt_signed_tag_mode),
		OPT_CALLBACK(0, "tag-of-filtered-object", &tag_of_filtered_mode, "mode",
			     "select handling of tags that tag filtered objects",
			     parse_opt_tag_of_filtered_mode),
		OPT_STRING(0, "export-marks", &export_filename, "file",
			   "Dump marks to this file"),
		OPT_STRING(0, "import-marks", &import_filename, "file",
			   "Import marks from this file"),
		OPT_BOOLEAN(0, "fake-missing-tagger", &fake_missing_tagger,
			    "Fake a tagger when tags lack one"),
		OPT_BOOLEAN(0, "full-tree", &full_tree,
			    "Output full tree for each commit"),
		OPT_BOOLEAN(0, "use-done-feature", &use_done_feature,
			    "Use the done feature to terminate the stream"),
		{ OPTION_NEGBIT, 0, "data", &no_data, NULL,
		  "Skip output of blob data",
		  PARSE_OPT_NOARG | PARSE_OPT_NEGHELP, NULL, 1 },
		OPT_END()
	};

	if (argc == 1)
		usage_with_options (fast_export_usage, options);

	/* we handle encodings */
	git_config(git_default_config, NULL);

	init_revisions(&revs, prefix);
	revs.topo_order = 1;	/* parents must be emitted before children */
	revs.show_source = 1;
	revs.rewrite_parents = 1;
	argc = setup_revisions(argc, argv, &revs, NULL);
	argc = parse_options(argc, argv, prefix, options, fast_export_usage, 0);
	if (argc > 1)
		usage_with_options (fast_export_usage, options);

	if (use_done_feature)
		printf("feature done\n");

	if (import_filename)
		import_marks(import_filename);

	/* with path limiting, incremental diffs against imported history
	 * would be unreliable; force full trees */
	if (import_filename && revs.prune_data.nr)
		full_tree = 1;

	get_tags_and_duplicates(&revs.pending, &extra_refs);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	revs.diffopt.format_callback = show_filemodify;
	DIFF_OPT_SET(&revs.diffopt, RECURSIVE);
	while ((commit = get_revision(&revs))) {
		if (has_unshown_parent(commit)) {
			/* defer until all interesting parents are shown */
			add_object_array(&commit->object, NULL, &commits);
		}
		else {
			handle_commit(commit, &revs);
			handle_tail(&commits, &revs);
		}
	}

	handle_tags_and_duplicates(&extra_refs);

	if (export_filename)
		export_marks(export_filename);

	if (use_done_feature)
		printf("done\n");

	return 0;
}
| gpl-2.0 |
kusma/amoeba | main/piprecalc.cpp | 1 | 3649 | /*
* This "effect" is almost totally untimed (ie. it counts frames, not
* seconds). Rationale: A faster machine will play the fades faster,
* but it will also _load_ faster, so it's reasonable that it also
* loads faster ;-) (If you want, do a s/rationale/excuse for being
* lazy/ ;-) ) Unfortunately, vsync messes this up ;-)
*/
#include "main/piprecalc.h"
#include <stdio.h>
#include <unistd.h>
#define PI_STRING "3.14159265358979323846264338327950288"
/* ahem ;-) */
#ifndef __unix__
#define usleep(x) Sleep(x)
#endif
/*
 * Set up the loader screen: remember the target window, reset the
 * digit counter, and load the loader font with clamped texture wrap.
 */
PiPrecalc::PiPrecalc(GLWindow *win)
{
	this->win = win;
	this->last_status = 0;

	this->font = texture::load("loaderfont.png");
	this->font->bind();
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
}
/*
 * Destructor doubles as the fade-out: redraws the full pi string for 50
 * frames with decreasing alpha, then frees the font.  Frame-counted,
 * not time-based (see the note at the top of this file).
 */
PiPrecalc::~PiPrecalc()
{
	/*
	 * simple fade to zero
	 * (some drivers don't appear to like that we base ourselves
	 * on the last frame, so we have to redraw every time here)
	 */
	for (int i = 0; i < 50; i++) {
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

		/* 2D overlay: unit ortho projection, matrices saved/restored below */
		glMatrixMode(GL_MODELVIEW);
		glPushMatrix();
		glLoadIdentity();
		glMatrixMode(GL_PROJECTION);
		glPushMatrix();
		glLoadIdentity();
		glOrtho(0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f);

		glEnable(GL_BLEND);
		glBlendFunc(GL_SRC_ALPHA, GL_ONE);	/* additive blending */
		glDisable(GL_LIGHTING);
		glEnable(GL_TEXTURE_2D);
		this->font->bind();

		/* whole string fades from opaque to transparent over 50 frames */
		glColor4f(1.0f, 1.0f, 1.0f, (float)(49-i) / 49.0f);
		glBegin(GL_QUADS);
		for (int j = 0; j < 37; j++) {
			char ch = PI_STRING[j];
			/* glyph column in the 32-cell font strip:
			 * '.' is cell 7, digits start at cell 8 */
			int t = (ch == '.') ? 7 : (8 + (ch - '0'));
			glTexCoord2f((float)(t) / 32.0f, 0.0f);
			glVertex2f((float)(j+2) / 41.0f, 0.45f);
			glTexCoord2f((float)(t+1) / 32.0f, 0.0f);
			glVertex2f((float)(j+3) / 41.0f, 0.45f);
			glTexCoord2f((float)(t+1) / 32.0f, 0.9f);
			glVertex2f((float)(j+3) / 41.0f, 0.55f);
			glTexCoord2f((float)(t) / 32.0f, 0.9f);
			glVertex2f((float)(j+2) / 41.0f, 0.55f);
		}
		glEnd();

		glPopMatrix();
		glMatrixMode(GL_MODELVIEW);
		glPopMatrix();
		this->win->flip();
		usleep(1);	/* brief yield between frames */
	}
	delete this->font;
	this->font = NULL;
}
/*
 * Advance the loader display to progress p in [0, 1].  Each newly
 * reached digit (of 37, written right-to-left) is faded in over 7
 * redrawn frames; digits already shown are redrawn at full alpha.
 * Frame-counted rather than timed (see file header comment).
 */
void PiPrecalc::update(float p)
{
	int target = (int)(p * 37.0f);

	/* fade in every digit between the last shown one and the target */
	for (int i = this->last_status; i < target; i++) {
		for (int fno = 0; fno < 7; fno++) {
			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

			/* 2D overlay: unit ortho projection, saved/restored below */
			glMatrixMode(GL_MODELVIEW);
			glPushMatrix();
			glLoadIdentity();
			glMatrixMode(GL_PROJECTION);
			glPushMatrix();
			glLoadIdentity();
			glOrtho(0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f);

			glEnable(GL_BLEND);
			glBlendFunc(GL_SRC_ALPHA, GL_ONE);
			glDisable(GL_LIGHTING);
			glEnable(GL_TEXTURE_2D);
			this->font->bind();

			/* first rewrite the text :-) */
			glColor4f(1.0f, 1.0f, 1.0f, 1.0f);

			/*
			 * then write every letter that we already wrote,
			 * and last the new letter
			 */
			for (int j = 0; j <= i; j++) {
				/* digits appear right-to-left, so index from the end */
				char ch = PI_STRING[36 - j];
				/* glyph column: '.' is cell 7, digits start at cell 8 */
				int t = (ch == '.') ? 7 : (8 + (ch - '0'));
				if (j == i) {
					/* the newest digit uses this frame's fade alpha */
					glColor4f(1.0f, 1.0f, 1.0f, (float)(fno) / 6.0f);
				}
				glBegin(GL_QUADS);
				glTexCoord2f((float)(t) / 32.0f, 0.0f);
				glVertex2f((float)(38-j) / 41.0f, 0.45f);
				glTexCoord2f((float)(t+1) / 32.0f, 0.0f);
				glVertex2f((float)(38-j+1) / 41.0f, 0.45f);
				glTexCoord2f((float)(t+1) / 32.0f, 0.9f);
				glVertex2f((float)(38-j+1) / 41.0f, 0.55f);
				glTexCoord2f((float)(t) / 32.0f, 0.9f);
				glVertex2f((float)(38-j) / 41.0f, 0.55f);
				glEnd();
			}

			glPopMatrix();
			glMatrixMode(GL_MODELVIEW);
			glPopMatrix();
			this->win->flip();
			usleep(1);
		}
	}
	this->last_status = target;
}
| gpl-2.0 |
renolui/RenoStudio | Player/BB/apps/plugins/bitmaps/native/matrix_bold.c | 1 | 47393 | #include "lcd.h"
#include "/Users/reno/Documents/source/RenoStudio/Player/BB/pluginbitmaps/matrix_bold.h"
const unsigned short matrix_bold[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0040, 0x0020, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0080, 0x00c0, 0x00a0,
0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0060, 0x00c0, 0x0100, 0x00c0, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0080, 0x0060, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0921, 0x0981,
0x09c1, 0x01a0, 0x00e0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x00a0, 0x1282, 0x1b83, 0x1b63, 0x09a1, 0x00a0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0140, 0x0aa1, 0x1382, 0x1ba2, 0x1382,
0x1342, 0x0aa1, 0x0100, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0020, 0x0060, 0x00a0, 0x00c0, 0x0080, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00a0, 0x00e0, 0x0100, 0x00c0, 0x0060,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0060, 0x0060,
0x0060, 0x0080, 0x0060, 0x0040, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0961, 0x01a1, 0x0020, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0060, 0x0060, 0x0060, 0x0040, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0020,
0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0060, 0x00c0, 0x00e0, 0x00c0, 0x00a0,
0x0080, 0x0040, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0080, 0x01c0, 0x0260, 0x0940, 0x09e1, 0x0260, 0x01a1, 0x0060, 0x0000, 0x0000,
0x0000, 0x00a0, 0x0ac1, 0x0ba1, 0x0ae1, 0x09e1, 0x0941, 0x0981, 0x0941, 0x00a0,
0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x09a1, 0x09c1,
0x0901, 0x0060, 0x0020, 0x0040, 0x0080, 0x0040, 0x0000, 0x0000, 0x0000, 0x0020,
0x0160, 0x01a0, 0x00a0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x00c0,
0x0120, 0x0100, 0x00a0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0020, 0x0080, 0x0120, 0x00e0, 0x0060, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0160, 0x0220, 0x0aa1, 0x0a81,
0x01a0, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00c0,
0x01e0, 0x0ac1, 0x1341, 0x0300, 0x01c0, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x00a0, 0x09c1, 0x0260, 0x0200, 0x0100, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x09c1, 0x1c03, 0x14c2,
0x1ce3, 0x2d05, 0x1382, 0x0160, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x09c1, 0x35a6, 0x4eaa, 0x56aa, 0x0c61, 0x0a60, 0x0100, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x1362, 0x564b, 0x5e8b, 0x3e47, 0x3e47,
0x4628, 0x3da7, 0x1342, 0x00e0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040,
0x0120, 0x1322, 0x2444, 0x2464, 0x1342, 0x0180, 0x0080, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x00e0, 0x1342, 0x2cc4, 0x2ce5, 0x1bc2, 0x0a00,
0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x00a0, 0x0aa1, 0x1342, 0x0101, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0060, 0x00a0, 0x0100, 0x0100,
0x00c0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0980, 0x0a21, 0x0220,
0x01a0, 0x01c0, 0x01e0, 0x0180, 0x0120, 0x00a0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x1202, 0x2ca5, 0x3506, 0x00e0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x09e1, 0x0a61,
0x0a81, 0x0aa1, 0x0aa1, 0x0aa1, 0x0a41, 0x0941, 0x0040, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00a0, 0x0100, 0x0100, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x09a1, 0x0961, 0x0161, 0x1302,
0x0a01, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0080, 0x1281, 0x1b83, 0x0120, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0a41, 0x0a41, 0x09a1,
0x0120, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0060, 0x0a21,
0x1342, 0x0a01, 0x00e0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0020, 0x0060, 0x00c0, 0x0100, 0x0100, 0x0100, 0x00c0, 0x0080, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0120, 0x01e0, 0x01e0, 0x0a21, 0x0a41, 0x09e1,
0x01a0, 0x0120, 0x00c0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0,
0x0220, 0x55ca, 0x76ce, 0x0b61, 0x3526, 0xa754, 0x3546, 0x0a81, 0x0040, 0x0000,
0x0060, 0x0a41, 0x6e6d, 0xaf75, 0x3e26, 0x1442, 0x0b21, 0x0c01, 0x0bc1, 0x0280,
0x01a0, 0x00e0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0981, 0x3546, 0x3546,
0x13e2, 0x09a1, 0x0120, 0x09e1, 0x0ac0, 0x01e1, 0x0020, 0x0000, 0x0100, 0x1b23,
0x2ce5, 0x2d25, 0x13c2, 0x09c1, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0160, 0x0b41, 0x0bc0,
0x13e1, 0x1c63, 0x0b81, 0x0a41, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x00e0, 0x2c05, 0x556a, 0x2c65, 0x1362, 0x1222, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x1282, 0x55aa, 0x4dc9, 0x2544, 0x2524,
0x3526, 0x3486, 0x0160, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x12e2,
0x768e, 0x664c, 0x35e6, 0x666c, 0x4548, 0x1362, 0x00c0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0020, 0x09e1, 0x1482, 0x7eaf, 0x7ed0, 0x0b21, 0x0100, 0x0040,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0120, 0x1be3, 0x66ac, 0x8f31,
0xc798, 0xe7fc, 0x9732, 0x1c63, 0x0120, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0941, 0x1b83, 0x2da6, 0x56aa, 0xbf97, 0x4609, 0x0280, 0x00e0, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0140, 0x554a, 0xffff, 0xffff, 0xaf75, 0x8750,
0xffff, 0xf7fe, 0x35a6, 0x0a81, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x01a0,
0x2ca5, 0x3e07, 0x3e87, 0x3e87, 0x3646, 0x2d05, 0x0a41, 0x0080, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x09c1, 0x1c22, 0x2de5, 0x3e88, 0x3e67, 0x3e27, 0x3546,
0x0ac1, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0,
0x0a61, 0x2c65, 0x12e2, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0120, 0x2c45, 0x4e09, 0x4e09, 0x0a21, 0x0060, 0x0020, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0120, 0x1362, 0x2444, 0x24c4, 0x3d47,
0x2484, 0x12c2, 0x0100, 0x0000, 0x0000, 0x0000, 0x09c1, 0x2ce5, 0x3dc7, 0x3dc7,
0x2da5, 0x668c, 0x8ef1, 0x3da7, 0x24e4, 0x2444, 0x0a21, 0x0080, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0020, 0x0180, 0x660c, 0x9f53, 0x01e0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0a61, 0x4628, 0x4e69,
0x3666, 0x3646, 0x3e67, 0x3e67, 0x2e25, 0x1422, 0x0100, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x0080, 0x0961, 0x0ac1, 0x2444, 0x2464, 0x0941, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x09c1, 0x2524, 0x1be2, 0x0ba1, 0x876f,
0x3d47, 0x0160, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0901,
0x01a0, 0x662c, 0x8f91, 0x0b41, 0x0921, 0x00c0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x09c1, 0x3da6, 0x2544, 0x13a2,
0x12e1, 0x0961, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x08c1, 0x0180, 0x3d27,
0x5eab, 0x1403, 0x09e1, 0x00c0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060,
0x0a81, 0x13c2, 0x12e2, 0x0a61, 0x0a01, 0x0a21, 0x12c2, 0x1361, 0x0a41, 0x00e0,
0x0000, 0x0000, 0x0000, 0x0060, 0x2424, 0x4de9, 0x3dc7, 0x2584, 0x1d43, 0x1d43,
0x2d45, 0x4528, 0x2484, 0x1302, 0x0121, 0x0020, 0x0000, 0x0000, 0x00a0, 0x11e1,
0x0320, 0xaf14, 0xdffb, 0x0ca1, 0x45e8, 0xdffb, 0x3e47, 0x13a2, 0x00a0, 0x0000,
0x0040, 0x09c1, 0x4568, 0x7eef, 0x3666, 0x3e87, 0x568a, 0x66cc, 0x9732, 0xaf55,
0x666c, 0x3d27, 0x1b83, 0x0101, 0x0000, 0x0000, 0x0000, 0x0a02, 0x35c6, 0x76ee,
0x4e29, 0x0b21, 0x1322, 0x1ce3, 0x8ef1, 0x558a, 0x0120, 0x0000, 0x1242, 0x3606,
0xa794, 0xd7da, 0x56e9, 0x1c03, 0x0100, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00a0, 0x1ba3, 0xb7b6, 0xbfb7,
0xbfb7, 0xe7fc, 0xa774, 0x25a4, 0x0a21, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0040, 0x0180, 0xa734, 0xffff, 0xd7fa, 0x772d, 0x35a5, 0x0961, 0x0000,
0x0000, 0x0000, 0x0000, 0x0020, 0x0120, 0x3ce7, 0xffff, 0xf7fe, 0x6f2c, 0x5eeb,
0xe7fc, 0xdffb, 0x1b43, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0120, 0x4507,
0xffff, 0xeffd, 0x670c, 0xf7fe, 0xafb5, 0x35e6, 0x09e2, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0080, 0x0ae1, 0x3e67, 0xffff, 0xffff, 0x3de7, 0x0ae1, 0x0100,
0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0120, 0x1be3, 0x668c, 0x9752,
0xc798, 0xffff, 0xdfdb, 0x35c6, 0x09e1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0060, 0x0140, 0x1241, 0x1c63, 0x2e05, 0xc7b8, 0xbf97, 0x3de7, 0x0aa1, 0x0080,
0x0000, 0x0000, 0x0000, 0x0000, 0x00e0, 0x652c, 0xffff, 0xefdd, 0x6f0d, 0x568a,
0x86d0, 0x86d0, 0x34e6, 0x0a21, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x1ba3,
0xe7fc, 0x9732, 0x1e03, 0x2624, 0x9f53, 0xdffb, 0x2464, 0x0100, 0x0020, 0x0000,
0x0000, 0x0020, 0x00a0, 0x23e3, 0xcff8, 0x9f53, 0x2625, 0x1da3, 0x86ef, 0xeffd,
0x2d05, 0x01c0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0a41,
0x3546, 0xdffb, 0x660c, 0x0120, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x09e1, 0x568a, 0xc7b8, 0xa754, 0x0460, 0x0a21, 0x00c0, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x09a0, 0x1ba2, 0x3de7, 0x3626, 0x4688, 0xbfb7,
0x6f0d, 0x2564, 0x1362, 0x0160, 0x0020, 0x0000, 0x0a01, 0x2d05, 0x3e27, 0x4e89,
0x3e67, 0xd7da, 0xffff, 0x66cc, 0x2dc5, 0x3546, 0x12c2, 0x00e0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0020, 0x0140, 0x8e51, 0xb7b6, 0x0280, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x1ba3, 0xc7d8, 0x6eed,
0x2e25, 0x3646, 0x3666, 0x3685, 0x9772, 0x5e2b, 0x0200, 0x0060, 0x0000, 0x0000,
0x0000, 0x0040, 0x00e0, 0x0961, 0x1321, 0x4607, 0x4668, 0x2d85, 0x0a21, 0x0020,
0x0000, 0x0000, 0x0000, 0x0020, 0x00a0, 0x1b43, 0x9f93, 0x45c8, 0x1d23, 0xf7fe,
0x86b0, 0x0280, 0x0921, 0x0060, 0x0000, 0x0000, 0x0000, 0x0080, 0x0160, 0x0220,
0x0300, 0x9f13, 0xf7fe, 0x1ca3, 0x0a41, 0x0180, 0x00a0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0b41, 0xbfb7, 0x9f53, 0x15a2,
0x24a4, 0x1282, 0x0040, 0x0000, 0x0000, 0x0040, 0x01a0, 0x0280, 0x0320, 0xaf15,
0xf7fe, 0x1ce3, 0x02a0, 0x0a01, 0x00e0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0120,
0x662b, 0x9792, 0x1d82, 0x1402, 0x1302, 0x0b21, 0x2d65, 0x670c, 0x2d45, 0x12a2,
0x0020, 0x0000, 0x0000, 0x00e0, 0x76ae, 0xffff, 0xffff, 0xc7d8, 0x672c, 0xb7b6,
0xdffb, 0xe7fc, 0xb7d6, 0x45e8, 0x0a81, 0x0060, 0x0000, 0x0000, 0x09c1, 0x1b63,
0x1461, 0xb756, 0xffff, 0x25c4, 0x4e48, 0xb796, 0x46a8, 0x1ce3, 0x0180, 0x0020,
0x0020, 0x0100, 0x13a1, 0x1da3, 0x2e65, 0x9f52, 0xf7fe, 0x8f31, 0xf7fe, 0xffff,
0xf7fe, 0x8730, 0x2504, 0x0981, 0x0000, 0x0000, 0x0000, 0x09c1, 0x1ca3, 0x9f33,
0xaf95, 0x0c61, 0x24e4, 0x76ee, 0xbf97, 0x5e0b, 0x01c0, 0x0020, 0x00c0, 0x1302,
0x5e4b, 0xd7bb, 0xcfb9, 0x2d05, 0x09c0, 0x0100, 0x0080, 0x0080, 0x0180, 0x0980,
0x00a0, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0aa1, 0x4dc9, 0xffff, 0xcf99,
0x4e49, 0x7eaf, 0xdfbb, 0xcfd9, 0x2403, 0x00c0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x00a0, 0x0220, 0xa714, 0xffff, 0x8e71, 0x24a4, 0x1b23, 0x00a0, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0300, 0x8690, 0xffff, 0xa754, 0x1d83, 0x1d03,
0x8ed1, 0xc7d8, 0x1322, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0300, 0x7e70,
0xffff, 0x96f2, 0x1562, 0x4628, 0x4668, 0x35a6, 0x09c1, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x00c0, 0x1342, 0x3e87, 0xefdd, 0xffff, 0xdffb, 0x2dc4, 0x0a00,
0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0100, 0x1281, 0x0c61, 0x0540,
0x0e00, 0x8f51, 0x7f2e, 0x25a4, 0x0a21, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x00a0, 0x09c1, 0x0b41, 0x0c81, 0x0d81, 0x2e85, 0xc797, 0xf7fe, 0x2484, 0x0100,
0x0020, 0x0000, 0x0000, 0x0000, 0x00a0, 0x1382, 0xbfb7, 0xb775, 0x3e87, 0x0ca1,
0x0360, 0x0b00, 0x0a21, 0x00e0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x3466,
0xffff, 0x9f33, 0x15e2, 0x0de1, 0x9f53, 0xffff, 0x2d05, 0x0180, 0x0060, 0x0000,
0x0000, 0x0040, 0x0120, 0x2c85, 0xffff, 0x9733, 0x1542, 0x14c1, 0x668c, 0xd7ba,
0x3607, 0x1342, 0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0140,
0x4ca9, 0xffff, 0x9eb3, 0x01c0, 0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x00a0, 0x1282, 0x3586, 0x7f2f, 0xcfd8, 0x45a8, 0x0180, 0x00a0, 0x0000,
0x0000, 0x0000, 0x0020, 0x0100, 0x2d06, 0x66ec, 0x4e89, 0x1d23, 0x3566, 0xffff,
0xefde, 0x56ca, 0x3e67, 0x2444, 0x0100, 0x0000, 0x0120, 0x0240, 0x0b61, 0x1422,
0x0ce1, 0x8f31, 0xffff, 0x2583, 0x0ba1, 0x1302, 0x09c1, 0x00e0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0140, 0xa674, 0xbfd7, 0x02a0, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0140, 0x34a5, 0xffff, 0x8730,
0x1602, 0x2624, 0x1e23, 0x2684, 0xeffc, 0x9712, 0x02c0, 0x00c1, 0x0000, 0x00a0,
0x0aa1, 0x1be3, 0x1c23, 0x1442, 0x1541, 0x9f73, 0xc798, 0x568a, 0x1322, 0x00a0,
0x0000, 0x0000, 0x0020, 0x00c0, 0x0120, 0x2c45, 0xffff, 0x6ead, 0x2e04, 0xffff,
0x86f0, 0x03c0, 0x1202, 0x00e0, 0x0000, 0x0000, 0x0080, 0x0a20, 0x3526, 0x4de9,
0x25c4, 0xbf77, 0xffff, 0x4648, 0x2584, 0x3546, 0x1362, 0x0141, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00e0, 0x0ba1, 0x9f73, 0xffff, 0x6f0d,
0x1cc3, 0x0a01, 0x0040, 0x0000, 0x0040, 0x0a41, 0x660c, 0xaf55, 0x6ead, 0xf7fe,
0xffff, 0x9731, 0x4627, 0x3566, 0x2463, 0x1262, 0x0020, 0x0000, 0x0020, 0x0160,
0xaed5, 0xcfd9, 0x1582, 0x1442, 0x1382, 0x0b81, 0x2da5, 0x8730, 0x3e06, 0x1b83,
0x0080, 0x0000, 0x0000, 0x0060, 0x1ae3, 0x55aa, 0x9f73, 0xbf97, 0xdfda, 0xffff,
0x7f0f, 0x1d83, 0x3546, 0x1bc3, 0x0100, 0x0020, 0x0000, 0x0000, 0x12c2, 0x2d85,
0x66cc, 0xffff, 0xffff, 0x7f2e, 0xa774, 0xffff, 0xdfda, 0x568a, 0x1382, 0x00c0,
0x0000, 0x00c0, 0x0a41, 0x1ce3, 0x46c8, 0x8f51, 0x770e, 0x0d21, 0x4647, 0xa774,
0x5eab, 0x2ce5, 0x0a41, 0x00c0, 0x0000, 0x0000, 0x0020, 0x0101, 0x02c0, 0xa714,
0xeffd, 0x0d61, 0x4648, 0xe7db, 0x2de6, 0x13c2, 0x09a1, 0x0040, 0x0040, 0x0921,
0x14a2, 0x9732, 0xffff, 0x560a, 0x0260, 0x09a1, 0x09e1, 0x1302, 0x2d05, 0x2424,
0x0100, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00e0, 0x1482, 0xa753, 0xffff, 0x7ecf,
0x0420, 0x0420, 0x96f2, 0xffff, 0x3506, 0x0160, 0x0040, 0x0000, 0x0000, 0x0000,
0x0000, 0x00a0, 0x0240, 0xaf35, 0xffff, 0x2cc5, 0x01c0, 0x00e0, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0080, 0x0360, 0x9ef3, 0xffff, 0x8ef1, 0x0400, 0x1362,
0x24e4, 0x55ca, 0x09e1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x02e0, 0x6e4d,
0xffff, 0xbf77, 0x1da3, 0x24c3, 0x2464, 0x1b83, 0x00e0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x0100, 0x13a2, 0x2ea5, 0xa774, 0xffff, 0xffff, 0x8f31, 0x1d23,
0x0a01, 0x0060, 0x0000, 0x0000, 0x0000, 0x0040, 0x0100, 0x0b21, 0x2e05, 0x66cc,
0xa773, 0xefdd, 0xcfb9, 0x35e6, 0x0a01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0120, 0x1c03, 0x9f53, 0xbf77, 0x9f73, 0xefdd, 0xffff, 0xf7fe, 0x3d47, 0x0180,
0x0040, 0x0000, 0x0000, 0x0000, 0x0040, 0x0a21, 0x1d23, 0xaf55, 0xaf95, 0x0c61,
0x1242, 0x0961, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00e0, 0x3486,
0xffff, 0xaf75, 0x3e87, 0x56aa, 0xd7ba, 0xffff, 0x2d65, 0x01c0, 0x00a0, 0x0000,
0x0000, 0x0060, 0x0180, 0x2ce5, 0xf7fe, 0x76cd, 0x0cc1, 0x14a2, 0x3e27, 0xbf96,
0x3e87, 0x1c24, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x0100,
0x2404, 0xf7fe, 0x9712, 0x0380, 0x0941, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x00a0, 0x1322, 0x3566, 0x96f2, 0x5eca, 0x3d67, 0x12c2, 0x00a0,
0x0000, 0x0000, 0x0080, 0x0220, 0x7e8f, 0xaf75, 0x1542, 0x13e2, 0x2504, 0xf7fe,
0xc778, 0x2e04, 0x4688, 0x35c6, 0x1302, 0x0100, 0x1b42, 0x4de9, 0x2564, 0x1ce3,
0x1da3, 0xaf75, 0xffff, 0x3626, 0x14c2, 0x1422, 0x1302, 0x09c1, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0160, 0x8651, 0xbfb7, 0x0ae1, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0220, 0x2544, 0xeffd, 0xdfbb,
0x8750, 0x6f2d, 0xa794, 0xb796, 0xffff, 0x8f31, 0x02e0, 0x08e1, 0x0060, 0x0281,
0x8f11, 0xeffd, 0xdffb, 0xb7b6, 0x7f2f, 0x6f0c, 0xffff, 0xf7fe, 0x2464, 0x0100,
0x0040, 0x0000, 0x00c0, 0x0a81, 0x0280, 0x2ce5, 0xeffd, 0x66ec, 0x3686, 0xf7fe,
0x770e, 0x0cc1, 0x12e2, 0x09a1, 0x0060, 0x0000, 0x0941, 0x1402, 0xbfd7, 0xffff,
0x8770, 0xeffd, 0xffff, 0x9f73, 0xbfb7, 0xf7fe, 0x4e49, 0x1342, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x09e1, 0x1462, 0x46c8, 0xc798, 0xcfd9,
0x14a2, 0x0981, 0x0060, 0x0000, 0x00a0, 0x0b21, 0xc7b8, 0xffff, 0xc7b8, 0xefdd,
0xffff, 0xffff, 0xb7b6, 0x5eeb, 0x35e6, 0x1b43, 0x0020, 0x0000, 0x0020, 0x0180,
0xa6d4, 0xbfb7, 0x1562, 0x13e2, 0x1362, 0x0b81, 0x2da5, 0x7f2f, 0x3666, 0x1c03,
0x00c0, 0x0000, 0x0000, 0x0000, 0x00a0, 0x0240, 0x2564, 0x3e67, 0x6eec, 0xffff,
0x7eef, 0x03c0, 0x12c1, 0x09a1, 0x0020, 0x0000, 0x0000, 0x0000, 0x09c1, 0x1bc3,
0x4588, 0xcf99, 0xffff, 0x66cb, 0x6f0c, 0xeffd, 0xcfb9, 0x4608, 0x0ae1, 0x0080,
0x0000, 0x00e0, 0x0b01, 0x25a4, 0xb796, 0x8f50, 0x46a8, 0x1462, 0x1c62, 0x25c4,
0x3646, 0x24e4, 0x0180, 0x0060, 0x0000, 0x0000, 0x0020, 0x0921, 0x0260, 0x668c,
0xf7fe, 0xa774, 0x6f0c, 0x56aa, 0x1c03, 0x09e1, 0x00a0, 0x0000, 0x0000, 0x00c0,
0x0320, 0x8690, 0xffff, 0x76ce, 0x0340, 0x1221, 0x1bc3, 0x35e6, 0x8f91, 0x4da9,
0x00e0, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0180, 0x3de7, 0xeffd, 0xffff, 0x4668,
0x1402, 0x0be1, 0x9ef3, 0xffff, 0x45e8, 0x0aa1, 0x0080, 0x0000, 0x0000, 0x0000,
0x0000, 0x00a0, 0x0240, 0xb736, 0xffff, 0x34c6, 0x01a0, 0x00c0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x02a0, 0x7e50, 0xffff, 0x8f11, 0x0480, 0x1b63,
0x0a61, 0x01a0, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x01e0, 0x3545,
0xffff, 0xffff, 0x670c, 0x1462, 0x0a21, 0x00e0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x0961, 0x1c43, 0x36c6, 0xb795, 0xefdd, 0xf7fe, 0xeffd, 0x9772,
0x1422, 0x0921, 0x0000, 0x0000, 0x0000, 0x0080, 0x0a21, 0x2504, 0xbfb7, 0xffff,
0xe7dc, 0xeffd, 0xa774, 0x24e4, 0x0981, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080,
0x0b01, 0x4e4a, 0xffff, 0xd7ba, 0x772e, 0xaf95, 0xf7de, 0xf7fe, 0x3d87, 0x01c0,
0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c1, 0x02a0, 0x7eaf, 0xbfb6, 0x14e2,
0x1282, 0x0140, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0120, 0x13c2,
0xb776, 0xffff, 0xf7fe, 0xffff, 0xffff, 0xf7fe, 0x2d45, 0x0a21, 0x00a0, 0x0000,
0x0000, 0x0080, 0x01a0, 0x3526, 0xffff, 0x7f0f, 0x0540, 0x0d40, 0x4e89, 0xeffd,
0x4e89, 0x13e2, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x0160, 0x0a21,
0x0be1, 0x8f31, 0x6eed, 0x2da5, 0x0a41, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0080, 0x0941, 0x1282, 0x0420, 0x8ef1, 0xf7fe, 0x2d05, 0x01a0,
0x0020, 0x0000, 0x0921, 0x0300, 0xb756, 0xaf55, 0x03e0, 0x12c2, 0x1482, 0xbfd7,
0x9711, 0x0d81, 0x3e68, 0x3646, 0x2544, 0x0a41, 0x1bc4, 0x6eed, 0x3e27, 0x1da3,
0x2625, 0xcfb9, 0xffff, 0x4e89, 0x1d63, 0x1d02, 0x1402, 0x0a41, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0180, 0x9e93, 0xcff9, 0x0b21, 0x0080, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00e0, 0x24c4, 0xa733, 0xeffd, 0xefdd,
0xefdc, 0xe7dc, 0xf7fd, 0xffff, 0xffff, 0x8f31, 0x0c00, 0x0941, 0x0040, 0x0180,
0x4d09, 0x8ef1, 0xffff, 0xffff, 0xbf97, 0x4ec9, 0xa773, 0xdfdb, 0x3dc7, 0x0aa1,
0x0040, 0x0000, 0x0a81, 0x6e8d, 0x566a, 0x9712, 0xffff, 0xc798, 0xaf95, 0xffff,
0xd7ba, 0x3686, 0x25e4, 0x1c23, 0x0140, 0x0020, 0x0921, 0x0b61, 0xb776, 0xe7dc,
0x2644, 0xbf77, 0xffff, 0x4667, 0x66cc, 0xcfba, 0x3e27, 0x1b83, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0b61, 0x4648, 0xe7dc, 0xffff, 0xb796,
0x1d23, 0x0a41, 0x0080, 0x0000, 0x0060, 0x0220, 0xa714, 0xdfdb, 0x1dc3, 0x4ea9,
0x9772, 0x7f2f, 0x3ea7, 0x25a5, 0x1382, 0x0981, 0x0000, 0x0000, 0x0040, 0x01a0,
0xaef4, 0xbfb7, 0x1561, 0x13e2, 0x1322, 0x0b61, 0x2dc5, 0x7f2f, 0x3e47, 0x1c23,
0x00a0, 0x0000, 0x0000, 0x0000, 0x0040, 0x0100, 0x0a21, 0x0b81, 0x2d84, 0xffff,
0x96f2, 0x0300, 0x09a1, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0921,
0x0240, 0x9ef2, 0xffff, 0x25a4, 0x3646, 0x9771, 0x3ea6, 0x1ca3, 0x0140, 0x0020,
0x0000, 0x00e0, 0x24c4, 0x56ca, 0xaf75, 0x8f51, 0x3e87, 0x1402, 0x12e2, 0x13e2,
0x66cc, 0x45a8, 0x0180, 0x0040, 0x0000, 0x0000, 0x0040, 0x0941, 0x0ac1, 0x3e27,
0xeffd, 0xffff, 0xa774, 0x1562, 0x12a2, 0x0100, 0x0040, 0x0000, 0x0000, 0x0040,
0x0160, 0x24a4, 0xf7fe, 0x8f10, 0x0500, 0x1301, 0x2d25, 0x56ca, 0x3566, 0x1ae2,
0x0080, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x01a0, 0x768e, 0xffff, 0xf7fe, 0x3606,
0x13c2, 0x03a0, 0x9ef2, 0xffff, 0x6ead, 0x0ba1, 0x00a0, 0x0000, 0x0000, 0x0000,
0x0000, 0x00a0, 0x0a20, 0xb736, 0xffff, 0x2ca6, 0x0140, 0x0080, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0180, 0x2cc5, 0xf7fe, 0xcfb9, 0x3666, 0x1402,
0x1202, 0x0920, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x02c0, 0x4de9,
0xffff, 0xffff, 0x8f71, 0x1d43, 0x12a2, 0x00c0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0060, 0x0a41, 0x1542, 0x8f71, 0xf7fe, 0xf7fe, 0x9f72, 0xdfbb, 0xefdd,
0x3e07, 0x0ac1, 0x0060, 0x0000, 0x0000, 0x00c0, 0x0c21, 0x8f31, 0xffff, 0xb796,
0x2644, 0x3606, 0x1c83, 0x0a61, 0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0100,
0x3566, 0xe7dc, 0xfffe, 0x4e89, 0x0d00, 0x0560, 0xa754, 0xffff, 0x24e4, 0x01e0,
0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0140, 0x3d47, 0x9752, 0x25e4,
0x13c2, 0x0940, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0120, 0x1422,
0xb7b6, 0xffff, 0xffff, 0xffff, 0xffff, 0xeffd, 0x2da5, 0x0a41, 0x00c0, 0x0000,
0x0000, 0x0060, 0x0180, 0x3d26, 0xffff, 0xefdd, 0x9752, 0x772e, 0xf7fe, 0xffff,
0x3e07, 0x0aa1, 0x0060, 0x0000, 0x0000, 0x0000, 0x0080, 0x0a01, 0x24e4, 0x2543,
0x2544, 0x1d43, 0x568a, 0x8f31, 0x1341, 0x00c0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x0120, 0x11e2, 0x0340, 0x9ef3, 0xffff, 0x3566, 0x0a00,
0x0020, 0x0000, 0x0961, 0x1402, 0xb756, 0xa774, 0x03c0, 0x12a2, 0x13a2, 0x3606,
0x6eec, 0x66ec, 0x1e03, 0x5e6b, 0x8750, 0x1ba4, 0x09c1, 0x1c03, 0x1c83, 0x1d03,
0x25e4, 0xcf99, 0xffff, 0x3646, 0x1d03, 0x1c43, 0x1ba2, 0x09e1, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0160, 0xa694, 0xc7d8, 0x0b61, 0x0900, 0x0040,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0100, 0x6e4d, 0xe7fc, 0xcfb9, 0xaf95,
0xaf95, 0xb795, 0xefdc, 0xffff, 0xaf95, 0x3686, 0x1c83, 0x09a1, 0x0000, 0x0060,
0x0160, 0x0c01, 0x6eed, 0xe7dc, 0xbfb7, 0x2644, 0x2e24, 0x3e67, 0x3e67, 0x2484,
0x00e0, 0x0000, 0x12a2, 0x664c, 0x5e6b, 0x9712, 0xffff, 0xdfdb, 0xa794, 0xffff,
0xe7dc, 0x3ea7, 0x2e45, 0x1c43, 0x0140, 0x0020, 0x0901, 0x02e0, 0xa754, 0xcfb9,
0x2e65, 0xbf97, 0xffff, 0x3e67, 0x6eee, 0xcfb9, 0x3e47, 0x1b83, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0020, 0x00e0, 0x0b21, 0x4627, 0xffff, 0xf7de, 0x672c,
0x1d83, 0x1322, 0x00e0, 0x0000, 0x0060, 0x01a0, 0xaf15, 0xcfd9, 0x1d82, 0x1d83,
0x2e64, 0xa774, 0x568a, 0x0c21, 0x0a01, 0x00c0, 0x0000, 0x0000, 0x0040, 0x0180,
0xaef5, 0xaf94, 0x0d21, 0x13c2, 0x12e2, 0x1341, 0x25a4, 0x772e, 0x3e67, 0x1c03,
0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0100, 0x09c1, 0x1c43, 0xcfd9,
0x4e29, 0x0260, 0x0941, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0,
0x01e0, 0x9ed3, 0xffff, 0x2d65, 0x3606, 0xa794, 0x46a8, 0x1c43, 0x0100, 0x0020,
0x0000, 0x0120, 0x9ef3, 0xeffd, 0x15c2, 0x2e05, 0x3e47, 0x13c2, 0x1282, 0x0ba1,
0xbfb7, 0x96b2, 0x01a0, 0x0020, 0x0000, 0x0000, 0x00e0, 0x0200, 0x0380, 0x2e45,
0xdfdb, 0xffff, 0xb776, 0x0ce0, 0x0a61, 0x0121, 0x0020, 0x0000, 0x0000, 0x0000,
0x00a0, 0x12e2, 0x4e68, 0xaf74, 0x8710, 0x0bc1, 0x2444, 0x3d87, 0x12e2, 0x0100,
0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x01a0, 0x8eb1, 0xffff, 0xeffd, 0x3e07,
0x13e2, 0x03a0, 0x96f2, 0xffff, 0x8ef1, 0x0ba1, 0x00c0, 0x0000, 0x0000, 0x0000,
0x0000, 0x00e0, 0x0a61, 0xb756, 0xffff, 0x34a6, 0x0160, 0x0060, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x0920, 0x1382, 0x4688, 0xb796, 0xb796, 0x15a2,
0x13a2, 0x09a1, 0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0101, 0x14a2, 0x9732,
0xffff, 0xa754, 0x4628, 0x2484, 0x0a41, 0x00c0, 0x0040, 0x0000, 0x0000, 0x0000,
0x0000, 0x0080, 0x13a2, 0x668c, 0xefdd, 0xffff, 0xffff, 0xcfb9, 0xefdd, 0xffff,
0x5eab, 0x13a2, 0x0060, 0x0000, 0x0000, 0x0100, 0x3566, 0xdfdb, 0xffff, 0x56aa,
0x0ca1, 0x13e2, 0x1261, 0x0120, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0100,
0x4dea, 0xffff, 0xeffd, 0x2e05, 0x1c63, 0x0480, 0x9713, 0xffff, 0x2ce5, 0x0180,
0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x0a61, 0x1ce3, 0x9f93,
0x4588, 0x0140, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0140, 0x2cc5,
0xf7fe, 0xffff, 0xc7d8, 0x670c, 0xc798, 0xffff, 0x3606, 0x0ae1, 0x00a0, 0x0000,
0x0000, 0x0020, 0x0120, 0x2ca5, 0xeffd, 0xffff, 0xffff, 0xf7fe, 0xcfb9, 0xc7b8,
0x2c64, 0x0140, 0x0020, 0x0000, 0x0000, 0x0080, 0x0a61, 0x1482, 0x3e87, 0x3626,
0x1d03, 0x1422, 0x4e29, 0xb7b6, 0x1423, 0x0921, 0x0060, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0060, 0x0961, 0x1362, 0x3586, 0x6eac, 0x76ae, 0x1be3, 0x0120,
0x0000, 0x0000, 0x00c0, 0x0ae1, 0xc778, 0xc7b8, 0x0c61, 0x12a1, 0x12c2, 0x0c21,
0x5eab, 0xf7fe, 0x3e48, 0x4e69, 0x7f0f, 0x1b63, 0x0040, 0x0941, 0x0a41, 0x1302,
0x0c01, 0xa734, 0xffff, 0x2d05, 0x0b01, 0x0a41, 0x0960, 0x00a0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0020, 0x0120, 0x762e, 0xc7d7, 0x0bc1, 0x0140, 0x0080,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x12c2, 0x1d63, 0x8f51, 0x772d,
0x4f08, 0x5f0a, 0xf7fe, 0xffff, 0x770e, 0x1502, 0x1262, 0x00c0, 0x0000, 0x0020,
0x00a0, 0x11e1, 0x03a0, 0x9f13, 0xffff, 0x2d45, 0x0ba1, 0x1c23, 0x3e47, 0x3586,
0x09e1, 0x0040, 0x0040, 0x0160, 0x0240, 0x3506, 0xffff, 0x5ecb, 0x1e43, 0xe7fc,
0x8f50, 0x1581, 0x13a2, 0x09c1, 0x0080, 0x0000, 0x08e1, 0x0280, 0x9f53, 0xffff,
0x9772, 0xefdd, 0xffff, 0xb796, 0xcfb9, 0xffff, 0x566a, 0x1342, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x00a0, 0x0961, 0x1282, 0x1d23, 0xc7d7, 0xf7fe, 0x670c,
0x2605, 0x13e2, 0x0120, 0x0020, 0x0040, 0x0180, 0x8e92, 0xbfb7, 0x15c2, 0x1d43,
0x25c4, 0xdfdb, 0x6ead, 0x0320, 0x0981, 0x00a0, 0x0000, 0x0000, 0x0020, 0x0160,
0xa6f4, 0xd7da, 0x1dc3, 0x1ce2, 0x1c23, 0x1462, 0x4648, 0xbf97, 0x4688, 0x1c02,
0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00e0, 0x0b21, 0x8730,
0x4e09, 0x02e0, 0x0941, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0,
0x01a0, 0x9eb3, 0xffff, 0x2544, 0x2e06, 0x9772, 0x3e67, 0x1c23, 0x00e0, 0x0000,
0x0000, 0x0080, 0x44a8, 0x4dc9, 0x0be1, 0x562a, 0x8f31, 0x0ba1, 0x1242, 0x0bc1,
0xbf97, 0xa6d4, 0x01a0, 0x0020, 0x0000, 0x00c0, 0x1362, 0x45c8, 0xaf55, 0xd7ba,
0xb796, 0x9f52, 0xdfdb, 0xb776, 0x1be3, 0x0100, 0x0040, 0x0000, 0x0000, 0x0000,
0x0040, 0x0961, 0x0360, 0x9f53, 0xeffd, 0x14c2, 0x0ac1, 0x0a21, 0x0120, 0x0080,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0181, 0x35a6, 0xd7da, 0xffff, 0x4e69,
0x1422, 0x03e0, 0x96f2, 0xffff, 0x3da7, 0x0a61, 0x0080, 0x0000, 0x0000, 0x0000,
0x0020, 0x0100, 0x0aa0, 0xb736, 0xffff, 0x2cc5, 0x0180, 0x00a0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0980, 0x1322, 0x0ca1, 0x4ec9, 0xdfdb, 0x8f71,
0x1de3, 0x02e0, 0x0120, 0x0020, 0x0000, 0x0000, 0x0000, 0x0120, 0x4de9, 0xffff,
0xcfb9, 0x2e45, 0x0c61, 0x1362, 0x0ae1, 0x0240, 0x00c0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0080, 0x13e2, 0x7f0e, 0xffff, 0xffff, 0xffff, 0xcfd9, 0xbfb7, 0xbf97,
0x4628, 0x1363, 0x0060, 0x0000, 0x0000, 0x00e0, 0x3547, 0xc798, 0xdfdb, 0x2e05,
0x1402, 0x13e2, 0x1382, 0x0ac1, 0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0,
0x3506, 0x9752, 0xeffd, 0x4628, 0x1482, 0x0d21, 0x9f33, 0xffff, 0x2c85, 0x0140,
0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00e0, 0x1362, 0xc7d8,
0x76ad, 0x0b41, 0x08e1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0120, 0x34c6,
0xffff, 0xa754, 0x1602, 0x1dc2, 0x36a6, 0xcfb9, 0x4687, 0x13e2, 0x00e0, 0x0000,
0x0000, 0x0000, 0x00a0, 0x0b21, 0x5ecb, 0xefdd, 0xe7dc, 0x2e04, 0x0ca1, 0x13e2,
0x09e1, 0x0080, 0x0000, 0x0000, 0x0020, 0x0140, 0x2d25, 0x7f0e, 0x56ca, 0x25a4,
0x1441, 0x0460, 0x5e8b, 0xcfd9, 0x1d03, 0x0280, 0x0160, 0x0040, 0x0000, 0x0000,
0x0000, 0x0000, 0x0100, 0x1342, 0x2da5, 0x9792, 0x4e09, 0x02c0, 0x0140, 0x0040,
0x0000, 0x0000, 0x0020, 0x0160, 0x8e91, 0x9f73, 0x2563, 0x13c2, 0x12c1, 0x0ac1,
0x4e09, 0xd7da, 0x6ecd, 0x3646, 0x35e6, 0x1282, 0x0000, 0x0000, 0x0080, 0x0921,
0x0220, 0xa6f4, 0xdffb, 0x1402, 0x0981, 0x0100, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x4508, 0x8f31, 0x35e6, 0x1b83, 0x00e0,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0100, 0x1422, 0xc7d8, 0x772e,
0x2ea5, 0x2e85, 0x8f51, 0xd7da, 0x56ca, 0x1462, 0x0961, 0x0040, 0x0000, 0x0000,
0x0020, 0x00c0, 0x0180, 0x4da9, 0xa774, 0x35e6, 0x1402, 0x0a21, 0x23a4, 0x1ae3,
0x00e0, 0x0000, 0x0000, 0x0060, 0x00e0, 0x2c05, 0xf7fe, 0x5669, 0x1561, 0x772e,
0x770d, 0x3625, 0x0b41, 0x0940, 0x0060, 0x0000, 0x0080, 0x0180, 0x4548, 0x666c,
0x1502, 0xaf35, 0xffff, 0x45e8, 0x35c6, 0x5e8b, 0x24e4, 0x1242, 0x0000, 0x0000,
0x0000, 0x0000, 0x0080, 0x0120, 0x09e1, 0x0ae1, 0x04e0, 0x9792, 0xffff, 0xe7dc,
0x5f0b, 0x1d03, 0x0961, 0x0020, 0x0000, 0x00e0, 0x2ce5, 0x5ecb, 0x66ec, 0x3666,
0x2e65, 0x9772, 0x4648, 0x0c01, 0x09a1, 0x0080, 0x0000, 0x0000, 0x0020, 0x0140,
0x8e90, 0xffff, 0x772e, 0x36a6, 0x3666, 0x3686, 0xbf97, 0xffff, 0x4e89, 0x13e2,
0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0a21, 0x35a5,
0x2dc6, 0x1c63, 0x09c1, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040,
0x0160, 0x9e93, 0xffff, 0x1d43, 0x3e07, 0xbfb7, 0x3e68, 0x1c03, 0x00a0, 0x0000,
0x0000, 0x0000, 0x0100, 0x01c0, 0x0200, 0x7e70, 0xcfd9, 0x14e2, 0x13e2, 0x3526,
0xeffd, 0x8670, 0x0140, 0x0000, 0x0000, 0x0941, 0x2504, 0x9f73, 0xe7fc, 0xaf75,
0x4e49, 0x1502, 0x666c, 0xcfb8, 0x3546, 0x0a41, 0x00e0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0060, 0x0981, 0x2c64, 0x4e49, 0x670c, 0x24e4, 0x0180, 0x0100, 0x0080,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0100, 0x1401, 0x86f0, 0xffff, 0x6ecc,
0x1462, 0x0cc2, 0x9f13, 0xffff, 0x2d05, 0x0160, 0x0040, 0x0000, 0x0000, 0x0000,
0x0020, 0x0120, 0x0ac1, 0xc778, 0xf7ff, 0x2c86, 0x01a0, 0x00c0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x1322, 0x1d23, 0x25e4, 0x2664, 0x36a6, 0xd7b9,
0xbf97, 0x4e69, 0x0ac1, 0x0060, 0x0000, 0x0000, 0x0000, 0x00c0, 0x1cc3, 0xaf75,
0xffff, 0x3e47, 0x14a1, 0x1522, 0x76cd, 0xb776, 0x0ac1, 0x0000, 0x0000, 0x0000,
0x0000, 0x0040, 0x09c1, 0x1462, 0x7f2f, 0xcf99, 0x9752, 0x15a2, 0x0c61, 0x0ba1,
0x0a81, 0x0120, 0x0000, 0x0000, 0x0000, 0x0080, 0x0ae1, 0x2dc5, 0xe7fc, 0x4e69,
0x0d01, 0x4668, 0x770e, 0x66ec, 0x0a21, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060,
0x0240, 0x3586, 0xffff, 0x66ac, 0x1542, 0x2e45, 0xbf97, 0xf7fe, 0x1c03, 0x0080,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0aa1, 0x76ce,
0x6eed, 0x35c6, 0x09e1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00e0, 0x3486,
0xffff, 0x9732, 0x0d21, 0x14c1, 0x66cc, 0xe7dc, 0x4e69, 0x13a2, 0x00a0, 0x0000,
0x0000, 0x0000, 0x0040, 0x09a1, 0x0be1, 0x76ed, 0xffff, 0x76ed, 0x1cc3, 0x0a01,
0x0100, 0x0060, 0x0000, 0x0000, 0x0060, 0x0280, 0xa734, 0xffff, 0x8f50, 0x3686,
0x3e67, 0x8730, 0xffff, 0xffff, 0xeffd, 0x9f33, 0x2d25, 0x09c1, 0x0000, 0x0000,
0x0000, 0x00a0, 0x02a0, 0x86b0, 0xcfd9, 0x3566, 0x12e2, 0x0120, 0x0060, 0x0000,
0x0000, 0x0000, 0x0000, 0x0080, 0x0a61, 0x1c43, 0x35e6, 0x3606, 0x2d64, 0x1322,
0x13c2, 0x25a4, 0xb7d6, 0x9712, 0x1342, 0x0100, 0x0000, 0x0000, 0x0000, 0x00c0,
0x01c0, 0xa6d4, 0xc7d8, 0x0bc1, 0x0961, 0x00a0, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0a41, 0x2504, 0xc7f8, 0x4de9, 0x01c0,
0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00c0, 0x2404, 0xeffd, 0x6f0d,
0x1e23, 0x2604, 0x1e23, 0x3646, 0x3e47, 0x2484, 0x00e0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x00c0, 0x0b01, 0x2564, 0x46a8, 0x2d45, 0x0a01, 0x0100, 0x0060,
0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x1b83, 0xc7f8, 0x560a, 0x1422, 0x1d83,
0x56aa, 0x7f0f, 0x0bc1, 0x0921, 0x0040, 0x0000, 0x0020, 0x00a0, 0x0a41, 0x0b41,
0x02c0, 0xa6d4, 0xeffd, 0x1463, 0x1362, 0x1bc3, 0x1282, 0x00e0, 0x0000, 0x0000,
0x0020, 0x0160, 0x1322, 0x1c43, 0x1cc3, 0x45c8, 0x7ecf, 0xb796, 0xffff, 0xefdc,
0x4e89, 0x1c23, 0x0120, 0x0020, 0x0000, 0x0060, 0x0a81, 0x3566, 0xffff, 0xb776,
0x2e45, 0x2584, 0x35e6, 0x4628, 0x1c23, 0x09c1, 0x0020, 0x0000, 0x0000, 0x00e0,
0x5e2c, 0xb7b6, 0x2da5, 0x24c4, 0x2444, 0x1482, 0x5e6b, 0xaf95, 0x3626, 0x1ba3,
0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x1261,
0x2d25, 0x4668, 0x1be3, 0x0961, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0120, 0x9ed3, 0xffff, 0x66ec, 0xa774, 0xffff, 0x5eaa, 0x13a2, 0x0080, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x0120, 0x24c4, 0xa794, 0xe7fc, 0xe7fc, 0xf7fe,
0xbfb7, 0x1c83, 0x00e0, 0x0000, 0x0000, 0x00a0, 0x3526, 0x4ea9, 0x3565, 0x13e2,
0x0a81, 0x0a41, 0x0ac1, 0x13e2, 0x3546, 0x24c4, 0x0a81, 0x0080, 0x0000, 0x0000,
0x0000, 0x0000, 0x0080, 0x0180, 0x13a2, 0x5e8b, 0x4628, 0x1c43, 0x0a21, 0x00a0,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0220, 0x3566, 0xffff, 0x9712,
0x0540, 0x2604, 0xc798, 0xeffd, 0x2404, 0x00c0, 0x0020, 0x0000, 0x0000, 0x0000,
0x0020, 0x0960, 0x1362, 0xc778, 0xffff, 0x3546, 0x0220, 0x00e0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x1b63, 0x35c6, 0x872f, 0x5eea, 0x7f0f, 0xf7fe,
0xffff, 0xc7d8, 0x1362, 0x0060, 0x0000, 0x0000, 0x0000, 0x0040, 0x12a1, 0x2d45,
0x9f73, 0x56aa, 0x2e05, 0x4e88, 0xaf74, 0xa754, 0x12c2, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x00a0, 0x12a2, 0x25c4, 0x46a7, 0x4668, 0x0b21, 0x09a1, 0x0921,
0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0141, 0x13a2, 0x4e49, 0x3e68,
0x3e47, 0x6f0d, 0x66cc, 0x4588, 0x0181, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020,
0x0100, 0x1b23, 0x5609, 0x4668, 0x3646, 0x4ec9, 0x5e8b, 0x34e6, 0x09e1, 0x0040,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0901, 0x1322,
0x45e8, 0x6f2d, 0x0a41, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x1ba3,
0xdffb, 0x7f0f, 0x1d82, 0x0ce1, 0x86f0, 0xeffd, 0x45a8, 0x0a61, 0x0040, 0x0000,
0x0000, 0x0000, 0x0000, 0x0080, 0x09a0, 0x3526, 0xb776, 0xcfb9, 0x35e6, 0x0b01,
0x0120, 0x0020, 0x0000, 0x0000, 0x0060, 0x0aa1, 0x9f32, 0xdfdb, 0x566a, 0x2dc5,
0x3e07, 0x9f13, 0xbf56, 0xb776, 0xbf98, 0xbf77, 0x3da7, 0x0a21, 0x0000, 0x0000,
0x0000, 0x09a1, 0x3566, 0xa754, 0x96d2, 0x0ae1, 0x00c0, 0x0060, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0981, 0x1b63, 0x23e4, 0x1b83, 0x1242,
0x09e1, 0x12a1, 0x3ce7, 0x3446, 0x00e0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0060,
0x0160, 0xa6b4, 0xffff, 0x24e5, 0x09e1, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x0a21, 0x3466, 0x3506, 0x2424,
0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0b21, 0x9772, 0x6eec,
0x2664, 0x2e25, 0x2504, 0x1382, 0x12e2, 0x0980, 0x0020, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0020, 0x00c0, 0x12a2, 0x3e07, 0x35e6, 0x2423, 0x0941, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1302, 0xb7d6, 0x4da9, 0x0ac1, 0x0ae1,
0x4dc9, 0xb7d6, 0x1c63, 0x0980, 0x0080, 0x0000, 0x0000, 0x0000, 0x0080, 0x0921,
0x01a0, 0x8e71, 0xbfd7, 0x0b61, 0x0941, 0x0101, 0x0040, 0x0000, 0x0000, 0x0000,
0x0060, 0x0a61, 0x3586, 0x4ec9, 0x5ecb, 0xb7b6, 0xcfd9, 0x9752, 0xaf95, 0x8f51,
0x1d43, 0x0aa1, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0060, 0x2363, 0xc7b8, 0xa734,
0x2524, 0x0aa1, 0x2c85, 0x5ecb, 0x2d45, 0x12c2, 0x0020, 0x0000, 0x0000, 0x0040,
0x2c65, 0x45a7, 0x0b01, 0x0a01, 0x09a1, 0x09c1, 0x13e2, 0x3586, 0x24c4, 0x12a2,
0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060,
0x1b23, 0x3546, 0x1ba3, 0x09a1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0080, 0x44e8, 0x8ef1, 0x3d47, 0x562a, 0x76ce, 0x2d66, 0x0aa1, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x1242, 0x2464, 0x5deb, 0x664c, 0x6e4c,
0x3466, 0x0a21, 0x0020, 0x0000, 0x0000, 0x0000, 0x1262, 0x1b43, 0x0a01, 0x0921,
0x00a0, 0x00c0, 0x0080, 0x0100, 0x1262, 0x1262, 0x0120, 0x0020, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0060, 0x0961, 0x0b21, 0x45e8, 0x7f4f, 0x1be3, 0x0140,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00e0, 0x1343, 0x66ac, 0x772e,
0x9772, 0xc7d8, 0x76ee, 0x3d07, 0x0200, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0a01, 0x2584, 0xb7b6, 0xffff, 0x56a9, 0x1422, 0x0981, 0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0020, 0x0a21, 0x2d25, 0xe7fc, 0xf7fe, 0xffff, 0xffff,
0xffff, 0xaf95, 0x0b21, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x0aa1,
0x2564, 0x4668, 0x5eeb, 0x8730, 0x4e69, 0x1462, 0x0160, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0020, 0x09c1, 0x2424, 0x2d25, 0x24c4, 0x09c1, 0x0080, 0x0040,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0160, 0x0b81, 0x2d65,
0x4628, 0x4e29, 0x2ce5, 0x0a81, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0040, 0x0100, 0x0ae1, 0x24a4, 0x3586, 0x3587, 0x13a2, 0x0120, 0x0060, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080,
0x2364, 0x3ce7, 0x0941, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x01a0,
0x2c85, 0x35e6, 0x3666, 0x3647, 0x3e47, 0x3dc7, 0x12e1, 0x00c0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x09a1, 0x0b61, 0x3606, 0x4688, 0x35a6,
0x09c1, 0x0000, 0x0000, 0x0000, 0x0040, 0x0980, 0x2ce5, 0x3586, 0x0b01, 0x0a41,
0x0a41, 0x0240, 0x0ac0, 0x0b01, 0x0b01, 0x0b21, 0x1302, 0x0100, 0x0000, 0x0000,
0x0000, 0x0a01, 0x6eed, 0x3d67, 0x0280, 0x0120, 0x0040, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0040, 0x0080, 0x00a0, 0x0120,
0x00c0, 0x00a0, 0x0080, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x00c0, 0x6dad, 0xbf77, 0x2cc5, 0x0a01, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00c0, 0x1282, 0x1ba4,
0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x09e1, 0x2524, 0x9732,
0xd7ba, 0x4688, 0x1c63, 0x0961, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x1b02, 0x2c65, 0x2c85, 0x09c1, 0x0020,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0a41, 0x7f2f, 0x34a6, 0x0160, 0x0960,
0x1b23, 0x34c6, 0x2d25, 0x1b83, 0x00e0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040,
0x00a0, 0x3506, 0x8f31, 0x0a41, 0x0080, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000,
0x0040, 0x0140, 0x2464, 0x35e6, 0x35c6, 0x1d04, 0x1c43, 0x13e1, 0x0bc1, 0x1442,
0x35c6, 0x1c03, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x00c0, 0x0aa1, 0x1262,
0x09a1, 0x0120, 0x0160, 0x0a21, 0x12e2, 0x09e1, 0x0020, 0x0000, 0x0000, 0x0000,
0x00a0, 0x0140, 0x00c0, 0x00c0, 0x00c0, 0x00e0, 0x0141, 0x09a1, 0x0981, 0x00a0,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0060, 0x0100, 0x0961, 0x00e0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0100, 0x01e0, 0x0160, 0x0aa1, 0x13c2, 0x12a2, 0x0120, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0100, 0x0200, 0x0a41, 0x0a00,
0x00e0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0020,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0101, 0x1282, 0x2404, 0x2484, 0x1302,
0x0020, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x0120, 0x0b41, 0x24a4,
0x5569, 0x4d89, 0x1c03, 0x01a0, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x00c0, 0x1302, 0x3466, 0x3ce7, 0x23c4, 0x1222, 0x00a0, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x0a61, 0x2c25, 0x3cc7, 0x34e6, 0x3ce7,
0x3ca6, 0x23c4, 0x0140, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0,
0x0a01, 0x12e2, 0x1362, 0x1362, 0x12a2, 0x0961, 0x0060, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0100, 0x0961, 0x0100, 0x0040, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00e0, 0x0180,
0x0a21, 0x0a01, 0x0961, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x00a0, 0x0120, 0x0160, 0x0160, 0x00e0, 0x0040, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0020, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040,
0x0100, 0x12e2, 0x2404, 0x23e4, 0x1b63, 0x0a41, 0x00a0, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0040, 0x00e1, 0x12a2, 0x2364, 0x12c2,
0x00a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x09a1, 0x09e1, 0x00e0, 0x00c0,
0x00a0, 0x00a0, 0x00a0, 0x00a0, 0x00a0, 0x00c0, 0x00a0, 0x0020, 0x0000, 0x0000,
0x0000, 0x0060, 0x0a21, 0x0981, 0x00a0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0160, 0x0220, 0x0140, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0020,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0080, 0x01e0, 0x7e4f,
0xdffb, 0x45e8, 0x1342, 0x00e0, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x09c1, 0x1b23, 0x0981, 0x0020,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0901, 0x2464, 0x12a2, 0x0080, 0x00a0,
0x0080, 0x0140, 0x23e4, 0x1ba3, 0x00e0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x12a2, 0x1be3, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0060, 0x0a21, 0x1342, 0x1362, 0x0a21, 0x0140, 0x0921, 0x0921, 0x0961,
0x1b43, 0x1262, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0020, 0x0040, 0x0020, 0x0000, 0x00e0, 0x0080, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x0040, 0x00a0, 0x00e0, 0x0080, 0x0020, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0060, 0x0040,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x00c0, 0x12c2, 0x0a21,
0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x0100,
0x0140, 0x0120, 0x00e0, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x0080, 0x0080, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0060, 0x00e0, 0x0100, 0x0100,
0x00a0, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0040, 0x0080, 0x0080, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0020, 0x00a0, 0x0080, 0x0080, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x00a0, 0x1262,
0x1b83, 0x0a21, 0x0100, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x0060, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0060, 0x0040, 0x0000, 0x0000,
0x0000, 0x0020, 0x00a0, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0040, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0040, 0x0060, 0x0060, 0x0080, 0x0040, 0x0040, 0x0020,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
};
/* Descriptor tying the raw pixel array above to its dimensions so the
 * bitmap renderer can draw it.
 * NOTE(review): FORMAT_NATIVE presumably means the data is already laid
 * out in the display's native pixel format -- confirm against the loader. */
const struct bitmap bm_matrix_bold = {
	.width = BMPWIDTH_matrix_bold,
	.height = BMPHEIGHT_matrix_bold,
	.format = FORMAT_NATIVE,
	.data = (unsigned char*)matrix_bold,
};
| gpl-2.0 |
AsmodeosNetworkCO/AsmodeosCORE4.3.4 | src/server/worldserver/Main.cpp | 1 | 4929 | /*
* Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/// \addtogroup Trinityd Trinity Daemon
/// @{
/// \file
#include <openssl/opensslv.h>
#include <openssl/crypto.h>
#include <ace/Version.h>
#include "Common.h"
#include "Database/DatabaseEnv.h"
#include "Configuration/Config.h"
#include "Log.h"
#include "Master.h"
#ifndef _TRINITY_CORE_CONFIG
# define _TRINITY_CORE_CONFIG "worldserver.conf"
#endif //_TRINITY_CORE_CONFIG
#ifdef _WIN32
#include "ServiceWin32.h"
char serviceName[] = "worldserver";
char serviceLongName[] = "AsmodeosCore world service";
char serviceDescription[] = "AsmodeosCore World of Warcraft emulator world service";
/*
* -1 - not in service mode
* 0 - stopped
* 1 - running
* 2 - paused
*/
int m_ServiceStatus = -1;
#endif
WorldDatabaseWorkerPool WorldDatabase; ///< Accessor to the world database
CharacterDatabaseWorkerPool CharacterDatabase; ///< Accessor to the character database
LoginDatabaseWorkerPool LoginDatabase; ///< Accessor to the realm/login database
uint32 realmID; ///< Id of the realm
/// Print out the usage string for this program on the console.
/// Print out the usage string for this program on the console.
/// @param prog  argv[0], echoed back so the user sees the binary's name.
/// NOTE(review): the text mixes "\n" and "\n\r" line endings -- looks
/// accidental, but it is user-visible output so left untouched here.
void usage(const char *prog)
{
    sLog->outInfo(LOG_FILTER_WORLDSERVER, "Usage: \n %s [<options>]\n"
        " -c config_file use config_file as configuration file\n\r"
#ifdef _WIN32
        " Running as service functions:\n\r"
        " --service run as service\n\r"
        " -s install install service\n\r"
        " -s uninstall uninstall service\n\r"
#endif
        , prog);
}
/// Launch the Trinity server
/// Launch the Trinity server.
/// Parses the command line (-c <file> to pick a config file; on Windows
/// additionally -s install/uninstall and --service), loads the config,
/// logs library versions and hands control to the Master run loop.
/// @return Master's exit code: 0 = normal shutdown, 1 = error,
///         2 = restart requested (see the comment above the return).
extern int main(int argc, char **argv)
{
    ///- Command line parsing to get the configuration file name
    char const* cfg_file = _TRINITY_CORE_CONFIG;
    int c = 1;
    while (c < argc)
    {
        if (strcmp(argv[c], "-c") == 0)
        {
            if (++c >= argc)
            {
                printf("Runtime-Error: -c option requires an input argument");
                usage(argv[0]);
                return 1;
            }
            else
                cfg_file = argv[c];
        }
        // NOTE(review): after "-c <file>" is consumed, 'c' now points at the
        // file name, which is then also tested against "-s"/"--service"
        // below. Harmless unless the config file is literally named "-s",
        // but worth confirming this fall-through is intended.
#ifdef _WIN32
        ////////////
        //Services//
        ////////////
        if (strcmp(argv[c], "-s") == 0)
        {
            if (++c >= argc)
            {
                printf("Runtime-Error: -s option requires an input argument");
                usage(argv[0]);
                return 1;
            }

            if (strcmp(argv[c], "install") == 0)
            {
                if (WinServiceInstall())
                    printf("Installing service\n");
                return 1;
            }
            else if (strcmp(argv[c], "uninstall") == 0)
            {
                if (WinServiceUninstall())
                    printf("Uninstalling service\n");
                return 1;
            }
            else
            {
                printf("Runtime-Error: unsupported option %s", argv[c]);
                usage(argv[0]);
                return 1;
            }
        }

        if (strcmp(argv[c], "--service") == 0)
        {
            // Enter Windows service dispatch; sets m_ServiceStatus.
            WinServiceRun();
        }
        ////
#endif
        ++c;
    }

    // Abort early if the configuration file cannot be loaded.
    if (!ConfigMgr::Load(cfg_file))
    {
        printf("Invalid or missing configuration file : %s\n", cfg_file);
        printf("Verify that the file exists and has \'[worldserver]' written in the top of the file!\n");
        return 1;
    }

    sLog->outInfo(LOG_FILTER_WORLDSERVER, "Using configuration file %s.", cfg_file);

    // Log the versions of the linked OpenSSL and ACE libraries.
    sLog->outInfo(LOG_FILTER_WORLDSERVER, "Using SSL version: %s (library: %s)", OPENSSL_VERSION_TEXT, SSLeay_version(SSLEAY_VERSION));
    sLog->outInfo(LOG_FILTER_WORLDSERVER, "Using ACE version: %s", ACE_VERSION);

    ///- and run the 'Master'
    /// \todo Why do we need this 'Master'? Can't all of this be in the Main as for Realmd?
    int ret = sMaster->Run();

    // at sMaster return function exist with codes
    // 0 - normal shutdown
    // 1 - shutdown at error
    // 2 - restart command used, this code can be used by restarter for restart Trinityd
    return ret;
}
| gpl-2.0 |
wangxingchao/spi-omap | sound/soc/soc-core.c | 1 | 88477 | /*
* soc-core.c -- ALSA SoC Audio Layer
*
* Copyright 2005 Wolfson Microelectronics PLC.
* Copyright 2005 Openedhand Ltd.
* Copyright (C) 2010 Slimlogic Ltd.
* Copyright (C) 2010 Texas Instruments Inc.
*
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
* with code, comments and ideas from :-
* Richard Purdie <richard@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* TODO:
* o Add hw rules to enforce rates, etc.
* o More testing with other codecs/machines.
* o Add more codecs and platforms to ensure good API coverage.
* o Support TDM on PCM and I2S
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <sound/ac97_codec.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#define CREATE_TRACE_POINTS
#include <trace/events/asoc.h>
#define NAME_SIZE 32
static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq);
#ifdef CONFIG_DEBUG_FS
struct dentry *snd_soc_debugfs_root;
EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);
#endif
static DEFINE_MUTEX(client_mutex);
static LIST_HEAD(card_list);
static LIST_HEAD(dai_list);
static LIST_HEAD(platform_list);
static LIST_HEAD(codec_list);
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
/*
* This is a timeout to do a DAPM powerdown after a stream is closed().
* It can be used to eliminate pops between different playback streams, e.g.
* between two audio tracks.
*/
static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
/* returns the minimum number of bytes needed to represent
 * a particular given value; always at least one byte, even for zero */
static int min_bytes_needed(unsigned long val)
{
	int bytes = 0;

	/* Peel off one byte per iteration; the body runs at least once,
	 * so a value of zero still reports a single byte. */
	do {
		val >>= 8;
		++bytes;
	} while (val);

	return bytes;
}
/* fill buf which is 'len' bytes with a formatted
 * string of the form 'reg: value\n'; 'len' must equal
 * wordsize + regsize + 3 or -EINVAL is returned.  An unreadable
 * register is rendered as a run of 'X' characters. */
static int format_register_str(struct snd_soc_codec *codec,
			       unsigned int reg, char *buf, size_t len)
{
	/* wordsize: hex digits for the widest register index;
	 * regsize: hex digits for a register value */
	int wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
	int regsize = codec->driver->reg_word_size * 2;
	int ret;
	char tmpbuf[len + 1];	/* VLA -- bounded by the WARN_ON below */
	char regbuf[regsize + 1];

	/* since tmpbuf is allocated on the stack, warn the callers if they
	 * try to abuse this function */
	WARN_ON(len > 63);

	/* +2 for ': ' and + 1 for '\n' */
	if (wordsize + regsize + 2 + 1 != len)
		return -EINVAL;

	ret = snd_soc_read(codec , reg);
	if (ret < 0) {
		/* read failed: show placeholder 'X's instead of a value */
		memset(regbuf, 'X', regsize);
		regbuf[regsize] = '\0';
	} else {
		snprintf(regbuf, regsize + 1, "%.*x", regsize, ret);
	}

	/* prepare the buffer */
	snprintf(tmpbuf, len + 1, "%.*x: %s\n", wordsize, reg, regbuf);
	/* copy it back to the caller without the '\0' */
	memcpy(buf, tmpbuf, len);

	return 0;
}
/* codec register dump: fill 'buf' (up to 'count' bytes) with one
 * fixed-width "reg: value" line per readable register, starting at
 * byte offset 'pos' of the full dump.  Returns the number of bytes
 * written.  If the driver supplies display_register() that callback
 * formats the output instead (and 'pos' paging is not applied). */
static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf,
				  size_t count, loff_t pos)
{
	int i, step = 1;
	int wordsize, regsize;
	int len;
	size_t total = 0;	/* bytes produced into buf */
	loff_t p = 0;		/* running offset within the whole dump */

	wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
	regsize = codec->driver->reg_word_size * 2;

	/* one line: index + ": " + value + "\n" */
	len = wordsize + regsize + 2 + 1;

	if (!codec->driver->reg_cache_size)
		return 0;

	if (codec->driver->reg_cache_step)
		step = codec->driver->reg_cache_step;

	for (i = 0; i < codec->driver->reg_cache_size; i += step) {
		/* skip registers the codec declares unreadable */
		if (codec->readable_register && !codec->readable_register(codec, i))
			continue;
		if (codec->driver->display_register) {
			count += codec->driver->display_register(codec, buf + count,
							 PAGE_SIZE - count, i);
		} else {
			/* only support larger than PAGE_SIZE bytes debugfs
			 * entries for the default case */
			if (p >= pos) {
				/* leave room for a trailing NUL */
				if (total + len >= count - 1)
					break;
				format_register_str(codec, i, buf + total, len);
				total += len;
			}
			p += len;
		}
	}

	total = min(total, count - 1);

	return total;
}
/* sysfs 'codec_reg' show: dump the first page of the codec register
 * map for the runtime that owns this device. */
static ssize_t codec_reg_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd =
			container_of(dev, struct snd_soc_pcm_runtime, dev);

	return soc_codec_reg_show(rtd->codec, buf, PAGE_SIZE, 0);
}

/* read-only sysfs attribute */
static DEVICE_ATTR(codec_reg, 0444, codec_reg_show, NULL);
/* sysfs 'pmdown_time' show: report this runtime's DAPM powerdown
 * delay in milliseconds. */
static ssize_t pmdown_time_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd =
			container_of(dev, struct snd_soc_pcm_runtime, dev);

	return sprintf(buf, "%ld\n", rtd->pmdown_time);
}
/* sysfs 'pmdown_time' store: parse a signed decimal number of
 * milliseconds directly into the runtime's pmdown_time.  Returns the
 * number of bytes consumed, or the strict_strtol error code. */
static ssize_t pmdown_time_set(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct snd_soc_pcm_runtime *rtd =
			container_of(dev, struct snd_soc_pcm_runtime, dev);
	int ret;

	ret = strict_strtol(buf, 10, &rtd->pmdown_time);
	if (ret)
		return ret;

	return count;
}

/* read/write sysfs attribute */
static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set);
#ifdef CONFIG_DEBUG_FS
/* debugfs open: stash the codec pointer (stored in i_private at file
 * creation time) where the read/write handlers expect it. */
static int codec_reg_open_file(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/* debugfs 'codec_reg' read: format up to 'count' bytes of the register
 * dump starting at *ppos into a kernel buffer and copy it to userspace,
 * advancing *ppos by the amount delivered. */
static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	ssize_t ret;
	struct snd_soc_codec *codec = file->private_data;
	char *buf;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = soc_codec_reg_show(codec, buf, count, *ppos);
	if (ret >= 0) {
		if (copy_to_user(user_buf, buf, ret)) {
			kfree(buf);
			return -EFAULT;
		}
		*ppos += ret;
	}

	kfree(buf);

	return ret;
}
/* debugfs 'codec_reg' write: parse "<reg> <value>" (both hex) from
 * userspace and write the value to the codec register.  Taints the
 * kernel since userspace is poking hardware directly.
 * NOTE(review): 'step' is computed but never used here -- presumably
 * left over from an earlier validation of 'reg'; confirm upstream. */
static ssize_t codec_reg_write_file(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	int step = 1;
	struct snd_soc_codec *codec = file->private_data;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	if (codec->driver->reg_cache_step)
		step = codec->driver->reg_cache_step;

	/* skip leading spaces, parse the register index ... */
	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	/* ... then skip spaces again and parse the value */
	while (*start == ' ')
		start++;
	if (strict_strtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER);

	snd_soc_write(codec, reg, value);
	return buf_size;
}
/* file_operations for the per-codec 'codec_reg' debugfs entry */
static const struct file_operations codec_reg_fops = {
	.open = codec_reg_open_file,
	.read = codec_reg_read_file,
	.write = codec_reg_write_file,
	.llseek = default_llseek,
};
/* Create the per-codec debugfs tree under the card's directory:
 * cache state flags, the codec_reg register file and the DAPM nodes.
 * Failures are logged but non-fatal -- debugfs is diagnostics only. */
static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
{
	struct dentry *debugfs_card_root = codec->card->debugfs_card_root;

	codec->debugfs_codec_root = debugfs_create_dir(codec->name,
						       debugfs_card_root);
	if (!codec->debugfs_codec_root) {
		printk(KERN_WARNING
		       "ASoC: Failed to create codec debugfs directory\n");
		return;
	}

	/* expose the register-cache bookkeeping flags read-only */
	debugfs_create_bool("cache_sync", 0444, codec->debugfs_codec_root,
			    &codec->cache_sync);
	debugfs_create_bool("cache_only", 0444, codec->debugfs_codec_root,
			    &codec->cache_only);

	codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
						 codec->debugfs_codec_root,
						 codec, &codec_reg_fops);
	if (!codec->debugfs_reg)
		printk(KERN_WARNING
		       "ASoC: Failed to create codec register debugfs file\n");

	snd_soc_dapm_debugfs_init(&codec->dapm, codec->debugfs_codec_root);
}
/* Tear down everything soc_init_codec_debugfs() created. */
static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
	debugfs_remove_recursive(codec->debugfs_codec_root);
}
/* debugfs 'codecs' read: list every registered codec, one name per
 * line, truncated to a single page.
 * NOTE(review): iterates codec_list without taking client_mutex --
 * presumably tolerated for a diagnostics file; confirm upstream. */
static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_codec *codec;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(codec, &codec_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
			       codec->name);
		if (len >= 0)
			ret += len;
		/* snprintf reports the would-be length, so clamp to a page */
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	if (ret >= 0)
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations codec_list_fops = {
	.read = codec_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};
/* debugfs 'dais' read: emit every registered DAI name, one per line,
 * capped at a single page of output. */
static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct snd_soc_dai *dai;
	ssize_t written = 0;
	ssize_t n;
	char *page = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (page == NULL)
		return -ENOMEM;

	list_for_each_entry(dai, &dai_list, list) {
		n = snprintf(page + written, PAGE_SIZE - written, "%s\n",
			     dai->name);
		if (n >= 0)
			written += n;
		/* snprintf reports the would-be length; clamp at one page */
		if (written > PAGE_SIZE) {
			written = PAGE_SIZE;
			break;
		}
	}

	written = simple_read_from_buffer(user_buf, count, ppos, page, written);

	kfree(page);

	return written;
}

static const struct file_operations dai_list_fops = {
	.read = dai_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};
/* debugfs 'platforms' read: list every registered platform, one name
 * per line, truncated to a single page. */
static ssize_t platform_list_read_file(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_platform *platform;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(platform, &platform_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
			       platform->name);
		if (len >= 0)
			ret += len;
		/* snprintf reports the would-be length, so clamp to a page */
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations platform_list_fops = {
	.read = platform_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};
/* Create the per-card debugfs directory plus the writable
 * 'dapm_pop_time' tuning knob.  Failures are logged but non-fatal. */
static void soc_init_card_debugfs(struct snd_soc_card *card)
{
	card->debugfs_card_root = debugfs_create_dir(card->name,
						     snd_soc_debugfs_root);
	if (!card->debugfs_card_root) {
		dev_warn(card->dev,
			 "ASoC: Failed to create codec debugfs directory\n");
		return;
	}

	card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
						    card->debugfs_card_root,
						    &card->pop_time);
	if (!card->debugfs_pop_time)
		dev_warn(card->dev,
			 "Failed to create pop time debugfs file\n");
}
/* Tear down everything soc_init_card_debugfs() created. */
static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
	debugfs_remove_recursive(card->debugfs_card_root);
}
#else

/* CONFIG_DEBUG_FS disabled: no-op stand-ins so callers need no #ifdefs */
static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
{
}

static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
}

static inline void soc_init_card_debugfs(struct snd_soc_card *card)
{
}

static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
}

#endif
#ifdef CONFIG_SND_SOC_AC97_BUS
/* unregister ac97 codec */
static int soc_ac97_dev_unregister(struct snd_soc_codec *codec)
{
	/* only unregister if registration actually put it on the bus */
	if (codec->ac97->dev.bus)
		device_unregister(&codec->ac97->dev);
	return 0;
}

/* stop no dev release warning */
static void soc_ac97_device_release(struct device *dev){}

/* register ac97 codec to bus; device is named
 * "<card number>-0:<codec name>".  On failure the bus pointer is
 * cleared so soc_ac97_dev_unregister() knows not to unregister. */
static int soc_ac97_dev_register(struct snd_soc_codec *codec)
{
	int err;

	codec->ac97->dev.bus = &ac97_bus_type;
	codec->ac97->dev.parent = codec->card->dev;
	codec->ac97->dev.release = soc_ac97_device_release;

	dev_set_name(&codec->ac97->dev, "%d-%d:%s",
		     codec->card->snd_card->number, 0, codec->name);
	err = device_register(&codec->ac97->dev);
	if (err < 0) {
		snd_printk(KERN_ERR "Can't register ac97 bus\n");
		codec->ac97->dev.bus = NULL;
		return err;
	}
	return 0;
}
#endif
#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend
 *
 * Ordered teardown: wait for any in-flight resume, block userspace,
 * mute active DACs, suspend PCMs, suspend non-AC97 CPU DAIs and
 * platforms, flush deferred work and record bias levels, signal DAPM
 * stream suspend, suspend idle CODECs, and finally suspend AC97 CPU
 * DAIs (last, since AC97 needs the link alive while codecs go down).
 * Links flagged ignore_suspend are skipped at every stage.
 */
int snd_soc_suspend(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	struct snd_soc_codec *codec;
	int i;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (list_empty(&card->codec_dev_list))
		return 0;

	/* Due to the resume being scheduled into a workqueue we could
	 * suspend before that's finished - wait for it to complete.
	 */
	snd_power_lock(card->snd_card);
	snd_power_wait(card->snd_card, SNDRV_CTL_POWER_D0);
	snd_power_unlock(card->snd_card);

	/* we're going to block userspace touching us until resume completes */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);

	/* mute any active DACs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
		struct snd_soc_dai_driver *drv = dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (drv->ops->digital_mute && dai->playback_active)
			drv->ops->digital_mute(dai, 1);
	}

	/* suspend all pcms */
	for (i = 0; i < card->num_rtd; i++) {
		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		snd_pcm_suspend_all(card->rtd[i].pcm);
	}

	/* machine-level pre-suspend hook, if provided */
	if (card->suspend_pre)
		card->suspend_pre(card);

	/* suspend CPU DAIs (non-AC97 only; AC97 ones are done last below)
	 * and each platform exactly once */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_platform *platform = card->rtd[i].platform;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control)
			cpu_dai->driver->suspend(cpu_dai);
		if (platform->driver->suspend && !platform->suspended) {
			platform->driver->suspend(cpu_dai);
			platform->suspended = 1;
		}
	}

	/* close any waiting streams and save state */
	for (i = 0; i < card->num_rtd; i++) {
		/* make sure the pending pmdown work has run before we
		 * snapshot the bias level for restore on resume */
		flush_delayed_work_sync(&card->rtd[i].delayed_work);
		card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
	}

	/* tell DAPM every active stream is being suspended */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai_driver *driver = card->rtd[i].codec_dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (driver->playback.stream_name != NULL)
			snd_soc_dapm_stream_event(&card->rtd[i], driver->playback.stream_name,
				SND_SOC_DAPM_STREAM_SUSPEND);

		if (driver->capture.stream_name != NULL)
			snd_soc_dapm_stream_event(&card->rtd[i], driver->capture.stream_name,
				SND_SOC_DAPM_STREAM_SUSPEND);
	}

	/* suspend all CODECs */
	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
		/* If there are paths active then the CODEC will be held with
		 * bias _ON and should not be suspended. */
		if (!codec->suspended && codec->driver->suspend) {
			switch (codec->dapm.bias_level) {
			case SND_SOC_BIAS_STANDBY:
			case SND_SOC_BIAS_OFF:
				codec->driver->suspend(codec, PMSG_SUSPEND);
				codec->suspended = 1;
				/* registers must be re-synced on resume */
				codec->cache_sync = 1;
				break;
			default:
				dev_dbg(codec->dev, "CODEC is on over suspend\n");
				break;
			}
		}
	}

	/* now it is safe to take down the AC97 control link */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control)
			cpu_dai->driver->suspend(cpu_dai);
	}

	/* machine-level post-suspend hook, if provided */
	if (card->suspend_post)
		card->suspend_post(card);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_suspend);
/* deferred resume work, so resume can complete before we finished
* setting our codec back up, which can be very slow on I2C
*/
/*
 * Mirrors snd_soc_suspend() in reverse order: AC97 CPU DAIs first,
 * then CODECs, DAPM RESUME stream events, unmute, and finally the
 * non-AC97 CPU DAIs and platforms.  Runs either directly (AC97 case)
 * or from the card's deferred_resume_work item.
 */
static void soc_resume_deferred(struct work_struct *work)
{
struct snd_soc_card *card =
container_of(work, struct snd_soc_card, deferred_resume_work);
struct snd_soc_codec *codec;
int i;
/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
* so userspace apps are blocked from touching us
*/
dev_dbg(card->dev, "starting resume work\n");
/* Bring us up into D2 so that DAPM starts enabling things */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2);
if (card->resume_pre)
card->resume_pre(card);
/* resume AC97 DAIs */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control)
cpu_dai->driver->resume(cpu_dai);
}
list_for_each_entry(codec, &card->codec_dev_list, card_list) {
/* If the CODEC was idle over suspend then it will have been
* left with bias OFF or STANDBY and suspended so we must now
* resume.  Otherwise the suspend was suppressed.
*/
if (codec->driver->resume && codec->suspended) {
switch (codec->dapm.bias_level) {
case SND_SOC_BIAS_STANDBY:
case SND_SOC_BIAS_OFF:
codec->driver->resume(codec);
codec->suspended = 0;
break;
default:
dev_dbg(codec->dev, "CODEC was on over suspend\n");
break;
}
}
}
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai_driver *driver = card->rtd[i].codec_dai->driver;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
if (driver->playback.stream_name != NULL)
snd_soc_dapm_stream_event(&card->rtd[i], driver->playback.stream_name,
SND_SOC_DAPM_STREAM_RESUME);
if (driver->capture.stream_name != NULL)
snd_soc_dapm_stream_event(&card->rtd[i], driver->capture.stream_name,
SND_SOC_DAPM_STREAM_RESUME);
}
/* unmute any active DACs */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *dai = card->rtd[i].codec_dai;
struct snd_soc_dai_driver *drv = dai->driver;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
if (drv->ops->digital_mute && dai->playback_active)
drv->ops->digital_mute(dai, 0);
}
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
struct snd_soc_platform *platform = card->rtd[i].platform;
if (card->rtd[i].dai_link->ignore_suspend)
continue;
if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control)
cpu_dai->driver->resume(cpu_dai);
/* platform suspended only once; resume it only once too */
if (platform->driver->resume && platform->suspended) {
platform->driver->resume(cpu_dai);
platform->suspended = 0;
}
}
if (card->resume_post)
card->resume_post(card);
dev_dbg(card->dev, "resume work completed\n");
/* userspace can access us now we are back as we were before */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
}
/* powers up audio subsystem after a suspend */
/*
 * Resume entry point.  AC97 cards are resumed synchronously (other
 * drivers hang off the AC97 bus and need it immediately); everything
 * else is pushed to the deferred_resume_work item because I2C/SPI
 * codec restore can be slow.
 */
int snd_soc_resume(struct device *dev)
{
struct snd_soc_card *card = dev_get_drvdata(dev);
int i, ac97_control = 0;
/* AC97 devices might have other drivers hanging off them so
* need to resume immediately. Other drivers don't have that
* problem and may take a substantial amount of time to resume
* due to I/O costs and anti-pop so handle them out of line.
*/
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
ac97_control |= cpu_dai->driver->ac97_control;
}
if (ac97_control) {
dev_dbg(dev, "Resuming AC97 immediately\n");
soc_resume_deferred(&card->deferred_resume_work);
} else {
dev_dbg(dev, "Scheduling resume work\n");
/* schedule_work() returns 0 if the item was already queued */
if (!schedule_work(&card->deferred_resume_work))
dev_err(dev, "resume work item may be lost\n");
}
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_resume);
#else
#define snd_soc_suspend NULL
#define snd_soc_resume NULL
#endif
/* empty fallback ops, installed for DAIs that register without any,
 * so core code can dereference dai->driver->ops without NULL checks */
static struct snd_soc_dai_ops null_dai_ops = {
};
/*
 * Try to bind DAI link @num on @card to its four component devices
 * (CPU DAI, CODEC, CODEC DAI, platform) by name-matching against the
 * global registration lists.  Safe to call repeatedly: already-found
 * components are kept and only the missing ones are searched for.
 * Returns 1 when this call made progress to/past the lookup stage
 * (the link may still be incomplete), 0 when the platform is missing.
 * The link is marked complete only when all four components are bound.
 */
static int soc_bind_dai_link(struct snd_soc_card *card, int num)
{
struct snd_soc_dai_link *dai_link = &card->dai_link[num];
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
struct snd_soc_codec *codec;
struct snd_soc_platform *platform;
struct snd_soc_dai *codec_dai, *cpu_dai;
const char *platform_name;
if (rtd->complete)
return 1;
dev_dbg(card->dev, "binding %s at idx %d\n", dai_link->name, num);
/* do we already have the CPU DAI for this link ? */
if (rtd->cpu_dai) {
goto find_codec;
}
/* no, then find CPU DAI from registered DAIs*/
list_for_each_entry(cpu_dai, &dai_list, list) {
if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) {
rtd->cpu_dai = cpu_dai;
goto find_codec;
}
}
dev_dbg(card->dev, "CPU DAI %s not registered\n",
dai_link->cpu_dai_name);
find_codec:
/* do we already have the CODEC for this link ? */
if (rtd->codec) {
goto find_platform;
}
/* no, then find CODEC from registered CODECs*/
list_for_each_entry(codec, &codec_list, list) {
if (!strcmp(codec->name, dai_link->codec_name)) {
rtd->codec = codec;
/* CODEC found, so find CODEC DAI from registered DAIs from this CODEC*/
/* the DAI must belong to this codec's device - names alone
 * are not unique across codecs */
list_for_each_entry(codec_dai, &dai_list, list) {
if (codec->dev == codec_dai->dev &&
!strcmp(codec_dai->name, dai_link->codec_dai_name)) {
rtd->codec_dai = codec_dai;
goto find_platform;
}
}
dev_dbg(card->dev, "CODEC DAI %s not registered\n",
dai_link->codec_dai_name);
goto find_platform;
}
}
dev_dbg(card->dev, "CODEC %s not registered\n",
dai_link->codec_name);
find_platform:
/* do we need a platform? */
if (rtd->platform)
goto out;
/* if there's no platform we match on the empty platform */
platform_name = dai_link->platform_name;
if (!platform_name)
platform_name = "snd-soc-dummy";
/* no, then find one from the set of registered platforms */
list_for_each_entry(platform, &platform_list, list) {
if (!strcmp(platform->name, platform_name)) {
rtd->platform = platform;
goto out;
}
}
dev_dbg(card->dev, "platform %s not registered\n",
dai_link->platform_name);
return 0;
out:
/* mark rtd as complete if we found all 4 of our client devices */
if (rtd->codec && rtd->codec_dai && rtd->platform && rtd->cpu_dai) {
rtd->complete = 1;
card->num_rtd++;
}
return 1;
}
/*
 * Undo soc_probe_codec(): run the driver's remove callback, free all
 * DAPM widgets and debugfs entries, drop the codec from the card list
 * and release the module reference taken at probe time.
 */
static void soc_remove_codec(struct snd_soc_codec *codec)
{
int err;
if (codec->driver->remove) {
err = codec->driver->remove(codec);
if (err < 0)
dev_err(codec->dev,
"asoc: failed to remove %s: %d\n",
codec->name, err);
}
/* Make sure all DAPM widgets are freed */
snd_soc_dapm_free(&codec->dapm);
soc_cleanup_codec_debugfs(codec);
codec->probed = 0;
list_del(&codec->card_list);
/* balances try_module_get() in soc_probe_codec() */
module_put(codec->dev->driver->owner);
}
/*
 * Tear down DAI link @num for one remove_order pass: unregister the
 * runtime device, then remove each component (CODEC DAI, platform,
 * CODEC, CPU DAI) whose driver requested removal at this @order.
 * Called once per order value by soc_remove_dai_links().
 */
static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai = rtd->codec_dai, *cpu_dai = rtd->cpu_dai;
int err;
/* unregister the rtd device */
if (rtd->dev_registered) {
device_remove_file(&rtd->dev, &dev_attr_pmdown_time);
device_remove_file(&rtd->dev, &dev_attr_codec_reg);
device_unregister(&rtd->dev);
rtd->dev_registered = 0;
}
/* remove the CODEC DAI */
if (codec_dai && codec_dai->probed &&
codec_dai->driver->remove_order == order) {
if (codec_dai->driver->remove) {
err = codec_dai->driver->remove(codec_dai);
if (err < 0)
printk(KERN_ERR "asoc: failed to remove %s\n", codec_dai->name);
}
codec_dai->probed = 0;
list_del(&codec_dai->card_list);
}
/* remove the platform */
if (platform && platform->probed &&
platform->driver->remove_order == order) {
if (platform->driver->remove) {
err = platform->driver->remove(platform);
if (err < 0)
printk(KERN_ERR "asoc: failed to remove %s\n", platform->name);
}
platform->probed = 0;
list_del(&platform->card_list);
/* balances try_module_get() in soc_probe_platform() */
module_put(platform->dev->driver->owner);
}
/* remove the CODEC */
if (codec && codec->probed &&
codec->driver->remove_order == order)
soc_remove_codec(codec);
/* remove the cpu_dai */
if (cpu_dai && cpu_dai->probed &&
cpu_dai->driver->remove_order == order) {
if (cpu_dai->driver->remove) {
err = cpu_dai->driver->remove(cpu_dai);
if (err < 0)
printk(KERN_ERR "asoc: failed to remove %s\n", cpu_dai->name);
}
cpu_dai->probed = 0;
list_del(&cpu_dai->card_list);
/* balances try_module_get() in soc_probe_dai_link() */
module_put(cpu_dai->dev->driver->owner);
}
}
/*
 * Tear down every DAI link on the card.  The teardown is done in
 * remove_order passes (FIRST..LAST) so components that care about
 * ordering are removed at the point their driver requested.
 */
static void soc_remove_dai_links(struct snd_soc_card *card)
{
	int order;

	for (order = SND_SOC_COMP_ORDER_FIRST;
	     order <= SND_SOC_COMP_ORDER_LAST; order++) {
		int link;

		for (link = 0; link < card->num_rtd; link++)
			soc_remove_dai_link(card, link, order);
	}

	card->num_rtd = 0;
}
/*
 * Install the optional control-name prefix for @codec, if the card's
 * codec_conf table has an entry whose dev_name matches the codec.
 * Only the first matching entry is used.
 */
static void soc_set_name_prefix(struct snd_soc_card *card,
				struct snd_soc_codec *codec)
{
	int idx;

	if (card->codec_conf == NULL)
		return;

	for (idx = 0; idx < card->num_configs; idx++) {
		struct snd_soc_codec_conf *conf = &card->codec_conf[idx];

		if (conf->dev_name && strcmp(codec->name, conf->dev_name) == 0) {
			codec->name_prefix = conf->name_prefix;
			return;
		}
	}
}
/*
 * Probe a CODEC for use on @card: pin its module, create debugfs
 * entries and any driver-declared DAPM widgets, run the driver probe
 * callback, then add controls/routes and put the codec on the card's
 * lists.  Returns 0 on success or a negative errno; on failure the
 * debugfs entries and module reference are released again.
 */
static int soc_probe_codec(struct snd_soc_card *card,
struct snd_soc_codec *codec)
{
int ret = 0;
const struct snd_soc_codec_driver *driver = codec->driver;
codec->card = card;
codec->dapm.card = card;
/* apply any control-name prefix before controls are created */
soc_set_name_prefix(card, codec);
if (!try_module_get(codec->dev->driver->owner))
return -ENODEV;
soc_init_codec_debugfs(codec);
/* widgets must exist before the driver probe may reference them */
if (driver->dapm_widgets)
snd_soc_dapm_new_controls(&codec->dapm, driver->dapm_widgets,
driver->num_dapm_widgets);
if (driver->probe) {
ret = driver->probe(codec);
if (ret < 0) {
dev_err(codec->dev,
"asoc: failed to probe CODEC %s: %d\n",
codec->name, ret);
goto err_probe;
}
}
if (driver->controls)
snd_soc_add_controls(codec, driver->controls,
driver->num_controls);
if (driver->dapm_routes)
snd_soc_dapm_add_routes(&codec->dapm, driver->dapm_routes,
driver->num_dapm_routes);
/* mark codec as probed and add to card codec list */
codec->probed = 1;
list_add(&codec->card_list, &card->codec_dev_list);
list_add(&codec->dapm.list, &card->dapm_list);
return 0;
err_probe:
soc_cleanup_codec_debugfs(codec);
module_put(codec->dev->driver->owner);
return ret;
}
/*
 * Probe a platform (DMA/PCM) driver for use on @card.  Same shape as
 * soc_probe_codec(): pin the module, create DAPM widgets, run the
 * driver probe, add controls/routes and link onto the card lists.
 * Returns 0 on success or a negative errno.
 */
static int soc_probe_platform(struct snd_soc_card *card,
struct snd_soc_platform *platform)
{
int ret = 0;
const struct snd_soc_platform_driver *driver = platform->driver;
platform->card = card;
platform->dapm.card = card;
if (!try_module_get(platform->dev->driver->owner))
return -ENODEV;
if (driver->dapm_widgets)
snd_soc_dapm_new_controls(&platform->dapm,
driver->dapm_widgets, driver->num_dapm_widgets);
if (driver->probe) {
ret = driver->probe(platform);
if (ret < 0) {
dev_err(platform->dev,
"asoc: failed to probe platform %s: %d\n",
platform->name, ret);
goto err_probe;
}
}
if (driver->controls)
snd_soc_add_platform_controls(platform, driver->controls,
driver->num_controls);
if (driver->dapm_routes)
snd_soc_dapm_add_routes(&platform->dapm, driver->dapm_routes,
driver->num_dapm_routes);
/* mark platform as probed and add to card platform list */
platform->probed = 1;
list_add(&platform->card_list, &card->platform_dev_list);
list_add(&platform->dapm.list, &card->dapm_list);
return 0;
err_probe:
module_put(platform->dev->driver->owner);
return ret;
}
static void rtd_release(struct device *dev) {}
/*
 * Finish bringing up one component after its drivers have probed.
 * @dailess selects between a DAI link (card->rtd[num]) and an
 * auxiliary device (card->rtd_aux[num]).  Runs the machine-level init
 * callback, instantiates all DAPM widgets, then registers the runtime
 * device and its sysfs files.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix vs. previous version: codec->name_prefix is now restored on the
 * init-callback error path too.  Previously an init failure returned
 * with the prefix still forced to NULL, so every control registered
 * for this codec afterwards silently lost its configured prefix.
 */
static int soc_post_component_init(struct snd_soc_card *card,
				   struct snd_soc_codec *codec,
				   int num, int dailess)
{
	struct snd_soc_dai_link *dai_link = NULL;
	struct snd_soc_aux_dev *aux_dev = NULL;
	struct snd_soc_pcm_runtime *rtd;
	const char *temp, *name;
	int ret = 0;

	if (!dailess) {
		dai_link = &card->dai_link[num];
		rtd = &card->rtd[num];
		name = dai_link->name;
	} else {
		aux_dev = &card->aux_dev[num];
		rtd = &card->rtd_aux[num];
		name = aux_dev->name;
	}
	rtd->card = card;

	/* machine controls, routes and widgets are not prefixed */
	temp = codec->name_prefix;
	codec->name_prefix = NULL;

	/* do machine specific initialization */
	if (!dailess && dai_link->init)
		ret = dai_link->init(rtd);
	else if (dailess && aux_dev->init)
		ret = aux_dev->init(&codec->dapm);

	/* restore the prefix unconditionally - an early error return
	 * must not leave the codec permanently unprefixed */
	codec->name_prefix = temp;

	if (ret < 0) {
		dev_err(card->dev, "asoc: failed to init %s: %d\n", name, ret);
		return ret;
	}

	/* Make sure all DAPM widgets are instantiated */
	snd_soc_dapm_new_widgets(&codec->dapm);

	/* register the rtd device */
	rtd->codec = codec;
	rtd->dev.parent = card->dev;
	rtd->dev.release = rtd_release;
	rtd->dev.init_name = name;
	mutex_init(&rtd->pcm_mutex);
	ret = device_register(&rtd->dev);
	if (ret < 0) {
		dev_err(card->dev,
			"asoc: failed to register runtime device: %d\n", ret);
		return ret;
	}
	rtd->dev_registered = 1;

	/* add DAPM sysfs entries for this codec */
	ret = snd_soc_dapm_sys_add(&rtd->dev);
	if (ret < 0)
		dev_err(codec->dev,
			"asoc: failed to add codec dapm sysfs entries: %d\n",
			ret);

	/* add codec sysfs entries */
	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
	if (ret < 0)
		dev_err(codec->dev,
			"asoc: failed to add codec sysfs files: %d\n", ret);

	return 0;
}
/*
 * Probe one DAI link for the given probe_order pass.  Each component
 * (CPU DAI, CODEC, platform, CODEC DAI) is probed only in the pass
 * matching its driver's probe_order; the PCM and runtime device are
 * created on the final (LAST) pass once everything is up.
 * Returns 0 on success or a negative errno.
 */
static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_dai_link *dai_link = &card->dai_link[num];
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai = rtd->codec_dai, *cpu_dai = rtd->cpu_dai;
int ret;
dev_dbg(card->dev, "probe %s dai link %d late %d\n",
card->name, num, order);
/* config components */
codec_dai->codec = codec;
cpu_dai->platform = platform;
codec_dai->card = card;
cpu_dai->card = card;
/* set default power off timeout */
rtd->pmdown_time = pmdown_time;
/* probe the cpu_dai */
if (!cpu_dai->probed &&
cpu_dai->driver->probe_order == order) {
/* pin the CPU DAI module; released in soc_remove_dai_link() */
if (!try_module_get(cpu_dai->dev->driver->owner))
return -ENODEV;
if (cpu_dai->driver->probe) {
ret = cpu_dai->driver->probe(cpu_dai);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n",
cpu_dai->name);
module_put(cpu_dai->dev->driver->owner);
return ret;
}
}
cpu_dai->probed = 1;
/* mark cpu_dai as probed and add to card dai list */
list_add(&cpu_dai->card_list, &card->dai_dev_list);
}
/* probe the CODEC */
if (!codec->probed &&
codec->driver->probe_order == order) {
ret = soc_probe_codec(card, codec);
if (ret < 0)
return ret;
}
/* probe the platform */
if (!platform->probed &&
platform->driver->probe_order == order) {
ret = soc_probe_platform(card, platform);
if (ret < 0)
return ret;
}
/* probe the CODEC DAI */
if (!codec_dai->probed && codec_dai->driver->probe_order == order) {
if (codec_dai->driver->probe) {
ret = codec_dai->driver->probe(codec_dai);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to probe CODEC DAI %s\n",
codec_dai->name);
return ret;
}
}
/* mark codec_dai as probed and add to card dai list */
codec_dai->probed = 1;
list_add(&codec_dai->card_list, &card->dai_dev_list);
}
/* complete DAI probe during last probe */
if (order != SND_SOC_COMP_ORDER_LAST)
return 0;
ret = soc_post_component_init(card, codec, num, 0);
if (ret)
return ret;
ret = device_create_file(&rtd->dev, &dev_attr_pmdown_time);
if (ret < 0)
printk(KERN_WARNING "asoc: failed to add pmdown_time sysfs\n");
/* create the pcm */
ret = soc_new_pcm(rtd, num);
if (ret < 0) {
printk(KERN_ERR "asoc: can't create pcm %s\n", dai_link->stream_name);
return ret;
}
/* add platform data for AC97 devices */
if (rtd->codec_dai->driver->ac97_control)
snd_ac97_dev_add_pdata(codec->ac97, rtd->cpu_dai->ac97_pdata);
return 0;
}
#ifdef CONFIG_SND_SOC_AC97_BUS
/*
 * Put the codec's AC97 device on the AC97 bus if this link uses AC97
 * and the codec created the device itself (ac97_created) and has not
 * registered it yet.  Returns 0 on success (including the "nothing to
 * do" cases) or a negative errno from the device registration.
 */
static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
{
int ret;
/* Only instantiate AC97 if not already done by the adaptor
* for the generic AC97 subsystem.
*/
if (rtd->codec_dai->driver->ac97_control && !rtd->codec->ac97_registered) {
/*
* It is possible that the AC97 device is already registered to
* the device subsystem. This happens when the device is created
* via snd_ac97_mixer(). Currently only SoC codec that does so
* is the generic AC97 glue but others might emerge.
*
* In those cases we don't try to register the device again.
*/
if (!rtd->codec->ac97_created)
return 0;
ret = soc_ac97_dev_register(rtd->codec);
if (ret < 0) {
printk(KERN_ERR "asoc: AC97 device register failed\n");
return ret;
}
rtd->codec->ac97_registered = 1;
}
return 0;
}
/* take the codec's AC97 device back off the bus, if we put it there */
static void soc_unregister_ac97_dai_link(struct snd_soc_codec *codec)
{
	if (!codec->ac97_registered)
		return;

	soc_ac97_dev_unregister(codec);
	codec->ac97_registered = 0;
}
#endif
/*
 * Probe auxiliary (non-DAI) codec device @num: locate the codec by
 * name in the global codec list, probe it and run the post-component
 * init as a "dailess" device.  Returns 0 on success, -ENODEV if the
 * codec is not registered, -EBUSY if it was already probed, or a
 * negative errno from the probe itself.
 */
static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
{
struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
struct snd_soc_codec *codec;
int ret = -ENODEV;
/* find CODEC from registered CODECs*/
list_for_each_entry(codec, &codec_list, list) {
if (!strcmp(codec->name, aux_dev->codec_name)) {
/* an aux dev cannot share a codec with a DAI link */
if (codec->probed) {
dev_err(codec->dev,
"asoc: codec already probed");
ret = -EBUSY;
goto out;
}
goto found;
}
}
/* codec not found */
dev_err(card->dev, "asoc: codec %s not found", aux_dev->codec_name);
goto out;
found:
ret = soc_probe_codec(card, codec);
if (ret < 0)
return ret;
/* dailess=1: use rtd_aux[num] and the aux_dev init callback */
ret = soc_post_component_init(card, codec, num, 1);
out:
return ret;
}
/*
 * Undo soc_probe_aux_dev(): unregister the aux runtime device and its
 * sysfs file, then remove the codec if it was probed.
 */
static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
struct snd_soc_codec *codec = rtd->codec;
/* unregister the rtd device */
if (rtd->dev_registered) {
device_remove_file(&rtd->dev, &dev_attr_codec_reg);
device_unregister(&rtd->dev);
rtd->dev_registered = 0;
}
if (codec && codec->probed)
soc_remove_codec(codec);
}
/*
 * Set up the register cache for @codec.  A non-zero @compress_type
 * (from the card's codec_conf table) overrides the codec driver's
 * default compression scheme.  Idempotent: returns immediately once
 * the cache has been initialised.  Returns 0 or a negative errno.
 */
static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
				    enum snd_soc_compress_type compress_type)
{
	int err;

	if (codec->cache_init)
		return 0;

	/* card configuration may override the driver's compress type */
	if (compress_type && codec->compress_type != compress_type)
		codec->compress_type = compress_type;

	err = snd_soc_cache_init(codec);
	if (err < 0) {
		dev_err(codec->dev, "Failed to set cache compression type: %d\n",
			err);
		return err;
	}

	codec->cache_init = 1;
	return 0;
}
/*
 * Attempt to fully instantiate @card.  Silently returns (leaving the
 * card un-instantiated) until every DAI link can be bound, so this is
 * safe to call each time a new component registers.  Once everything
 * is bound it creates the ALSA card, probes all components in
 * probe_order passes, adds card-level controls/routes, and registers
 * the sound card.  Errors unwind via the labels at the bottom.
 */
static void snd_soc_instantiate_card(struct snd_soc_card *card)
{
struct snd_soc_codec *codec;
struct snd_soc_codec_conf *codec_conf;
enum snd_soc_compress_type compress_type;
int ret, i, order;
mutex_lock(&card->mutex);
/* already done - nothing to do */
if (card->instantiated) {
mutex_unlock(&card->mutex);
return;
}
/* bind DAIs */
for (i = 0; i < card->num_links; i++)
soc_bind_dai_link(card, i);
/* bind completed ? */
if (card->num_rtd != card->num_links) {
mutex_unlock(&card->mutex);
return;
}
/* initialize the register cache for each available codec */
list_for_each_entry(codec, &codec_list, list) {
if (codec->cache_init)
continue;
/* by default we don't override the compress_type */
compress_type = 0;
/* check to see if we need to override the compress_type */
for (i = 0; i < card->num_configs; ++i) {
codec_conf = &card->codec_conf[i];
if (!strcmp(codec->name, codec_conf->dev_name)) {
compress_type = codec_conf->compress_type;
if (compress_type && compress_type
!= codec->compress_type)
break;
}
}
ret = snd_soc_init_codec_cache(codec, compress_type);
if (ret < 0) {
mutex_unlock(&card->mutex);
return;
}
}
/* card bind complete so register a sound card */
ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
card->owner, 0, &card->snd_card);
if (ret < 0) {
printk(KERN_ERR "asoc: can't create sound card for card %s\n",
card->name);
mutex_unlock(&card->mutex);
return;
}
card->snd_card->dev = card->dev;
card->dapm.bias_level = SND_SOC_BIAS_OFF;
card->dapm.dev = card->dev;
card->dapm.card = card;
list_add(&card->dapm.list, &card->dapm_list);
#ifdef CONFIG_DEBUG_FS
snd_soc_dapm_debugfs_init(&card->dapm, card->debugfs_card_root);
#endif
#ifdef CONFIG_PM_SLEEP
/* deferred resume work */
INIT_WORK(&card->deferred_resume_work, soc_resume_deferred);
#endif
if (card->dapm_widgets)
snd_soc_dapm_new_controls(&card->dapm, card->dapm_widgets,
card->num_dapm_widgets);
/* initialise the sound card only once */
if (card->probe) {
ret = card->probe(card);
if (ret < 0)
goto card_probe_error;
}
/* early DAI link probe */
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
for (i = 0; i < card->num_links; i++) {
ret = soc_probe_dai_link(card, i, order);
if (ret < 0) {
pr_err("asoc: failed to instantiate card %s: %d\n",
card->name, ret);
goto probe_dai_err;
}
}
}
for (i = 0; i < card->num_aux_devs; i++) {
ret = soc_probe_aux_dev(card, i);
if (ret < 0) {
pr_err("asoc: failed to add auxiliary devices %s: %d\n",
card->name, ret);
goto probe_aux_dev_err;
}
}
/* We should have a non-codec control add function but we don't */
if (card->controls)
snd_soc_add_controls(list_first_entry(&card->codec_dev_list,
struct snd_soc_codec,
card_list),
card->controls,
card->num_controls);
if (card->dapm_routes)
snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
"%s", card->name);
snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
"%s", card->long_name ? card->long_name : card->name);
snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
"%s", card->driver_name ? card->driver_name : card->name);
/* sanitise the driver name: anything that is not alphanumeric,
 * '_' or '-' is replaced with '_' */
for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
switch (card->snd_card->driver[i]) {
case '_':
case '-':
case '\0':
break;
default:
if (!isalnum(card->snd_card->driver[i]))
card->snd_card->driver[i] = '_';
break;
}
}
if (card->late_probe) {
ret = card->late_probe(card);
if (ret < 0) {
dev_err(card->dev, "%s late_probe() failed: %d\n",
card->name, ret);
goto probe_aux_dev_err;
}
}
ret = snd_card_register(card->snd_card);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to register soundcard for %s\n", card->name);
goto probe_aux_dev_err;
}
#ifdef CONFIG_SND_SOC_AC97_BUS
/* register any AC97 codecs */
for (i = 0; i < card->num_rtd; i++) {
ret = soc_register_ac97_dai_link(&card->rtd[i]);
if (ret < 0) {
printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name);
/* unwind the AC97 links registered so far */
while (--i >= 0)
soc_unregister_ac97_dai_link(card->rtd[i].codec);
goto probe_aux_dev_err;
}
}
#endif
card->instantiated = 1;
mutex_unlock(&card->mutex);
return;
probe_aux_dev_err:
for (i = 0; i < card->num_aux_devs; i++)
soc_remove_aux_dev(card, i);
probe_dai_err:
soc_remove_dai_links(card);
card_probe_error:
if (card->remove)
card->remove(card);
snd_card_free(card->snd_card);
mutex_unlock(&card->mutex);
}
/*
* Attempt to initialise any uninitialised cards. Must be called with
* client_mutex.
*/
/* called whenever a new component registers, so cards whose last
 * missing component just arrived get instantiated */
static void snd_soc_instantiate_cards(void)
{
struct snd_soc_card *card;
list_for_each_entry(card, &card_list, list)
snd_soc_instantiate_card(card);
}
/* probes a new socdev */
/*
 * Legacy "soc-audio" platform-device probe: the machine driver has
 * stashed the card in drvdata; just register it.
 */
static int soc_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
int ret = 0;
/*
* no card, so machine driver should be registering card
* we should not be here in that case so ret error
*/
if (!card)
return -EINVAL;
/* Bodge while we unpick instantiation */
card->dev = &pdev->dev;
ret = snd_soc_register_card(card);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to register card\n");
return ret;
}
return 0;
}
/*
 * Free everything snd_soc_instantiate_card() set up: flush pending
 * pmdown work, remove aux devices and DAI links, drop debugfs, run
 * the card remove hook and free the DAPM context, runtime array and
 * ALSA card.  Always returns 0.
 */
static int soc_cleanup_card_resources(struct snd_soc_card *card)
{
int i;
/* make sure any delayed work runs */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
flush_delayed_work_sync(&rtd->delayed_work);
}
/* remove auxiliary devices */
for (i = 0; i < card->num_aux_devs; i++)
soc_remove_aux_dev(card, i);
/* remove and free each DAI */
soc_remove_dai_links(card);
soc_cleanup_card_debugfs(card);
/* remove the card */
if (card->remove)
card->remove(card);
snd_soc_dapm_free(&card->dapm);
kfree(card->rtd);
snd_card_free(card->snd_card);
return 0;
}
/* removes a socdev */
static int soc_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
return 0;
}
/*
 * PM poweroff handler: flush pending power-down work immediately (we
 * are shutting down, so there is no reason to wait for the timeout)
 * and shut down all DAPM paths.  No-op for cards that never finished
 * instantiating.
 */
int snd_soc_poweroff(struct device *dev)
{
struct snd_soc_card *card = dev_get_drvdata(dev);
int i;
if (!card->instantiated)
return 0;
/* Flush out pmdown_time work - we actually do want to run it
* now, we're shutting down so no imminent restart. */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
flush_delayed_work_sync(&rtd->delayed_work);
}
snd_soc_dapm_shutdown(card);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_poweroff);
/* shared dev_pm_ops for ASoC cards; machine drivers may reference
 * this directly from their own platform_driver */
const struct dev_pm_ops snd_soc_pm_ops = {
.suspend = snd_soc_suspend,
.resume = snd_soc_resume,
.poweroff = snd_soc_poweroff,
};
EXPORT_SYMBOL_GPL(snd_soc_pm_ops);
/* ASoC platform driver */
/* legacy binding: machine drivers create a "soc-audio" platform
 * device with the card as drvdata instead of calling
 * snd_soc_register_card() directly */
static struct platform_driver soc_driver = {
.driver = {
.name = "soc-audio",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = soc_probe,
.remove = soc_remove,
};
/**
 * snd_soc_codec_volatile_register: Report if a register is volatile.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is volatile.
 * Registers default to non-volatile when the codec provides no
 * volatile_register callback.
 */
int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	if (!codec->volatile_register)
		return 0;

	return codec->volatile_register(codec, reg);
}
EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register);
/**
 * snd_soc_codec_readable_register: Report if a register is readable.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is readable.
 * Registers default to readable when the codec provides no
 * readable_register callback.
 */
int snd_soc_codec_readable_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	if (!codec->readable_register)
		return 1;

	return codec->readable_register(codec, reg);
}
EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register);
/**
 * snd_soc_codec_writable_register: Report if a register is writable.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is writable.
 * Registers default to writable when the codec provides no
 * writable_register callback.
 */
int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	if (!codec->writable_register)
		return 1;

	return codec->writable_register(codec, reg);
}
EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);
/*
 * Read a platform driver register @reg, with debug logging and a
 * tracepoint.  Returns the value read, or -1 when the platform driver
 * provides no read callback.
 */
int snd_soc_platform_read(struct snd_soc_platform *platform,
			  unsigned int reg)
{
	unsigned int val;

	if (!platform->driver->read) {
		dev_err(platform->dev, "platform has no read back\n");
		return -1;
	}

	val = platform->driver->read(platform, reg);
	dev_dbg(platform->dev, "read %x => %x\n", reg, val);
	trace_snd_soc_preg_read(platform, reg, val);

	return val;
}
EXPORT_SYMBOL_GPL(snd_soc_platform_read);
/*
 * Write @val to platform driver register @reg, with debug logging and
 * a tracepoint.  Returns the driver's write result, or -1 when the
 * platform driver provides no write callback.
 */
int snd_soc_platform_write(struct snd_soc_platform *platform,
			   unsigned int reg, unsigned int val)
{
	if (!platform->driver->write) {
		dev_err(platform->dev, "platform has no write back\n");
		return -1;
	}

	dev_dbg(platform->dev, "write %x = %x\n", reg, val);
	trace_snd_soc_preg_write(platform, reg, val);

	return platform->driver->write(platform, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_platform_write);
/**
* snd_soc_new_ac97_codec - initialise AC97 device
* @codec: audio codec
* @ops: AC97 bus operations
* @num: AC97 codec number
*
* Initialises AC97 codec resources for use by ad-hoc devices only.
* Allocates codec->ac97 and its bus structure; returns 0 on success
* or -ENOMEM, leaving codec->ac97 NULL on failure.
*/
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
struct snd_ac97_bus_ops *ops, int num)
{
mutex_lock(&codec->mutex);
codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
if (codec->ac97 == NULL) {
mutex_unlock(&codec->mutex);
return -ENOMEM;
}
codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
if (codec->ac97->bus == NULL) {
/* unwind the first allocation on partial failure */
kfree(codec->ac97);
codec->ac97 = NULL;
mutex_unlock(&codec->mutex);
return -ENOMEM;
}
codec->ac97->bus->ops = ops;
codec->ac97->num = num;
/*
* Mark the AC97 device to be created by us. This way we ensure that the
* device will be registered with the device subsystem later on.
*/
codec->ac97_created = 1;
mutex_unlock(&codec->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
/**
* snd_soc_free_ac97_codec - free AC97 codec device
* @codec: audio codec
*
* Frees AC97 codec device resources.
* NOTE(review): codec->ac97 is dereferenced unconditionally, so this
* presumably must only be called after a successful
* snd_soc_new_ac97_codec() - confirm against callers.
*/
void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
{
mutex_lock(&codec->mutex);
#ifdef CONFIG_SND_SOC_AC97_BUS
/* take the device off the AC97 bus first, if we registered it */
soc_unregister_ac97_dai_link(codec);
#endif
kfree(codec->ac97->bus);
kfree(codec->ac97);
codec->ac97 = NULL;
codec->ac97_created = 0;
mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
/* read codec register @reg via the codec's read op, with debug
 * logging and a tracepoint */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
{
	unsigned int val = codec->read(codec, reg);

	dev_dbg(codec->dev, "read %x => %x\n", reg, val);
	trace_snd_soc_reg_read(codec, reg, val);

	return val;
}
EXPORT_SYMBOL_GPL(snd_soc_read);
/* write @val to codec register @reg via the codec's write op, with
 * debug logging and a tracepoint; returns the op's result */
unsigned int snd_soc_write(struct snd_soc_codec *codec,
			   unsigned int reg, unsigned int val)
{
	dev_dbg(codec->dev, "write %x = %x\n", reg, val);
	trace_snd_soc_reg_write(codec, reg, val);

	return codec->write(codec, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_write);
/* pass a raw bulk write straight through to the codec's bulk op */
unsigned int snd_soc_bulk_write_raw(struct snd_soc_codec *codec,
				    unsigned int reg, const void *data,
				    size_t len)
{
	return codec->bulk_write_raw(codec, reg, data, len);
}
EXPORT_SYMBOL_GPL(snd_soc_bulk_write_raw);
/**
 * snd_soc_update_bits - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Read-modify-write of the bits selected by @mask; the register is
 * only written when the masked bits actually change.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
			unsigned int mask, unsigned int value)
{
	unsigned int old_val, new_val;
	int ret;

	ret = snd_soc_read(codec, reg);
	if (ret < 0)
		return ret;

	old_val = ret;
	new_val = (old_val & ~mask) | (value & mask);
	if (new_val == old_val)
		return 0;

	ret = snd_soc_write(codec, reg, new_val);
	if (ret < 0)
		return ret;

	return 1;
}
EXPORT_SYMBOL_GPL(snd_soc_update_bits);
/**
 * snd_soc_update_bits_locked - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Same as snd_soc_update_bits() but performs the read-modify-write
 * under the codec mutex.
 *
 * Returns 1 for change else 0.
 */
int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
			       unsigned short reg, unsigned int mask,
			       unsigned int value)
{
	int ret;

	mutex_lock(&codec->mutex);
	ret = snd_soc_update_bits(codec, reg, mask, value);
	mutex_unlock(&codec->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_update_bits_locked);
/**
 * snd_soc_test_bits - test register for change
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Tests a register with a new value and checks if the new value is
 * different from the old value.  Nothing is written back.
 *
 * Returns 1 for change else 0.
 */
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
		      unsigned int mask, unsigned int value)
{
	unsigned int old_val, new_val;

	old_val = snd_soc_read(codec, reg);
	new_val = (old_val & ~mask) | value;

	return old_val != new_val;
}
EXPORT_SYMBOL_GPL(snd_soc_test_bits);
/**
* snd_soc_set_runtime_hwparams - set the runtime hardware parameters
* @substream: the pcm substream
* @hw: the hardware parameters
*
* Sets the substream runtime hardware parameters.
*/
int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
const struct snd_pcm_hardware *hw)
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->hw.info = hw->info;
runtime->hw.formats = hw->formats;
runtime->hw.period_bytes_min = hw->period_bytes_min;
runtime->hw.period_bytes_max = hw->period_bytes_max;
runtime->hw.periods_min = hw->periods_min;
runtime->hw.periods_max = hw->periods_max;
runtime->hw.buffer_bytes_max = hw->buffer_bytes_max;
runtime->hw.fifo_size = hw->fifo_size;
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_runtime_hwparams);
/**
* snd_soc_cnew - create new control
* @_template: control template
* @data: control private data
* @long_name: control long name
* @prefix: control name prefix
*
* Create a new mixer control from a template control.
*
* Returns 0 for success, else error.
*/
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
void *data, char *long_name,
const char *prefix)
{
struct snd_kcontrol_new template;
struct snd_kcontrol *kcontrol;
char *name = NULL;
int name_len;
memcpy(&template, _template, sizeof(template));
template.index = 0;
if (!long_name)
long_name = template.name;
if (prefix) {
name_len = strlen(long_name) + strlen(prefix) + 2;
name = kmalloc(name_len, GFP_KERNEL);
if (!name)
return NULL;
snprintf(name, name_len, "%s %s", prefix, long_name);
template.name = name;
} else {
template.name = long_name;
}
kcontrol = snd_ctl_new1(&template, data);
kfree(name);
return kcontrol;
}
EXPORT_SYMBOL_GPL(snd_soc_cnew);
/**
 * snd_soc_add_controls - add an array of controls to a codec.
 * Convenience function to add a list of controls. Many codecs were
 * duplicating this code.
 *
 * @codec: codec to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_controls(struct snd_soc_codec *codec,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = codec->card->snd_card;
	int i;

	for (i = 0; i < num_controls; i++) {
		const struct snd_kcontrol_new *c = &controls[i];
		int ret;

		ret = snd_ctl_add(card, snd_soc_cnew(c, codec, c->name,
						     codec->name_prefix));
		if (ret < 0) {
			dev_err(codec->dev, "%s: Failed to add %s: %d\n",
				codec->name, c->name, ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_add_controls);
/**
 * snd_soc_add_platform_controls - add an array of controls to a platform.
 * Convenience function to add a list of controls.
 *
 * @platform: platform to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = platform->card->snd_card;
	int i;

	for (i = 0; i < num_controls; i++) {
		const struct snd_kcontrol_new *c = &controls[i];
		int ret;

		ret = snd_ctl_add(card, snd_soc_cnew(c, platform,
						     c->name, NULL));
		if (ret < 0) {
			dev_err(platform->dev, "Failed to add %s %d\n",c->name, ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_add_platform_controls);
/**
 * snd_soc_info_enum_double - enumerated double mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a double enumerated
 * mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	/* one channel when both share a shift, two otherwise */
	uinfo->count = e->shift_l == e->shift_r ? 1 : 2;
	uinfo->value.enumerated.items = e->max;

	/* clamp the queried item to the last valid index */
	if (uinfo->value.enumerated.item > e->max - 1)
		uinfo->value.enumerated.item = e->max - 1;
	/* enumerated.name is a fixed-size buffer: use strlcpy (as the rest
	 * of this file does) instead of strcpy, which would overflow it if
	 * a text string is longer than the buffer */
	strlcpy(uinfo->value.enumerated.name,
		e->texts[uinfo->value.enumerated.item],
		sizeof(uinfo->value.enumerated.name));
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
/**
 * snd_soc_get_enum_double - enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double enumerated mixer.
 *
 * Returns 0 for success.
 */
int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, width, field_mask;

	/* smallest power of two that covers e->max items; the field mask
	 * is that power of two minus one */
	for (width = 1; width < e->max; width <<= 1)
		;
	field_mask = width - 1;

	reg_val = snd_soc_read(codec, e->reg);
	ucontrol->value.enumerated.item[0] =
		(reg_val >> e->shift_l) & field_mask;
	if (e->shift_l != e->shift_r)
		ucontrol->value.enumerated.item[1] =
			(reg_val >> e->shift_r) & field_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);
/**
 * snd_soc_put_enum_double - enumerated double mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double enumerated mixer.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int width, field_mask, reg_val, reg_mask;

	/* field mask: smallest power of two covering e->max, minus one */
	for (width = 1; width < e->max; width <<= 1)
		;
	field_mask = width - 1;

	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;
	reg_val = ucontrol->value.enumerated.item[0] << e->shift_l;
	reg_mask = field_mask << e->shift_l;

	if (e->shift_l != e->shift_r) {
		if (ucontrol->value.enumerated.item[1] > e->max - 1)
			return -EINVAL;
		reg_val |= ucontrol->value.enumerated.item[1] << e->shift_r;
		reg_mask |= field_mask << e->shift_r;
	}

	return snd_soc_update_bits_locked(codec, e->reg, reg_mask, reg_val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);
/**
 * snd_soc_get_value_enum_double - semi enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double semi enumerated mixer.
 *
 * Semi enumerated mixer: the enumerated items are referred as values. Can be
 * used for handling bitfield coded enumeration for example.
 *
 * Returns 0 for success.
 */
int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, field, idx;

	reg_val = snd_soc_read(codec, e->reg);

	/* map the raw field back to its enumerated index; an unmatched
	 * field leaves idx == e->max, same as the original search loop */
	field = (reg_val >> e->shift_l) & e->mask;
	for (idx = 0; idx < e->max; idx++)
		if (field == e->values[idx])
			break;
	ucontrol->value.enumerated.item[0] = idx;

	if (e->shift_l != e->shift_r) {
		field = (reg_val >> e->shift_r) & e->mask;
		for (idx = 0; idx < e->max; idx++)
			if (field == e->values[idx])
				break;
		ucontrol->value.enumerated.item[1] = idx;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_value_enum_double);
/**
 * snd_soc_put_value_enum_double - semi enumerated double mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double semi enumerated mixer.
 *
 * Semi enumerated mixer: the enumerated items are referred as values. Can be
 * used for handling bitfield coded enumeration for example.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, reg_mask;

	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;
	reg_val = e->values[ucontrol->value.enumerated.item[0]] << e->shift_l;
	reg_mask = e->mask << e->shift_l;

	if (e->shift_l != e->shift_r) {
		if (ucontrol->value.enumerated.item[1] > e->max - 1)
			return -EINVAL;
		reg_val |= e->values[ucontrol->value.enumerated.item[1]]
				<< e->shift_r;
		reg_mask |= e->mask << e->shift_r;
	}

	return snd_soc_update_bits_locked(codec, e->reg, reg_mask, reg_val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_value_enum_double);
/**
 * snd_soc_info_enum_ext - external enumerated single mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about an external enumerated
 * single mixer.
 *
 * Returns 0 for success.
 */
int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = e->max;

	/* clamp the queried item to the last valid index */
	if (uinfo->value.enumerated.item > e->max - 1)
		uinfo->value.enumerated.item = e->max - 1;
	/* enumerated.name is a fixed-size buffer: use strlcpy (as the rest
	 * of this file does) instead of strcpy, which would overflow it if
	 * a text string is longer than the buffer */
	strlcpy(uinfo->value.enumerated.name,
		e->texts[uinfo->value.enumerated.item],
		sizeof(uinfo->value.enumerated.name));
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_ext);
/**
 * snd_soc_info_volsw_ext - external single mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a single external mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	int max = kcontrol->private_value;
	int looks_boolean;

	/* on/off controls (max == 1) report as booleans unless their name
	 * marks them as a volume */
	looks_boolean = (max == 1) && !strstr(kcontrol->id.name, " Volume");
	uinfo->type = looks_boolean ? SNDRV_CTL_ELEM_TYPE_BOOLEAN
				    : SNDRV_CTL_ELEM_TYPE_INTEGER;

	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = max;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_ext);
/**
 * snd_soc_info_volsw - single mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a single mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int platform_max;
	int looks_boolean;

	/* lazily seed platform_max from the control's full range */
	if (!mc->platform_max)
		mc->platform_max = mc->max;
	platform_max = mc->platform_max;

	/* on/off controls report as booleans unless named "... Volume" */
	looks_boolean = (platform_max == 1) &&
			!strstr(kcontrol->id.name, " Volume");
	uinfo->type = looks_boolean ? SNDRV_CTL_ELEM_TYPE_BOOLEAN
				    : SNDRV_CTL_ELEM_TYPE_INTEGER;

	/* one channel when both shifts coincide, otherwise stereo */
	uinfo->count = shift == rshift ? 1 : 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = platform_max;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
/**
 * snd_soc_get_volsw - single mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a single mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	long chan;

	chan = (snd_soc_read(codec, reg) >> shift) & mask;
	if (invert)
		chan = max - chan;
	ucontrol->value.integer.value[0] = chan;

	/* a second channel exists only when the shifts differ */
	if (shift != rshift) {
		chan = (snd_soc_read(codec, reg) >> rshift) & mask;
		if (invert)
			chan = max - chan;
		ucontrol->value.integer.value[1] = chan;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw);
/**
 * snd_soc_put_volsw - single mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a single mixer control.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int chan, set_val, set_mask;

	chan = ucontrol->value.integer.value[0] & mask;
	if (invert)
		chan = max - chan;
	set_mask = mask << shift;
	set_val = chan << shift;

	/* fold in the second channel when the shifts differ */
	if (shift != rshift) {
		chan = ucontrol->value.integer.value[1] & mask;
		if (invert)
			chan = max - chan;
		set_mask |= mask << rshift;
		set_val |= chan << rshift;
	}

	return snd_soc_update_bits_locked(codec, reg, set_mask, set_val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
/**
 * snd_soc_info_volsw_2r - double mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a double mixer control that
 * spans 2 codec registers.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_2r(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int platform_max;
	int looks_boolean;

	/* lazily seed platform_max from the control's full range */
	if (!mc->platform_max)
		mc->platform_max = mc->max;
	platform_max = mc->platform_max;

	looks_boolean = (platform_max == 1) &&
			!strstr(kcontrol->id.name, " Volume");
	uinfo->type = looks_boolean ? SNDRV_CTL_ELEM_TYPE_BOOLEAN
				    : SNDRV_CTL_ELEM_TYPE_INTEGER;

	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = platform_max;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_2r);
/**
 * snd_soc_get_volsw_2r - double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double mixer control that spans 2 registers.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_2r(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	long lval, rval;

	lval = (snd_soc_read(codec, reg) >> shift) & mask;
	rval = (snd_soc_read(codec, reg2) >> shift) & mask;
	if (invert) {
		lval = max - lval;
		rval = max - rval;
	}

	ucontrol->value.integer.value[0] = lval;
	ucontrol->value.integer.value[1] = rval;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_2r);
/**
 * snd_soc_put_volsw_2r - double mixer set callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double mixer control that spans 2 registers.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_put_volsw_2r(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int wmask = mask << shift;
	unsigned int lval, rval;
	int ret;

	lval = ucontrol->value.integer.value[0] & mask;
	rval = ucontrol->value.integer.value[1] & mask;
	if (invert) {
		lval = max - lval;
		rval = max - rval;
	}

	ret = snd_soc_update_bits_locked(codec, reg, wmask, lval << shift);
	if (ret < 0)
		return ret;

	return snd_soc_update_bits_locked(codec, reg2, wmask, rval << shift);
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_2r);
/**
 * snd_soc_info_volsw_s8 - signed mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int min = mc->min;

	/* lazily seed platform_max from the control's full range */
	if (!mc->platform_max)
		mc->platform_max = mc->max;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	/* user-space range is rebased to start at 0 */
	uinfo->value.integer.max = mc->platform_max - min;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8);
/**
 * snd_soc_get_volsw_s8 - signed mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	int min = mc->min;
	int reg_val = snd_soc_read(codec, reg);

	/* low byte is channel 0, high byte channel 1; each is a signed
	 * 8-bit value rebased to start at 0 for user space */
	ucontrol->value.integer.value[0] =
		((signed char)(reg_val & 0xff)) - min;
	ucontrol->value.integer.value[1] =
		((signed char)((reg_val >> 8) & 0xff)) - min;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8);
/**
 * snd_soc_put_volsw_s8 - signed mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a signed mixer control.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	int min = mc->min;
	unsigned int val;

	/* rebase the user-space values by min and pack channel 0 into the
	 * low byte, channel 1 into the high byte */
	val = (ucontrol->value.integer.value[0]+min) & 0xff;
	val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;

	return snd_soc_update_bits_locked(codec, reg, 0xffff, val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);
/**
 * snd_soc_limit_volume - Set new limit to an existing volume control.
 *
 * @codec: where to look for the control
 * @name: Name of the control
 * @max: new maximum limit
 *
 * Return 0 for success, else error.
 */
int snd_soc_limit_volume(struct snd_soc_codec *codec,
	const char *name, int max)
{
	struct snd_card *card = codec->card->snd_card;
	struct snd_kcontrol *kctl;

	/* Sanity check for name and max */
	if (unlikely(!name || max <= 0))
		return -EINVAL;

	list_for_each_entry(kctl, &card->controls, list) {
		if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
			struct soc_mixer_control *mc =
				(struct soc_mixer_control *)kctl->private_value;

			/* only tighten the range, never widen it */
			if (max > mc->max)
				break;
			mc->platform_max = max;
			return 0;
		}
	}

	/* control not found or limit out of range */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
/**
 * snd_soc_info_volsw_2r_sx - double with tlv and variable data size
 * mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_2r_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	/* user-space range is rebased to start at 0 */
	uinfo->value.integer.max = mc->max - mc->min;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_2r_sx);
/**
 * snd_soc_get_volsw_2r_sx - double with tlv and variable data size
 * mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_2r_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int mask = (1 << mc->shift) - 1;
	int min = mc->min;
	int lraw, rraw;

	lraw = snd_soc_read(codec, mc->reg) & mask;
	rraw = snd_soc_read(codec, mc->rreg) & mask;

	/* rebase each raw field by min, wrapping within the field mask */
	ucontrol->value.integer.value[0] = ((lraw & 0xff) - min) & mask;
	ucontrol->value.integer.value[1] = ((rraw & 0xff) - min) & mask;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_2r_sx);
/**
 * snd_soc_put_volsw_2r_sx - double with tlv and variable data size
 * mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw_2r_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int mask = (1 << mc->shift) - 1;
	int min = mc->min;
	unsigned int lval, rval, oldl, oldr;
	int ret;

	lval = ((ucontrol->value.integer.value[0] + min) & 0xff) & mask;
	rval = ((ucontrol->value.integer.value[1] + min) & 0xff) & mask;

	/* read both current values before writing either register */
	oldl = snd_soc_read(codec, mc->reg) & mask;
	oldr = snd_soc_read(codec, mc->rreg) & mask;

	if (oldl != lval) {
		ret = snd_soc_write(codec, mc->reg, lval);
		if (ret < 0)
			return ret;
	}
	if (oldr != rval) {
		ret = snd_soc_write(codec, mc->rreg, rval);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_2r_sx);
/**
 * snd_soc_dai_set_sysclk - configure DAI system or master clock.
 * @dai: DAI
 * @clk_id: DAI specific clock ID
 * @freq: new clock frequency in Hz
 * @dir: new clock direction - input/output.
 *
 * Configures the DAI master (MCLK) or system (SYSCLK) clocking.
 */
int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
	unsigned int freq, int dir)
{
	/* prefer the DAI's own handler, then fall back to the codec driver */
	if (dai->driver && dai->driver->ops->set_sysclk)
		return dai->driver->ops->set_sysclk(dai, clk_id, freq, dir);

	if (dai->codec && dai->codec->driver->set_sysclk)
		return dai->codec->driver->set_sysclk(dai->codec, clk_id,
						      freq, dir);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);
/**
 * snd_soc_codec_set_sysclk - configure CODEC system or master clock.
 * @codec: CODEC
 * @clk_id: DAI specific clock ID
 * @freq: new clock frequency in Hz
 * @dir: new clock direction - input/output.
 *
 * Configures the CODEC master (MCLK) or system (SYSCLK) clocking.
 */
int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
	unsigned int freq, int dir)
{
	if (!codec->driver->set_sysclk)
		return -EINVAL;

	return codec->driver->set_sysclk(codec, clk_id, freq, dir);
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_sysclk);
/**
 * snd_soc_dai_set_clkdiv - configure DAI clock dividers.
 * @dai: DAI
 * @div_id: DAI specific clock divider ID
 * @div: new clock divisor.
 *
 * Configures the clock dividers. This is used to derive the best DAI bit and
 * frame clocks from the system or master clock. It's best to set the DAI bit
 * and frame clocks as low as possible to save system power.
 */
int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
	int div_id, int div)
{
	if (!dai->driver || !dai->driver->ops->set_clkdiv)
		return -EINVAL;

	return dai->driver->ops->set_clkdiv(dai, div_id, div);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);
/**
 * snd_soc_dai_set_pll - configure DAI PLL.
 * @dai: DAI
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
	unsigned int freq_in, unsigned int freq_out)
{
	/* prefer the DAI's own handler, then fall back to the codec driver */
	if (dai->driver && dai->driver->ops->set_pll)
		return dai->driver->ops->set_pll(dai, pll_id, source,
						 freq_in, freq_out);

	if (dai->codec && dai->codec->driver->set_pll)
		return dai->codec->driver->set_pll(dai->codec, pll_id, source,
						   freq_in, freq_out);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);
/**
 * snd_soc_codec_set_pll - configure codec PLL.
 * @codec: CODEC
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
	unsigned int freq_in, unsigned int freq_out)
{
	if (!codec->driver->set_pll)
		return -EINVAL;

	return codec->driver->set_pll(codec, pll_id, source,
				      freq_in, freq_out);
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_pll);
/**
 * snd_soc_dai_set_fmt - configure DAI hardware audio format.
 * @dai: DAI
 * @fmt: SND_SOC_DAIFMT_ format value.
 *
 * Configures the DAI hardware format and clocking.
 */
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	if (!dai->driver || !dai->driver->ops->set_fmt)
		return -EINVAL;

	return dai->driver->ops->set_fmt(dai, fmt);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
/**
 * snd_soc_dai_set_tdm_slot - configure DAI TDM.
 * @dai: DAI
 * @tx_mask: bitmask representing active TX slots.
 * @rx_mask: bitmask representing active RX slots.
 * @slots: Number of slots in use.
 * @slot_width: Width in bits for each slot.
 *
 * Configures a DAI for TDM operation. Both mask and slots are codec and DAI
 * specific.
 */
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
	if (!dai->driver || !dai->driver->ops->set_tdm_slot)
		return -EINVAL;

	return dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
					      slots, slot_width);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot);
/**
 * snd_soc_dai_set_channel_map - configure DAI audio channel map
 * @dai: DAI
 * @tx_num: how many TX channels
 * @tx_slot: pointer to an array which imply the TX slot number channel
 *           0~num-1 uses
 * @rx_num: how many RX channels
 * @rx_slot: pointer to an array which imply the RX slot number channel
 *           0~num-1 uses
 *
 * configure the relationship between channel number and TDM slot number.
 */
int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
	unsigned int tx_num, unsigned int *tx_slot,
	unsigned int rx_num, unsigned int *rx_slot)
{
	if (!dai->driver || !dai->driver->ops->set_channel_map)
		return -EINVAL;

	return dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
						 rx_num, rx_slot);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);
/**
 * snd_soc_dai_set_tristate - configure DAI tristate mode.
 * @dai: DAI
 * @tristate: tristate enable
 *
 * Tristates the DAI so that others can use it.
 */
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
{
	if (dai->driver && dai->driver->ops->set_tristate)
		return dai->driver->ops->set_tristate(dai, tristate);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
/**
 * snd_soc_dai_digital_mute - configure DAI digital mute.
 * @dai: DAI
 * @mute: mute enable
 *
 * Mutes the DAI DAC.
 */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute)
{
	if (dai->driver && dai->driver->ops->digital_mute)
		return dai->driver->ops->digital_mute(dai, mute);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
/**
 * snd_soc_register_card - Register a card with the ASoC core
 *
 * @card: Card to register
 *
 * Returns 0 on success or a negative error code.
 */
int snd_soc_register_card(struct snd_soc_card *card)
{
	int i;

	if (!card->name || !card->dev)
		return -EINVAL;

	dev_set_drvdata(card->dev, card);

	snd_soc_initialize_card_lists(card);
	soc_init_card_debugfs(card);

	/* one runtime per DAI link plus one per aux device; kcalloc zeroes
	 * like kzalloc but also guards the element-count multiplication
	 * against overflow */
	card->rtd = kcalloc(card->num_links + card->num_aux_devs,
			    sizeof(struct snd_soc_pcm_runtime),
			    GFP_KERNEL);
	if (card->rtd == NULL)
		return -ENOMEM;
	/* aux runtimes live directly after the link runtimes */
	card->rtd_aux = &card->rtd[card->num_links];

	for (i = 0; i < card->num_links; i++)
		card->rtd[i].dai_link = &card->dai_link[i];

	INIT_LIST_HEAD(&card->list);
	card->instantiated = 0;
	mutex_init(&card->mutex);

	mutex_lock(&client_mutex);
	list_add(&card->list, &card_list);
	snd_soc_instantiate_cards();
	mutex_unlock(&client_mutex);

	dev_dbg(card->dev, "Registered card '%s'\n", card->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);
/**
 * snd_soc_unregister_card - Unregister a card with the ASoC core
 *
 * @card: Card to unregister
 *
 */
int snd_soc_unregister_card(struct snd_soc_card *card)
{
	/* tear down runtime resources only if the card was fully set up */
	if (card->instantiated)
		soc_cleanup_card_resources(card);

	mutex_lock(&client_mutex);
	list_del(&card->list);
	mutex_unlock(&client_mutex);

	dev_dbg(card->dev, "Unregistered card '%s'\n", card->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_card);
/*
 * Simplify DAI link configuration by removing ".-1" from device names
 * and sanitizing names.
 *
 * Returns a kmalloc'd component name derived from the device name
 * (caller must kfree), or NULL when the device has no name. *id receives
 * the parsed instance id (or a synthesized I2C bus/addr id, or 0).
 */
static char *fmt_single_name(struct device *dev, int *id)
{
	char *found, name[NAME_SIZE];
	int id1, id2;

	/* a device without a name cannot yield a component name */
	if (dev_name(dev) == NULL)
		return NULL;

	strlcpy(name, dev_name(dev), NAME_SIZE);

	/* are we a "%s.%d" name (platform and SPI components) */
	found = strstr(name, dev->driver->name);
	if (found) {
		/* get ID */
		if (sscanf(&found[strlen(dev->driver->name)], ".%d", id) == 1) {
			/* discard ID from name if ID == -1 */
			if (*id == -1)
				found[strlen(dev->driver->name)] = '\0';
		}
		/* NOTE(review): when no ".%d" suffix parses, *id is left
		 * unmodified here — confirm all callers pre-initialize it */
	} else {
		/* I2C component devices are named "bus-addr" */
		if (sscanf(name, "%x-%x", &id1, &id2) == 2) {
			char tmp[NAME_SIZE];

			/* create unique ID number from I2C addr and bus */
			*id = ((id1 & 0xffff) << 16) + id2;

			/* sanitize component name for DAI link creation */
			snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name);
			strlcpy(name, tmp, NAME_SIZE);
		} else
			*id = 0;
	}

	return kstrdup(name, GFP_KERNEL);
}
/*
 * Simplify DAI link naming for single devices with multiple DAIs by removing
 * any ".-1" and using the DAI name (instead of device name).
 *
 * Returns a kstrdup'd copy of the DAI driver name (caller must kfree),
 * or NULL when the driver supplies no name.
 */
static inline char *fmt_multiple_name(struct device *dev,
		struct snd_soc_dai_driver *dai_drv)
{
	if (dai_drv->name != NULL)
		return kstrdup(dai_drv->name, GFP_KERNEL);

	printk(KERN_ERR "asoc: error - multiple DAI %s registered with no name\n",
		dev_name(dev));
	return NULL;
}
/**
 * snd_soc_register_dai - Register a DAI with the ASoC core
 *
 * @dev: parent device of the DAI
 * @dai_drv: DAI driver to register
 *
 * Returns 0 on success or a negative error code.
 */
int snd_soc_register_dai(struct device *dev,
		struct snd_soc_dai_driver *dai_drv)
{
	struct snd_soc_dai *dai;

	dev_dbg(dev, "dai register %s\n", dev_name(dev));

	dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
	if (!dai)
		return -ENOMEM;

	/* create DAI component name */
	dai->name = fmt_single_name(dev, &dai->id);
	if (!dai->name) {
		kfree(dai);
		return -ENOMEM;
	}

	dai->dev = dev;
	dai->driver = dai_drv;
	/* give drivers without ops a harmless default */
	if (dai->driver->ops == NULL)
		dai->driver->ops = &null_dai_ops;

	mutex_lock(&client_mutex);
	list_add(&dai->list, &dai_list);
	snd_soc_instantiate_cards();
	mutex_unlock(&client_mutex);

	pr_debug("Registered DAI '%s'\n", dai->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_register_dai);
/**
 * snd_soc_unregister_dai - Unregister a DAI from the ASoC core
 *
 * @dev: device whose (first matching) DAI should be unregistered
 */
void snd_soc_unregister_dai(struct device *dev)
{
	struct snd_soc_dai *dai;

	/* find the first DAI registered against this device
	 * NOTE(review): the list is walked without client_mutex held,
	 * only list_del below is locked — confirm callers serialize
	 * register/unregister */
	list_for_each_entry(dai, &dai_list, list) {
		if (dev == dai->dev)
			goto found;
	}
	/* nothing registered for this device — silently return */
	return;

found:
	mutex_lock(&client_mutex);
	list_del(&dai->list);
	mutex_unlock(&client_mutex);

	pr_debug("Unregistered DAI '%s'\n", dai->name);
	/* name was kstrdup'd by fmt_single_name() */
	kfree(dai->name);
	kfree(dai);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_dai);
/**
 * snd_soc_register_dais - Register multiple DAIs with the ASoC core
 *
 * @dev: parent device for the DAIs
 * @dai_drv: array of DAI drivers to register
 * @count: number of entries in @dai_drv
 *
 * Returns 0 on success; on failure all DAIs registered so far are
 * unregistered again and a negative error code is returned.
 */
int snd_soc_register_dais(struct device *dev,
		struct snd_soc_dai_driver *dai_drv, size_t count)
{
	struct snd_soc_dai *dai;
	int i, ret = 0;

	/* %zu is the standard C99 size_t specifier; %Zu is a deprecated
	 * kernel-only extension */
	dev_dbg(dev, "dai register %s #%zu\n", dev_name(dev), count);

	for (i = 0; i < count; i++) {
		dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
		if (dai == NULL) {
			ret = -ENOMEM;
			goto err;
		}

		/* create DAI component name */
		dai->name = fmt_multiple_name(dev, &dai_drv[i]);
		if (dai->name == NULL) {
			kfree(dai);
			ret = -EINVAL;
			goto err;
		}

		dai->dev = dev;
		dai->driver = &dai_drv[i];
		/* fall back to the array index when the driver gives no id */
		if (dai->driver->id)
			dai->id = dai->driver->id;
		else
			dai->id = i;
		if (!dai->driver->ops)
			dai->driver->ops = &null_dai_ops;

		mutex_lock(&client_mutex);
		list_add(&dai->list, &dai_list);
		mutex_unlock(&client_mutex);

		pr_debug("Registered DAI '%s'\n", dai->name);
	}

	mutex_lock(&client_mutex);
	snd_soc_instantiate_cards();
	mutex_unlock(&client_mutex);

	return 0;

err:
	/* roll back the DAIs registered in earlier iterations */
	for (i--; i >= 0; i--)
		snd_soc_unregister_dai(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_dais);
/**
 * snd_soc_unregister_dais - Unregister multiple DAIs from the ASoC core
 *
 * @dev: device whose DAIs should be unregistered
 * @count: Number of DAIs
 */
void snd_soc_unregister_dais(struct device *dev, size_t count)
{
	size_t n;

	/* each call removes the first remaining DAI owned by @dev */
	for (n = 0; n < count; n++)
		snd_soc_unregister_dai(dev);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_dais);
/**
 * snd_soc_register_platform - Register a platform with the ASoC core
 *
 * @dev: parent device of the platform
 * @platform_drv: platform driver to register
 *
 * Returns 0 on success or a negative error code.
 */
int snd_soc_register_platform(struct device *dev,
		struct snd_soc_platform_driver *platform_drv)
{
	struct snd_soc_platform *platform;

	dev_dbg(dev, "platform register %s\n", dev_name(dev));

	platform = kzalloc(sizeof(struct snd_soc_platform), GFP_KERNEL);
	if (!platform)
		return -ENOMEM;

	/* create platform component name */
	platform->name = fmt_single_name(dev, &platform->id);
	if (!platform->name) {
		kfree(platform);
		return -ENOMEM;
	}

	platform->dev = dev;
	platform->driver = platform_drv;
	platform->dapm.dev = dev;
	platform->dapm.platform = platform;

	mutex_lock(&client_mutex);
	list_add(&platform->list, &platform_list);
	snd_soc_instantiate_cards();
	mutex_unlock(&client_mutex);

	pr_debug("Registered platform '%s'\n", platform->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_register_platform);
/**
 * snd_soc_unregister_platform - Unregister a platform from the ASoC core
 *
 * @dev: device whose platform should be unregistered
 */
void snd_soc_unregister_platform(struct device *dev)
{
	struct snd_soc_platform *platform;

	/* Hold client_mutex across the lookup as well: walking platform_list
	 * unlocked races with concurrent (un)registration. */
	mutex_lock(&client_mutex);
	list_for_each_entry(platform, &platform_list, list) {
		if (dev == platform->dev)
			goto found;
	}
	mutex_unlock(&client_mutex);
	return;

found:
	list_del(&platform->list);
	mutex_unlock(&client_mutex);

	pr_debug("Unregistered platform '%s'\n", platform->name);
	kfree(platform->name);
	kfree(platform);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_platform);
static u64 codec_format_map[] = {
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE,
SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE,
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE,
SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE,
SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE,
SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE,
SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3BE,
SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_U24_3BE,
SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE,
SNDRV_PCM_FMTBIT_U20_3LE | SNDRV_PCM_FMTBIT_U20_3BE,
SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE,
SNDRV_PCM_FMTBIT_U18_3LE | SNDRV_PCM_FMTBIT_U18_3BE,
SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE,
SNDRV_PCM_FMTBIT_FLOAT64_LE | SNDRV_PCM_FMTBIT_FLOAT64_BE,
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
| SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE,
};
/* Fix up the DAI formats for endianness: codecs don't actually see
 * the endianness of the data but we're using the CPU format
 * definitions which do need to include endianness so we ensure that
 * codec DAIs always have both big and little endian variants set.
 */
static void fixup_codec_formats(struct snd_soc_pcm_stream *stream)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(codec_format_map); idx++) {
		/* if either endianness of a format is present, set both */
		if (stream->formats & codec_format_map[idx])
			stream->formats |= codec_format_map[idx];
	}
}
/**
 * snd_soc_register_codec - Register a codec with the ASoC core
 *
 * @dev: parent device of the codec
 * @codec_drv: codec driver (register-cache description, I/O hooks, DAPM)
 * @dai_drv: array of DAI drivers belonging to this codec
 * @num_dai: number of entries in @dai_drv
 *
 * Returns 0 on success or a negative errno.
 */
int snd_soc_register_codec(struct device *dev,
			   const struct snd_soc_codec_driver *codec_drv,
			   struct snd_soc_dai_driver *dai_drv,
			   int num_dai)
{
	size_t reg_size;
	struct snd_soc_codec *codec;
	int ret, i;

	dev_dbg(dev, "codec register %s\n", dev_name(dev));

	codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
	if (codec == NULL)
		return -ENOMEM;

	/* create CODEC component name */
	codec->name = fmt_single_name(dev, &codec->id);
	if (codec->name == NULL) {
		kfree(codec);
		return -ENOMEM;
	}

	if (codec_drv->compress_type)
		codec->compress_type = codec_drv->compress_type;
	else
		codec->compress_type = SND_SOC_FLAT_COMPRESSION;

	/* cache the driver's register I/O hooks so the core can call them
	 * without chasing the driver pointer each time */
	codec->write = codec_drv->write;
	codec->read = codec_drv->read;
	codec->volatile_register = codec_drv->volatile_register;
	codec->readable_register = codec_drv->readable_register;
	codec->writable_register = codec_drv->writable_register;
	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
	codec->dapm.dev = dev;
	codec->dapm.codec = codec;
	codec->dapm.seq_notifier = codec_drv->seq_notifier;
	codec->dev = dev;
	codec->driver = codec_drv;
	codec->num_dai = num_dai;
	mutex_init(&codec->mutex);

	/* allocate CODEC register cache */
	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
		reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
		codec->reg_size = reg_size;
		/* it is necessary to make a copy of the default register cache
		 * because in the case of using a compression type that requires
		 * the default register cache to be marked as __devinitconst the
		 * kernel might have freed the array by the time we initialize
		 * the cache.
		 */
		if (codec_drv->reg_cache_default) {
			codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
						      reg_size, GFP_KERNEL);
			if (!codec->reg_def_copy) {
				ret = -ENOMEM;
				goto fail;
			}
		}
	}

	if (codec_drv->reg_access_size && codec_drv->reg_access_default) {
		/* table-driven defaults for any hook the driver omitted */
		if (!codec->volatile_register)
			codec->volatile_register = snd_soc_default_volatile_register;
		if (!codec->readable_register)
			codec->readable_register = snd_soc_default_readable_register;
		if (!codec->writable_register)
			codec->writable_register = snd_soc_default_writable_register;
	}

	/* make every DAI stream advertise both endian variants */
	for (i = 0; i < num_dai; i++) {
		fixup_codec_formats(&dai_drv[i].playback);
		fixup_codec_formats(&dai_drv[i].capture);
	}

	/* register any DAIs */
	if (num_dai) {
		ret = snd_soc_register_dais(dev, dai_drv, num_dai);
		if (ret < 0)
			goto fail;
	}

	/* publish the codec and try to bind any waiting cards */
	mutex_lock(&client_mutex);
	list_add(&codec->list, &codec_list);
	snd_soc_instantiate_cards();
	mutex_unlock(&client_mutex);

	pr_debug("Registered codec '%s'\n", codec->name);
	return 0;

fail:
	kfree(codec->reg_def_copy);
	codec->reg_def_copy = NULL;
	kfree(codec->name);
	kfree(codec);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_codec);
/**
 * snd_soc_unregister_codec - Unregister a codec from the ASoC core
 *
 * @dev: device whose codec should be unregistered
 */
void snd_soc_unregister_codec(struct device *dev)
{
	struct snd_soc_codec *codec;
	int i;

	/* NOTE(review): codec_list is walked without client_mutex held;
	 * a concurrent (un)registration could race with this lookup.
	 * The lock cannot simply be widened here because
	 * snd_soc_unregister_dai() is called below. */
	list_for_each_entry(codec, &codec_list, list) {
		if (dev == codec->dev)
			goto found;
	}
	return;

found:
	/* drop every DAI that was registered on behalf of this codec */
	if (codec->num_dai)
		for (i = 0; i < codec->num_dai; i++)
			snd_soc_unregister_dai(dev);

	mutex_lock(&client_mutex);
	list_del(&codec->list);
	mutex_unlock(&client_mutex);

	pr_debug("Unregistered codec '%s'\n", codec->name);

	/* release register cache, then the codec's own allocations */
	snd_soc_cache_exit(codec);
	kfree(codec->reg_def_copy);
	kfree(codec->name);
	kfree(codec);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);
/* Module init: create the debugfs hierarchy (best-effort — failures only
 * warn) and register the ASoC platform driver. */
static int __init snd_soc_init(void)
{
#ifdef CONFIG_DEBUG_FS
	snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
	if (IS_ERR(snd_soc_debugfs_root) || !snd_soc_debugfs_root) {
		printk(KERN_WARNING
		       "ASoC: Failed to create debugfs directory\n");
		/* NULL makes the files below land in the debugfs root */
		snd_soc_debugfs_root = NULL;
	}

	if (!debugfs_create_file("codecs", 0444, snd_soc_debugfs_root, NULL,
				 &codec_list_fops))
		pr_warn("ASoC: Failed to create CODEC list debugfs file\n");

	if (!debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL,
				 &dai_list_fops))
		pr_warn("ASoC: Failed to create DAI list debugfs file\n");

	if (!debugfs_create_file("platforms", 0444, snd_soc_debugfs_root, NULL,
				 &platform_list_fops))
		pr_warn("ASoC: Failed to create platform list debugfs file\n");
#endif

	snd_soc_util_init();

	return platform_driver_register(&soc_driver);
}
module_init(snd_soc_init);
/* Module exit: tear down the utility layer, debugfs tree and driver. */
static void __exit snd_soc_exit(void)
{
	snd_soc_util_exit();

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(snd_soc_debugfs_root);
#endif
	platform_driver_unregister(&soc_driver);
}
module_exit(snd_soc_exit);
/* Module information */
MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
MODULE_DESCRIPTION("ALSA SoC Core");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:soc-audio");
| gpl-2.0 |
gdamjan/systemd | src/core/dbus-path.c | 1 | 5947 | /* SPDX-License-Identifier: LGPL-2.1-or-later */
#include "alloc-util.h"
#include "bus-get-properties.h"
#include "dbus-path.h"
#include "dbus-util.h"
#include "list.h"
#include "path.h"
#include "path-util.h"
#include "string-util.h"
#include "unit.h"
static BUS_DEFINE_PROPERTY_GET_ENUM(property_get_result, path_result, PathResult);
/* D-Bus getter for the "Paths" property: serializes the unit's PathSpec
 * list as an array of (type, path) string pairs. */
static int property_get_paths(
		sd_bus *bus,
		const char *path,
		const char *interface,
		const char *property,
		sd_bus_message *reply,
		void *userdata,
		sd_bus_error *error) {

	Path *p = userdata;
	PathSpec *k;
	int r;

	assert(bus);
	assert(reply);
	assert(p);

	r = sd_bus_message_open_container(reply, 'a', "(ss)");
	if (r < 0)
		return r;

	LIST_FOREACH(spec, k, p->specs) {
		r = sd_bus_message_append(reply, "(ss)", path_type_to_string(k->type), k->path);
		if (r < 0)
			return r;
	}

	return sd_bus_message_close_container(reply);
}
/* D-Bus introspection/dispatch table for path units
 * (org.freedesktop.systemd1.Path). */
const sd_bus_vtable bus_path_vtable[] = {
	SD_BUS_VTABLE_START(0),
	SD_BUS_PROPERTY("Unit", "s", bus_property_get_triggered_unit, 0, SD_BUS_VTABLE_PROPERTY_CONST),
	SD_BUS_PROPERTY("Paths", "a(ss)", property_get_paths, 0, SD_BUS_VTABLE_PROPERTY_CONST),
	SD_BUS_PROPERTY("MakeDirectory", "b", bus_property_get_bool, offsetof(Path, make_directory), SD_BUS_VTABLE_PROPERTY_CONST),
	SD_BUS_PROPERTY("DirectoryMode", "u", bus_property_get_mode, offsetof(Path, directory_mode), SD_BUS_VTABLE_PROPERTY_CONST),
	SD_BUS_PROPERTY("Result", "s", property_get_result, offsetof(Path, result), SD_BUS_VTABLE_PROPERTY_EMITS_CHANGE),
	SD_BUS_PROPERTY("TriggerLimitIntervalUSec", "t", bus_property_get_usec, offsetof(Path, trigger_limit.interval), SD_BUS_VTABLE_PROPERTY_CONST),
	SD_BUS_PROPERTY("TriggerLimitBurst", "u", bus_property_get_unsigned, offsetof(Path, trigger_limit.burst), SD_BUS_VTABLE_PROPERTY_CONST),
	SD_BUS_VTABLE_END
};
/* Apply a property write to a transient path unit while it is being
 * created.  Returns 1 if the property was consumed, 0 if it is unknown
 * to this unit type, or a negative errno/bus error on failure. */
static int bus_path_set_transient_property(
		Path *p,
		const char *name,
		sd_bus_message *message,
		UnitWriteFlags flags,
		sd_bus_error *error) {

	Unit *u = UNIT(p);
	int r;

	assert(p);
	assert(name);
	assert(message);

	flags |= UNIT_PRIVATE;

	if (streq(name, "MakeDirectory"))
		return bus_set_transient_bool(u, name, &p->make_directory, message, flags, error);

	if (streq(name, "DirectoryMode"))
		return bus_set_transient_mode_t(u, name, &p->directory_mode, message, flags, error);

	if (streq(name, "Paths")) {
		const char *type_name, *path;
		bool empty = true;

		r = sd_bus_message_enter_container(message, 'a', "(ss)");
		if (r < 0)
			return r;

		/* validate each (type, path) pair before accepting it */
		while ((r = sd_bus_message_read(message, "(ss)", &type_name, &path)) > 0) {
			PathType t;

			t = path_type_from_string(type_name);
			if (t < 0)
				return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Unknown path type: %s", type_name);

			if (isempty(path))
				return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Path in %s is empty", type_name);

			if (!path_is_absolute(path))
				return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Path in %s is not absolute: %s", type_name, path);

			if (!UNIT_WRITE_FLAGS_NOOP(flags)) {
				_cleanup_free_ char *k = NULL;
				PathSpec *s;

				k = strdup(path);
				if (!k)
					return -ENOMEM;

				path_simplify(k);

				s = new0(PathSpec, 1);
				if (!s)
					return -ENOMEM;

				s->unit = u;
				s->path = TAKE_PTR(k);
				s->type = t;
				s->inotify_fd = -1; /* watch is opened later, when the unit starts */

				LIST_PREPEND(spec, p->specs, s);

				unit_write_settingf(u, flags|UNIT_ESCAPE_SPECIFIERS, name, "%s=%s", type_name, path);
			}

			empty = false;
		}
		if (r < 0)
			return r;

		r = sd_bus_message_exit_container(message);
		if (r < 0)
			return r;

		/* an empty assignment resets the path list */
		if (!UNIT_WRITE_FLAGS_NOOP(flags) && empty) {
			path_free_specs(p);
			unit_write_settingf(u, flags, name, "PathExists=");
		}

		return 1;
	}

	if (streq(name, "TriggerLimitBurst"))
		return bus_set_transient_unsigned(u, name, &p->trigger_limit.burst, message, flags, error);

	if (streq(name, "TriggerLimitIntervalUSec"))
		return bus_set_transient_usec(u, name, &p->trigger_limit.interval, message, flags, error);

	return 0;
}
/* Entry point for D-Bus property writes on a path unit.  Only transient
 * units that are still stubs accept writes; anything else is ignored. */
int bus_path_set_property(
		Unit *u,
		const char *name,
		sd_bus_message *message,
		UnitWriteFlags mode,
		sd_bus_error *error) {

	Path *p = PATH(u);

	assert(p);
	assert(name);
	assert(message);

	/* guard clause: non-transient or already-loaded units are read-only */
	if (!u->transient || u->load_state != UNIT_STUB)
		return 0;

	return bus_path_set_transient_property(p, name, message, mode, error);
}
| gpl-2.0 |
MSM8939-Samsung/android_kernel_samsung_a7lte | drivers/sensors/taos_tmd3782.c | 1 | 53509 | /*
* Copyright (c) 2010 SAMSUNG
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/wakelock.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/sensor/sensors_core.h>
#include "taos_tmd3782.h"
/* Note about power vs enable/disable:
* The chip has two functions, proximity and ambient light sensing.
* There is no separate power enablement to the two functions (unlike
* the Capella CM3602/3623).
* This module implements two drivers: /dev/proximity and /dev/light.
* When either driver is enabled (via sysfs attributes), we give power
* to the chip. When both are disabled, we remove power from the chip.
* In suspend, we remove power if light is disabled but not if proximity is
* enabled (proximity is allowed to wakeup from suspend).
*
* There are no ioctls for either driver interfaces. Output is via
* input device framework and control via sysfs attributes.
*/
/* board revision, provided by the platform code */
extern unsigned int system_rev;

/* taos debug */
#define MODULE_NAME_PROX "proximity_sensor"
#define taos_dbgmsg(str, args...) pr_info("%s: " str, __func__, ##args)
#define TAOS_DEBUG
#ifdef TAOS_DEBUG
#define gprintk(fmt, x...) \
	printk(KERN_INFO "%s(%d):" fmt, __func__, __LINE__, ## x)
#else
#define gprintk(x...) do { } while (0)
#endif

/* chip identification reported through sysfs */
#define VENDOR_NAME "TAOS"
#define CHIP_NAME "TMD3782"
#define CHIP_ID 0x69

/* sensor type */
#define LIGHT 0
#define PROXIMITY 1
#define ALL 2

/* bits of taos_data.power_state */
enum {
	LIGHT_ENABLED = BIT(0),
	PROXIMITY_ENABLED = BIT(1),
};

/* proximity detection state reported via ABS_DISTANCE */
enum {
	STATE_CLOSE = 0,
	STATE_FAR = 1,
};

enum {
	OFF = 0,
	ON = 1,
};

/* lux equation parameters — vendor-tuned coefficients for this part */
#define Atime_ms 500 /*50.0 ms*/
#define DGF 578
#define R_Coef1 (340)
#define G_Coef1 (1000)
#define B_Coef1 (310)
#define IR_R_Coef1 (-1)
#define IR_G_Coef1 (109)
#define IR_B_Coef1 (-29)
#define IR_C_Coef1 (57)
#define IR_Coef1 (38)
/* color-temperature equation: cct = CT_Coef1 * (B-IR)/(R-IR) + CT_Offset1 */
#define CT_Coef1 (2855)
#define CT_Offset1 (1973)
#define INTEGRATION_CYCLE 240
#define ADC_BUFFER_NUM 6
#define PROX_AVG_COUNT 40
#define MAX_LUX 150000
/* proximity ADC range after trim subtraction */
#define TAOS_PROX_MAX 1023
#define TAOS_PROX_MIN 0
/* proximity auto-calibration */
#define OFFSET_ARRAY_LENGTH 10
#define OFFSET_FILE_PATH "/efs/FactoryApp/prox_cal"
#define CAL_SKIP_ADC 204
#define CAL_FAIL_ADC 480
#ifdef CONFIG_PROX_WINDOW_TYPE
#define WINDOW_TYPE_FILE_PATH "/sys/class/sec/sec_touch_ic/window_type"
#endif
/* driver data */
struct taos_data {
	struct i2c_client *i2c_client;
	struct taos_platform_data *pdata;
	struct input_dev *proximity_input_dev;
	struct input_dev *light_input_dev;
	struct device *light_dev;
	struct device *proximity_dev;
	/* deferred work: light poll, prox interrupt handling, prox averaging */
	struct work_struct work_light;
	struct work_struct work_prox;
	struct work_struct work_prox_avg;
	struct mutex prox_mutex;       /* serializes raw prox ADC reads */
	struct mutex power_lock;       /* guards power_state transitions */
	struct wake_lock prx_wake_lock;
	struct hrtimer timer;          /* light polling timer */
	struct hrtimer prox_avg_timer;
	struct workqueue_struct *wq;
	struct workqueue_struct *wq_avg;
	ktime_t light_poll_delay;
	ktime_t prox_polling_time;
	u8 power_state;                /* LIGHT_ENABLED | PROXIMITY_ENABLED */
	int irq;
	bool adc_buf_initialized;
	int adc_value_buf[ADC_BUFFER_NUM];
	int adc_index_count;
	int avg[3];                    /* prox min/avg/max over a window */
	int prox_avg_enable;
	/* last raw RGBC + derived IR channel readings */
	s32 clrdata;
	s32 reddata;
	s32 grndata;
	s32 bludata;
	s32 irdata;
	int lux;                       /* last computed lux, also fallback value */
	/* Auto Calibration */
	u16 offset_value;              /* stored crosstalk offset from /efs */
	int cal_result;
	int threshold_high;
	int threshold_low;
	int proximity_value;           /* STATE_CLOSE or STATE_FAR */
	bool set_manual_thd;           /* true: skip offset-derived thresholds */
#ifdef CONFIG_PROX_WINDOW_TYPE
	char windowtype[2];
#endif
	struct regulator *vdd_2p85;
	struct regulator *leda_2p8;
	struct regulator *lvs1_1p8;
};
static void taos_thresh_set(struct taos_data *taos);
static int proximity_get_adc(struct taos_data *taos);
static int lightsensor_get_adcvalue(struct taos_data *taos);
static int proximity_open_offset(struct taos_data *data);
static int proximity_adc_read(struct taos_data *taos);
#ifdef CONFIG_PROX_WINDOW_TYPE
static int proximity_open_window_type(struct taos_data *data);
#endif
/* Request the proximity LED enable GPIO and drive it high.
 * Returns 0 when no GPIO is configured or on success, negative errno on
 * request failure.  Fix: on a failed gpio_request() the original still
 * called gpio_direction_output() on the unrequested GPIO. */
static int tmd3782_setup_leden_gpio(struct taos_data *info)
{
	int rc;
	struct taos_platform_data *pdata = info->pdata;

	if (pdata->enable < 0)
		return 0;

	rc = gpio_request(pdata->enable, "prox_en");
	if (rc < 0) {
		pr_err("%s: gpio %d request failed (%d)\n",
			__func__, pdata->enable, rc);
		return rc;
	}

	gpio_direction_output(pdata->enable, 1);
	pr_info("%s: gpio %d request success\n",
		__func__, pdata->enable);
	return rc;
}
/* Switch the proximity LED enable GPIO; waits ~20ms after power-on so the
 * LED supply settles.  No-op when the board defines no enable GPIO. */
static int tmd3782_leden_gpio_onoff(struct taos_data *info, bool onoff)
{
	struct taos_platform_data *pdata = info->pdata;

	if (pdata->enable < 0)
		return 0;

	gpio_set_value(pdata->enable, onoff);
	pr_info("%s onoff:%d\n", __func__, onoff);
	if (onoff)
		usleep_range(20000, 21000);

	return 0;
}
/*
static int prox_regulator_onoff(struct device *dev, bool onoff)
{
struct regulator* ldo19;
struct regulator* lvs1;
printk(KERN_ERR "%s %s\n", __func__, (onoff) ? "on" : "off");
ldo19 = devm_regulator_get(dev, "reg_vdd");
if (IS_ERR(ldo19)) {
pr_err("%s: cannot get ldo19\n", __func__);
return -ENOMEM;
}
lvs1 = devm_regulator_get(dev, "reg_vio");
if (IS_ERR(lvs1)) {
pr_err("%s: cannot get lvs1\n", __func__);
return -ENOMEM;
}
if (onoff) {
regulator_enable(ldo19);
msleep(5);
regulator_enable(lvs1);
msleep(5);
} else {
regulator_disable(ldo19);
msleep(5);
regulator_disable(lvs1);
msleep(5);
}
devm_regulator_put(ldo19);
devm_regulator_put(lvs1);
msleep(10);
return 0;
}
*/
/* Enable (onoff==1) or disable (onoff==0) the sensor supply rails,
 * lazily acquiring the regulators on first use.
 * NOTE(review): on a failed regulator_get() the ERR_PTR is cached in the
 * struct, so the `!info->...` guard never retries the lookup; every use
 * below is protected by IS_ERR() so this only means the rail stays
 * unmanaged — confirm this is intended. */
static void sensor_power_on_vdd(struct taos_data *info, int onoff)
{
	int ret;

	if (!info->lvs1_1p8) {
		info->lvs1_1p8 =
			regulator_get(&info->i2c_client->dev, "reg_vio");
		if(IS_ERR(info->lvs1_1p8)){
			pr_err("%s: regulator_get for lvs1_1p8 failed\n",
				__func__);
		}
	}
	if (!info->vdd_2p85) {
		info->vdd_2p85 =
			regulator_get(&info->i2c_client->dev, "reg_vdd");
		if(IS_ERR(info->vdd_2p85)){
			pr_err("%s: regulator_get for vdd_2p85 failed\n",
				__func__);
		}
	}

	if (onoff == 1) {
		if(!(IS_ERR(info->lvs1_1p8))) {
			ret = regulator_enable(info->lvs1_1p8);
			if (ret)
				pr_err("%s: Failed to enable regulator lvs1_1p8.\n",
					__func__);
		}
		if(!(IS_ERR(info->vdd_2p85))) {
			ret = regulator_enable(info->vdd_2p85);
			if (ret)
				pr_err("%s: Failed to enable regulator vdd_2p85.\n",
					__func__);
		}
	} else if (onoff == 0) {
		/* only disable rails that are actually on, to keep the
		 * regulator framework's enable refcount balanced */
		if(!(IS_ERR(info->lvs1_1p8))) {
			if (regulator_is_enabled(info->lvs1_1p8)) {
				ret = regulator_disable(info->lvs1_1p8);
				if (ret)
					pr_err("%s: error lvs1_1p8 disabling regulator\n",
						__func__);
			}
		}
		if(!(IS_ERR(info->vdd_2p85))) {
			if (regulator_is_enabled(info->vdd_2p85)) {
				ret = regulator_disable(info->vdd_2p85);
				if (ret)
					pr_err("%s: error vdd_2p85 disabling regulator\n",
						__func__);
			}
		}
	}
	/* allow the rails to settle */
	msleep(30);
	return;
}
/* Write one byte to a chip register (command bit is OR'd in).
 * Returns the SMBus status (0 on success, negative errno on failure). */
static int opt_i2c_write(struct taos_data *taos, u8 reg, u8 *val)
{
	return i2c_smbus_write_byte_data(taos->i2c_client,
					 (CMD_REG | reg), *val);
}
/* Read one byte from a chip register: write the register address, then
 * read back the data byte into *val.
 * Fix: the original ignored a failed address write and read anyway.
 * Returns the byte value (>= 0) on success or a negative errno; on an
 * address-write failure *val is left untouched. */
static int opt_i2c_read(struct taos_data *taos, u8 reg , u8 *val)
{
	int ret;

	ret = i2c_smbus_write_byte(taos->i2c_client, (CMD_REG | reg));
	if (ret < 0)
		return ret;

	ret = i2c_smbus_read_byte(taos->i2c_client);
	*val = ret;
	return ret;
}
/* Send a raw command byte (e.g. interrupt-clear special function). */
static int opt_i2c_write_command(struct taos_data *taos, u8 val)
{
	int status = i2c_smbus_write_byte(taos->i2c_client, val);

	gprintk("[TAOS Command] val=[0x%x] - ret=[0x%x]\n", val, status);
	return status;
}
/* Read the raw proximity ADC word and subtract the per-board trim.
 * The result is clamped to [TAOS_PROX_MIN, TAOS_PROX_MAX - trim].
 * NOTE(review): a negative i2c error code is < trim and is therefore
 * silently reported as TAOS_PROX_MIN — confirm callers accept that. */
static int proximity_get_adc(struct taos_data *taos)
{
	int adc = 0;

	adc = i2c_smbus_read_word_data(taos->i2c_client,
			CMD_REG | PRX_LO);
	if (adc < taos->pdata->prox_rawdata_trim)
		return TAOS_PROX_MIN;
	if (adc > TAOS_PROX_MAX)
		adc = TAOS_PROX_MAX;

	return adc - taos->pdata->prox_rawdata_trim;
}
/* Read a 16-bit threshold register (@buf selects lo/hi threshold) and
 * convert it back to the trimmed scale.  The sentinel values 0 and 0xFFFF
 * (the "disabled" endpoints programmed by taos_thresh_set) are returned
 * untranslated. */
static int taos_proximity_get_threshold(struct taos_data *taos, u8 buf)
{
	u16 threshold;

	threshold = i2c_smbus_read_word_data(taos->i2c_client,
			(CMD_REG | buf));
	if ((threshold == 0xFFFF) || (threshold == 0))
		return (int)threshold;

	return (int)threshold - taos->pdata->prox_rawdata_trim;
}
/* Program the proximity interrupt window so that only the *next*
 * transition fires: when currently CLOSE only the low threshold is armed
 * (high side pinned at 0xFFFF); when FAR only the high threshold is armed
 * (low side pinned at 0).  Thresholds are re-biased by the raw-data trim
 * before being written as little-endian byte pairs. */
static void taos_thresh_set(struct taos_data *taos)
{
	int i = 0;
	int ret = 0;
	u8 prox_int_thresh[4] = {0,};
	u16 trim = (u16)taos->pdata->prox_rawdata_trim;

	/* Setting for proximity interrupt */
	if (taos->proximity_value == STATE_CLOSE) {
		prox_int_thresh[0] = ((u16)taos->threshold_low+trim) & 0xFF;
		prox_int_thresh[1] = ((taos->threshold_low+trim) >> 8) & 0xFF;
		prox_int_thresh[2] = (0xFFFF) & 0xFF;
		prox_int_thresh[3] = (0xFFFF >> 8) & 0xFF;
	} else {
		prox_int_thresh[0] = (0x0000) & 0xFF;
		prox_int_thresh[1] = (0x0000 >> 8) & 0xFF;
		prox_int_thresh[2] = ((u16)taos->threshold_high+trim) & 0xff;
		prox_int_thresh[3] =
			(((u16)taos->threshold_high+trim) >> 8) & 0xff;
	}

	/* write the 4 threshold bytes to consecutive registers */
	for (i = 0; i < 4; i++) {
		ret = opt_i2c_write(taos,
			(CMD_REG|(PRX_MINTHRESHLO + i)),
			&prox_int_thresh[i]);
		if (ret < 0)
			gprintk("opt_i2c_write failed, err = %d\n", ret);
	}
}
/* Power the chip on and program ALS timing, wait time, interrupt
 * persistence, prox pulse count and ALS gain.  When @prox_en is false only
 * the ALS engine is enabled.  Returns the status of the last i2c write.
 * Fix: restored `&reg_cntrl` which had been corrupted to mojibake
 * (`®_cntrl`) by a bad encoding round-trip. */
static int taos_chip_on(struct taos_data *taos, bool prox_en)
{
	int ret = 0;
	u8 temp_val;
	u8 reg_cntrl;

#ifndef CONFIG_SENSORS_TMD3782S_VDD_LEDA
	tmd3782_leden_gpio_onoff(taos, 1);
#endif
	temp_val = CNTL_PWRON;
	ret = opt_i2c_write(taos, (CMD_REG|CNTRL), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to clr ctrl reg failed\n");
	/* A minimum interval of 2.4ms must pass after PON is enabled. */
	usleep_range(3000, 3100);

	temp_val = taos->pdata->als_time;
	ret = opt_i2c_write(taos, (CMD_REG|ALS_TIME), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to als time reg failed\n");

	temp_val = 0xff;
	ret = opt_i2c_write(taos, (CMD_REG|WAIT_TIME), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to wait time reg failed\n");

	temp_val = taos->pdata->intr_filter;
	ret = opt_i2c_write(taos, (CMD_REG|INTERRUPT), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to interrupt reg failed\n");

	temp_val = 0x0;
	ret = opt_i2c_write(taos, (CMD_REG|PRX_CFG), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to prox cfg reg failed\n");

	temp_val = taos->pdata->prox_pulsecnt;
	ret = opt_i2c_write(taos, (CMD_REG|PRX_COUNT), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to prox cnt reg failed\n");

	temp_val = taos->pdata->als_gain;
	ret = opt_i2c_write(taos, (CMD_REG|GAIN), &temp_val);
	if (ret < 0)
		gprintk("opt_i2c_write to prox gain reg failed\n");

	/* Enable light sensor separately to avoid running proximity sensor at hardware level.
	 * Enabling proximity sensor separately at hardware level will lead to erroneous readings.
	 */
	if (prox_en == false)
		reg_cntrl = CNTL_ALS_ONLY_ENBL;
	else
		reg_cntrl = CNTL_INTPROXPON_ENBL;

	ret = opt_i2c_write(taos, (CMD_REG|CNTRL), &reg_cntrl);
	if (ret < 0)
		gprintk("opt_i2c_write to ctrl reg failed\n");

	/* Minimum 58 ms delay after initialization before reading data */
	usleep_range(60000, 61000);

	return ret;
}
/* Clear the control register (powers down both engines) and, when the LED
 * anode is GPIO-controlled, drop the LED enable line.
 * Fix: restored `&reg_cntrl` which had been corrupted to mojibake
 * (`®_cntrl`) by a bad encoding round-trip. */
static int taos_chip_off(struct taos_data *taos)
{
	int ret = 0;
	u8 reg_cntrl;

	reg_cntrl = CNTL_REG_CLEAR;
	ret = opt_i2c_write(taos, (CMD_REG | CNTRL), &reg_cntrl);
	if (ret < 0) {
		gprintk("opt_i2c_write to ctrl reg failed\n");
		return ret;
	}

#ifndef CONFIG_SENSORS_TMD3782S_VDD_LEDA
	tmd3782_leden_gpio_onoff(taos, OFF);
	usleep_range(20000, 21000);
#endif
	return ret;
}
/* Compute the correlated color temperature from the last RGBC reading:
 * cct = CT_Coef1 * (blue - IR) / (red - IR) + CT_Offset1.
 * Returns 0 when the red channel carries no signal above IR. */
static int taos_get_cct(struct taos_data *taos)
{
	int blue_ir_delta = taos->bludata - taos->irdata;
	int red_ir_delta = taos->reddata - taos->irdata;

	if (red_ir_delta == 0)
		return 0;

	return CT_Coef1 * blue_ir_delta / red_ir_delta + CT_Offset1;
}
/* Read the RGBC channels, auto-adjust the ALS gain, subtract the IR
 * component and compute lux.  The result is cached in taos->lux; while a
 * gain switch is pending the previous cached lux is returned.
 * Fix: restored `&reg_gain` (twice) which had been corrupted to mojibake
 * (`®_gain`) by a bad encoding round-trip. */
static int taos_get_lux(struct taos_data *taos)
{
	s32 rp1, gp1, bp1;
	s32 clrdata = 0;
	s32 reddata = 0;
	s32 grndata = 0;
	s32 bludata = 0;
	s32 calculated_lux = 0;
	u8 reg_gain = 0x0;
	u16 temp_gain = 0x0;
	int gain = 1;
	int ret = 0;

	temp_gain = i2c_smbus_read_word_data(taos->i2c_client,
			(CMD_REG | GAIN));
	reg_gain = temp_gain & 0xff;

	clrdata = i2c_smbus_read_word_data(taos->i2c_client,
			(CMD_REG | CLR_CHAN0LO));
	reddata = i2c_smbus_read_word_data(taos->i2c_client,
			(CMD_REG | RED_CHAN1LO));
	grndata = i2c_smbus_read_word_data(taos->i2c_client,
			(CMD_REG | GRN_CHAN1LO));
	bludata = i2c_smbus_read_word_data(taos->i2c_client,
			(CMD_REG | BLU_CHAN1LO));

	taos->clrdata = clrdata;
	taos->reddata = reddata;
	taos->grndata = grndata;
	taos->bludata = bludata;

	switch (reg_gain & 0x03) {
	case 0x00:
		gain = 1;
		break;
	case 0x01:
		gain = 4;
		break;
	case 0x02:
		gain = 16;
		break;
/*	case 0x03:
		gain = 64;
		break; */
	default:
		break;
	}

	/* auto-gain: too dark at 1x -> go to 16x, saturating at 16x -> go
	 * back to 1x; the new gain takes effect next cycle, so return the
	 * cached lux for now */
	if (gain == 1 && clrdata < 25) {
		reg_gain = 0x22;
		ret = opt_i2c_write(taos, (CMD_REG | GAIN), &reg_gain);
		if (ret < 0)
			gprintk("opt_i2c_write failed, err = %d\n", ret);
		return taos->lux;
	} else if (gain == 16 && clrdata > 15000) {
		reg_gain = 0x20;
		ret = opt_i2c_write(taos, (CMD_REG | GAIN), &reg_gain);
		if (ret < 0)
			gprintk("opt_i2c_write failed, err = %d\n", ret);
		return taos->lux;
	}

	/* clear channel saturated at minimum gain: report maximum lux */
	if ((clrdata >= 18500) && (gain == 1)) {
		calculated_lux = MAX_LUX;
		return calculated_lux;
	}

	/* calculate lux */
	taos->irdata = (reddata + grndata + bludata - clrdata) / 2;

	/* remove ir from counts*/
	rp1 = taos->reddata - taos->irdata;
	gp1 = taos->grndata - taos->irdata;
	bp1 = taos->bludata - taos->irdata;

	calculated_lux = (rp1 * R_Coef1 + gp1 * G_Coef1 + bp1 * B_Coef1) /1000;
	if(calculated_lux < 0)
		calculated_lux = 0;
	else {
		/* divide by CPL, CPL = (Atime_ms * ALS_GAIN / DGF);*/
		calculated_lux =calculated_lux*DGF;
		calculated_lux *= 10;/*Atime_ms*/
		calculated_lux /= Atime_ms;
		calculated_lux /= gain;
	}

	taos->lux = (int)calculated_lux;

	return taos->lux;
}
/* Start light reporting: take an initial throwaway reading (the first
 * sample after power-on is not forwarded to HAL), report lux and color
 * temperature through the input layer, then arm the polling hrtimer. */
static void taos_light_enable(struct taos_data *taos)
{
	int cct = 0;
	int adc = 0;

	taos_dbgmsg("starting poll timer, delay %lldns\n",
		ktime_to_ns(taos->light_poll_delay));

	taos_get_lux(taos);
	msleep(60);/*first lux value need not update to hal*/

	adc = taos_get_lux(taos);
	cct = taos_get_cct(taos);

	/* +1 so a repeated identical lux still generates an event */
	input_report_rel(taos->light_input_dev, REL_MISC, adc + 1);
	input_report_rel(taos->light_input_dev, REL_WHEEL, cct);
	input_sync(taos->light_input_dev);
	taos_dbgmsg("light_enable, adc: %d, cct: %d\n",
		adc,cct);

	hrtimer_start(&taos->timer, taos->light_poll_delay, HRTIMER_MODE_REL);
}
/* Stop light reporting: flush any queued poll work, then kill the timer
 * (work first, so a just-fired timer cannot requeue after the cancel). */
static void taos_light_disable(struct taos_data *taos)
{
	taos_dbgmsg("cancelling poll timer\n");
	cancel_work_sync(&taos->work_light);
	hrtimer_cancel(&taos->timer);
}
static ssize_t poll_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct taos_data *taos = dev_get_drvdata(dev);
return sprintf(buf, "%lld\n", ktime_to_ns(taos->light_poll_delay));
}
/* sysfs write of the light polling interval (nanoseconds).  If the light
 * sensor is running, the poll timer is restarted with the new period. */
static ssize_t poll_delay_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int64_t new_delay;
	int err;

	/* legacy API, equivalent to kstrtoll() on newer kernels */
	err = strict_strtoll(buf, 10, &new_delay);
	if (err < 0)
		return err;

	taos_dbgmsg("new delay = %lldns, old delay = %lldns\n",
		new_delay, ktime_to_ns(taos->light_poll_delay));
	mutex_lock(&taos->power_lock);
	if (new_delay != ktime_to_ns(taos->light_poll_delay)) {
		taos->light_poll_delay = ns_to_ktime(new_delay);
		if (taos->power_state & LIGHT_ENABLED) {
			/* restart polling so the new period takes effect now */
			taos_light_disable(taos);
			taos_light_enable(taos);
		}
	}
	mutex_unlock(&taos->power_lock);

	return size;
}
/* sysfs read of the light sensor enable state (0 or 1). */
static ssize_t light_enable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int enabled = !!(taos->power_state & LIGHT_ENABLED);

	return sprintf(buf, "%d\n", enabled);
}
/* sysfs read of the proximity sensor enable state (0 or 1). */
static ssize_t proximity_enable_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int enabled = !!(taos->power_state & PROXIMITY_ENABLED);

	return sprintf(buf, "%d\n", enabled);
}
/* sysfs "enable" store for the light sensor: "1" powers the chip (ALS-only
 * if proximity is off) and starts polling; "0" stops polling and powers
 * the chip down when it was the last user. */
static ssize_t light_enable_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	bool new_value;

	if (sysfs_streq(buf, "1")) {
		new_value = true;
	} else if (sysfs_streq(buf, "0")) {
		new_value = false;
	} else {
		pr_err("%s: invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}

	mutex_lock(&taos->power_lock);
	taos_dbgmsg("new_value = %d, old state = %d\n",
		new_value, (taos->power_state & LIGHT_ENABLED) ? 1 : 0);
	if (new_value && !(taos->power_state & LIGHT_ENABLED)) {
		/* first user powers the chip (ALS engine only) */
		if (!taos->power_state) {
			taos_chip_on(taos, false);
		}
		taos->power_state |= LIGHT_ENABLED;
		taos_light_enable(taos);
	} else if (!new_value && (taos->power_state & LIGHT_ENABLED)) {
		taos_light_disable(taos);
		taos->power_state &= ~LIGHT_ENABLED;
		/* last user powers the chip down */
		if (!taos->power_state)
			taos_chip_off(taos);
	}
	mutex_unlock(&taos->power_lock);
	return size;
}
/* sysfs "enable" store for the proximity sensor.  "1" loads calibration
 * (unless thresholds were set manually), powers/arms the prox engine and
 * its wake-capable IRQ; "0" disarms the IRQ and powers down, keeping the
 * ALS engine alive if the light sensor is still in use.
 * Fix: restored `&reg_cntrl` (twice) which had been corrupted to mojibake
 * (`®_cntrl`) by a bad encoding round-trip. */
static ssize_t proximity_enable_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	bool new_value;
	int temp = 0, ret = 0;
	u8 reg_cntrl = 0;

	if (sysfs_streq(buf, "1")) {
		new_value = true;
	} else if (sysfs_streq(buf, "0")) {
		new_value = false;
	} else {
		pr_err("%s: invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}

	mutex_lock(&taos->power_lock);
	taos_dbgmsg("new_value = %d, old state = %d\n",
		new_value, (taos->power_state & PROXIMITY_ENABLED) ? 1 : 0);
	if (new_value && !(taos->power_state & PROXIMITY_ENABLED)) {
		if(taos->set_manual_thd == false) {
			/* derive thresholds from the stored crosstalk offset */
			ret = proximity_open_offset(taos);
			if (ret < 0 && ret != -ENOENT)
				pr_err("%s: proximity_open_offset() failed\n",
					__func__);
#ifdef CONFIG_PROX_WINDOW_TYPE
			ret = proximity_open_window_type(taos);
#endif
			taos->threshold_high =
				taos->pdata->prox_thresh_hi
				+ taos->offset_value;
			taos->threshold_low =
				taos->pdata->prox_thresh_low
				+ taos->offset_value;
			pr_err("%s: th_hi = %d, th_low = %d\n", __func__,
				taos->threshold_high, taos->threshold_low);
		}
		if (!taos->power_state) {
			taos_chip_on(taos, true);
		} else {
			/* Proximity registers are already initialized so
			 * enable proximity hardware logic only */
			reg_cntrl = CNTL_INTPROXPON_ENBL;
			ret = opt_i2c_write(taos, (CMD_REG|CNTRL), &reg_cntrl);
			if (ret < 0)
				gprintk("opt_i2c_write to ctrl reg failed during prox enable\n");
			usleep_range(60000, 61000);
		}
		taos->power_state |= PROXIMITY_ENABLED;
		taos->proximity_value = STATE_FAR;
		taos_thresh_set(taos);
		usleep_range(10000, 11000);

		/* interrupt clearing */
		temp = (CMD_REG|CMD_SPL_FN|CMD_PROXALS_INTCLR);
		ret = opt_i2c_write_command(taos, temp);
		if (ret < 0)
			gprintk("opt_i2c_write failed, err = %d\n", ret);

		/* report FAR as the initial state */
		input_report_abs(taos->proximity_input_dev, ABS_DISTANCE, 1);
		input_sync(taos->proximity_input_dev);

		enable_irq(taos->irq);
		enable_irq_wake(taos->irq);
	} else if (!new_value && (taos->power_state & PROXIMITY_ENABLED)) {
		disable_irq_wake(taos->irq);
		disable_irq(taos->irq);
		taos->power_state &= ~PROXIMITY_ENABLED;
		if (!taos->power_state) {
			taos_chip_off(taos);
		} else {
			/* Light sensor is still running so disable proximity
			 * hardware logic only. */
			reg_cntrl = CNTL_ALS_ONLY_ENBL;
			ret = opt_i2c_write(taos, (CMD_REG|CNTRL), &reg_cntrl);
			if (ret < 0)
				gprintk("opt_i2c_write to ctrl reg failed during prox disable\n");
			usleep_range(60000, 61000);
		}
	}
	mutex_unlock(&taos->power_lock);
	return size;
}
/* sysfs read of the current trimmed proximity ADC value. */
static ssize_t proximity_state_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", proximity_get_adc(taos));
}
#ifdef CONFIG_PROX_WINDOW_TYPE
/* Pick the default proximity thresholds matching the detected display
 * window color (white glass lets more IR leak than black).  Unknown or
 * unread window types keep the platform-data defaults. */
static void change_proximity_default_threshold(struct taos_data *data)
{
	int trim = data->pdata->prox_rawdata_trim;

	switch (data->windowtype[1]) {
	case WINTYPE_WHITE:
		data->pdata->prox_thresh_hi = WHITEWINDOW_HI_THRESHOLD-trim;
		data->pdata->prox_thresh_low = WHITEWINDOW_LOW_THRESHOLD-trim;
		break;
	case WINTYPE_OTHERS:
		data->pdata->prox_thresh_hi = BLACKWINDOW_HI_THRESHOLD-trim;
		data->pdata->prox_thresh_low = BLACKWINDOW_LOW_THRESHOLD-trim;
		break;
	default:
		/* keep the existing platform-data thresholds unchanged */
		break;
	}
}
/* Read the 2-byte display window type from sysfs (exported by the touch
 * driver) into data->windowtype, then adjust the default proximity
 * thresholds accordingly.  On any failure the type is zeroed and the
 * platform defaults remain in effect.  Returns bytes read or -errno. */
static int proximity_open_window_type(struct taos_data *data)
{
	struct file *wintype_filp = NULL;
	int err = 0;
	mm_segment_t old_fs;

	/* temporarily allow kernel-space buffers for the VFS read */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	wintype_filp = filp_open(WINDOW_TYPE_FILE_PATH, O_RDONLY, 0666);
	if(IS_ERR(wintype_filp)) {
		pr_err("%s: no window_type file\n", __func__);
		err = PTR_ERR(wintype_filp);
		if(err != -ENOENT)
			pr_err("%s: Can't open window_type file\n", __func__);
		set_fs(old_fs);
		data->windowtype[0] = 0;
		data->windowtype[1] = 0;
		goto exit;
	}

	err = wintype_filp->f_op->read(wintype_filp,
		(u8 *)&data->windowtype, sizeof(u8) * 2, &wintype_filp->f_pos);
	if (err != sizeof(u8) * 2) {
		pr_err("%s: Can't read the window_type data from file\n"
			, __func__);
		err = -EIO;
	}

	pr_err("%s: %c%c\n",
		__func__, data->windowtype[0], data->windowtype[1]);
	filp_close(wintype_filp, current->files);
	set_fs(old_fs);
exit:
	change_proximity_default_threshold(data);
	return err;
}
#endif
/*
 * Load the persisted proximity cancellation offset (u16) from
 * OFFSET_FILE_PATH into data->offset_value.
 *
 * Runs under KERNEL_DS for the kernel-buffer read.  Returns 0 on
 * success, the filp_open errno if the file is missing/unopenable, or
 * -EIO on a short read.  On open failure offset_value is left as-is;
 * callers (e.g. taos_get_initial_offset) handle the fallback.
 */
static int proximity_open_offset(struct taos_data *data)
{
struct file *offset_filp = NULL;
int err = 0;
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
offset_filp = filp_open(OFFSET_FILE_PATH, O_RDONLY, 0666);
if (IS_ERR(offset_filp)) {
pr_err("%s: no offset file\n", __func__);
err = PTR_ERR(offset_filp);
if (err != -ENOENT)
pr_err("%s: Can't open offset file\n", __func__);
set_fs(old_fs);
return err;
}
err = offset_filp->f_op->read(offset_filp,
(char *)&data->offset_value, sizeof(u16), &offset_filp->f_pos);
if (err != sizeof(u16)) {
pr_err("%s: Can't read the offset data from file\n", __func__);
err = -EIO;
}
pr_err("%s: data->offset_value = %d\n",
__func__, data->offset_value);
filp_close(offset_filp, current->files);
set_fs(old_fs);
return err;
}
/*
 * Sample the proximity ADC OFFSET_ARRAY_LENGTH times (10 ms apart,
 * under prox_mutex), discard the single lowest and highest readings
 * and return the average of the rest.
 */
static int proximity_adc_read(struct taos_data *taos)
{
	int reading;
	int lowest = 0;
	int highest = 0;
	int total = 0;
	int idx;

	mutex_lock(&taos->prox_mutex);
	for (idx = 0; idx < OFFSET_ARRAY_LENGTH; idx++) {
		usleep_range(10000, 11000);
		reading = proximity_get_adc(taos);
		if (idx == 0) {
			lowest = reading;
			highest = reading;
		} else if (reading < lowest) {
			lowest = reading;
		} else if (reading > highest) {
			highest = reading;
		}
		total += reading;
	}
	mutex_unlock(&taos->prox_mutex);

	/* Trim the extremes, then average the remaining samples. */
	total -= lowest + highest;
	return total / (OFFSET_ARRAY_LENGTH - 2);
}
/*
 * Perform (do_calib == true) or reset (false) proximity crosstalk
 * calibration, then persist the resulting offset to OFFSET_FILE_PATH.
 *
 * Calibration policy, based on the averaged crosstalk reading:
 *   < CAL_SKIP_ADC            : skip, offset 0, cal_result = 2
 *   CAL_SKIP_ADC..CAL_FAIL_ADC: offset = crosstalk / 2, cal_result = 1
 *   > CAL_FAIL_ADC            : fail, offset 0, cal_result = 0
 * A reset always restores the pdata thresholds with offset 0.
 *
 * Returns 1 on success, or the filp_open errno if the offset file
 * cannot be created.  A failed write is logged but not reported.
 */
static int proximity_store_offset(struct device *dev, bool do_calib)
{
struct taos_data *taos = dev_get_drvdata(dev);
struct file *offset_filp = NULL;
mm_segment_t old_fs;
int err = 0;
/* Averaged crosstalk with display/window in place (no target). */
u16 abnormal_ct = proximity_adc_read(taos);
u16 offset = 0;
if(do_calib) {
/* tap offset button */
pr_info("%s: calibration start\n", __func__);
if (abnormal_ct < CAL_SKIP_ADC) {
/* Crosstalk already low enough: no offset needed. */
taos->offset_value = 0;
taos->threshold_high = taos->pdata->prox_thresh_hi;
taos->threshold_low = taos->pdata->prox_thresh_low;
taos_thresh_set(taos);
taos->set_manual_thd = false;
taos->cal_result = 2;
pr_info("%s: crosstalk < %d, skip calibration\n",
__func__, CAL_SKIP_ADC);
} else if ((abnormal_ct >= CAL_SKIP_ADC)
&& (abnormal_ct <= CAL_FAIL_ADC)) {
/* Compensate: shift both thresholds up by half the crosstalk. */
offset = abnormal_ct / 2;
taos->offset_value = offset;
taos->threshold_high = taos->pdata->prox_thresh_hi
+ offset;
taos->threshold_low = taos->pdata->prox_thresh_low
+ offset;
taos_thresh_set(taos);
taos->set_manual_thd = false;
taos->cal_result = 1;
} else {
/* Crosstalk too large to compensate reliably. */
taos->offset_value = 0;
taos->threshold_high = taos->pdata->prox_thresh_hi;
taos->threshold_low = taos->pdata->prox_thresh_low;
taos_thresh_set(taos);
taos->set_manual_thd = false;
taos->cal_result = 0;
pr_info("%s: crosstalk > %d, calibration failed\n",
__func__, CAL_FAIL_ADC);
}
} else {
/* tap reset button */
pr_info("%s: reset\n", __func__);
taos->threshold_high = taos->pdata->prox_thresh_hi;
taos->threshold_low = taos->pdata->prox_thresh_low;
taos_thresh_set(taos);
taos->offset_value = 0;
taos->cal_result = 2;
taos->set_manual_thd = false;
}
pr_info("%s: abnormal_ct : %d, offset : %d\n", __func__, abnormal_ct,
taos->offset_value);
/* store offset in file */
old_fs = get_fs();
set_fs(KERNEL_DS);
offset_filp = filp_open(OFFSET_FILE_PATH,
O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (IS_ERR(offset_filp)) {
pr_err("%s: Can't open prox_offset file\n", __func__);
set_fs(old_fs);
err = PTR_ERR(offset_filp);
return err;
}
err = offset_filp->f_op->write(offset_filp,
(char *)&taos->offset_value, sizeof(u16), &offset_filp->f_pos);
if (err != sizeof(u16))
pr_err("%s: Can't write the offset data to file\n", __func__);
filp_close(offset_filp, current->files);
set_fs(old_fs);
return 1;
}
/*
 * sysfs store: "1" runs proximity offset calibration, "0" resets the
 * stored cancellation value; anything else is rejected with -EINVAL.
 */
static ssize_t proximity_cal_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t size)
{
	bool calibrate;
	int ret;

	if (sysfs_streq(buf, "1")) { /* calibrate cancelation value */
		calibrate = true;
	} else if (sysfs_streq(buf, "0")) { /* reset cancelation value */
		calibrate = false;
	} else {
		pr_err("%s: invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}

	ret = proximity_store_offset(dev, calibrate);
	if (ret < 0) {
		pr_err("%s: proximity_store_offset() failed\n", __func__);
		return ret;
	}

	return size;
}
/* sysfs show: stored offset plus the active high/low thresholds. */
static ssize_t proximity_cal_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	/* Refresh offset_value from the persisted calibration file. */
	proximity_open_offset(taos);

	return sprintf(buf, "%d,%d,%d\n", taos->offset_value,
		       taos->threshold_high, taos->threshold_low);
}
/* sysfs show: last calibration result (0 fail, 1 done, 2 skip/reset). */
static ssize_t prox_offset_pass_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", taos->cal_result);
}
/* sysfs show: min, mean and max of the last proximity-average sweep. */
static ssize_t proximity_avg_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	return sprintf(buf, "%d,%d,%d\n", taos->avg[0], taos->avg[1],
		       taos->avg[2]);
}
/*
 * sysfs store: "1" starts, "0" stops the periodic proximity-average
 * poll (hrtimer that queues work_prox_avg).
 *
 * Fix: return the number of bytes consumed (size) instead of the
 * hard-coded 1 - sysfs store callbacks returning a short count cause
 * userspace writes to be retried with the unconsumed remainder.
 */
static ssize_t proximity_avg_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int new_value = 0;

	if (sysfs_streq(buf, "1")) {
		new_value = true;
	} else if (sysfs_streq(buf, "0")) {
		new_value = false;
	} else {
		pr_err("%s: invalid value %d\n", __func__, *buf);
		return -EINVAL;
	}

	if (taos->prox_avg_enable == new_value)
		taos_dbgmsg("%s same status\n", __func__);
	else if (new_value == 1) {
		taos_dbgmsg("starting poll timer, delay %lldns\n",
			    ktime_to_ns(taos->prox_polling_time));
		hrtimer_start(&taos->prox_avg_timer,
			      taos->prox_polling_time, HRTIMER_MODE_REL);
		taos->prox_avg_enable = 1;
	} else {
		taos_dbgmsg("cancelling prox avg poll timer\n");
		hrtimer_cancel(&taos->prox_avg_timer);
		/* Make sure no queued averaging work survives the stop. */
		cancel_work_sync(&taos->work_prox_avg);
		taos->prox_avg_enable = 0;
	}

	return size;
}
/* sysfs show: the high (near-detect) threshold from the chip. */
static ssize_t proximity_thresh_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int hi = taos_proximity_get_threshold(taos, PRX_MAXTHRESHLO);

	pr_info("%s: THRESHOLD = %d\n", __func__, hi);
	return sprintf(buf, "prox_threshold = %d\n", hi);
}
/*
 * sysfs store: set the high proximity threshold from a decimal string.
 *
 * Fix: on kstrtoint failure the original still programmed the chip
 * with a fallback that was truncated through (u8), corrupting
 * thresholds above 255.  Reject bad input with the parse error and
 * only touch the hardware on success.
 */
static ssize_t proximity_thresh_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int thresh_value;
	int err;

	err = kstrtoint(buf, 10, &thresh_value);
	if (err < 0) {
		pr_err("%s, kstrtoint failed.", __func__);
		return err;
	}
	pr_err( "%s, value = %d\n",__func__,thresh_value);

	taos->threshold_high = thresh_value;
	taos_thresh_set(taos);
	usleep_range(20000, 21000);
	return size;
}
/* sysfs show: both proximity thresholds as "hi,low". */
static ssize_t thresh_high_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int hi, low;

	low = taos_proximity_get_threshold(taos, PRX_MINTHRESHLO);
	hi = taos_proximity_get_threshold(taos, PRX_MAXTHRESHLO);
	pr_info("%s: thresh_hi = %d, thresh_low = %d\n",
		__func__, hi, low);
	return sprintf(buf, "%d,%d\n", hi, low);
}
/*
 * sysfs store: set the high threshold manually and latch
 * set_manual_thd so calibration paths know a user override is active.
 *
 * Fix: on kstrtoint failure the original programmed a fallback value
 * truncated through (u8); now bad input is rejected with the parse
 * error and the hardware is only touched on success.
 */
static ssize_t thresh_high_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int thresh_value;
	int err;

	err = kstrtoint(buf, 10, &thresh_value);
	if (err < 0) {
		pr_err("%s, kstrtoint failed.", __func__);
		return err;
	}
	pr_info("%s, thresh_value = %d\n", __func__, thresh_value);

	taos->threshold_high = thresh_value;
	taos_thresh_set(taos);
	usleep_range(20000, 21000);
	taos->set_manual_thd = true;
	return size;
}
/* sysfs show: both proximity thresholds as "hi,low". */
static ssize_t thresh_low_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int hi, low;

	hi = taos_proximity_get_threshold(taos, PRX_MAXTHRESHLO);
	low = taos_proximity_get_threshold(taos, PRX_MINTHRESHLO);
	pr_info("%s: thresh_hi = %d, thresh_low = %d\n",
		__func__, hi, low);
	return sprintf(buf, "%d,%d\n", hi, low);
}
/*
 * sysfs store: set the low (far-detect) threshold manually and latch
 * set_manual_thd.
 *
 * Fix: on kstrtoint failure the original programmed a fallback value
 * truncated through (u8); now bad input is rejected with the parse
 * error and the hardware is only touched on success.
 */
static ssize_t thresh_low_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int thresh_value;
	int err;

	err = kstrtoint(buf, 10, &thresh_value);
	if (err < 0) {
		pr_err("%s, kstrtoint failed.", __func__);
		return err;
	}
	pr_info("%s, thresh_value = %d\n", __func__, thresh_value);

	taos->threshold_low = thresh_value;
	taos_thresh_set(taos);
	usleep_range(20000, 21000);
	taos->set_manual_thd = true;
	return size;
}
/* sysfs show: the raw-data trim subtracted from proximity readings. */
static ssize_t prox_trim_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", taos->pdata->prox_rawdata_trim);
}
/*
 * sysfs store: override the raw-data trim value.
 *
 * Fix: on kstrtoint failure the original wrote back a fallback value
 * truncated through (u8); now bad input is rejected with the parse
 * error and the stored trim is only updated on success.
 */
static ssize_t prox_trim_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct taos_data *taos = dev_get_drvdata(dev);
	int trim_value;
	int err;

	err = kstrtoint(buf, 10, &trim_value);
	if (err < 0) {
		pr_err("%s, kstrtoint failed.", __func__);
		return err;
	}
	pr_info("%s, trim_value = %d\n", __func__, trim_value);

	taos->pdata->prox_rawdata_trim = trim_value;
	return size;
}
/* sysfs show: sensor vendor string (factory "vendor" node). */
static ssize_t get_vendor_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", VENDOR_NAME);
}
/* sysfs show: sensor chip name string (factory "name" node). */
static ssize_t get_chip_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", CHIP_NAME);
}
/* Factory sysfs nodes shared by the light and proximity devices. */
static DEVICE_ATTR(vendor, S_IRUGO, get_vendor_name, NULL);
static DEVICE_ATTR(name, S_IRUGO, get_chip_name, NULL);
/* sysfs show: filtered ambient-light ADC value (adc/lux nodes). */
static ssize_t lightsensor_file_state_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", lightsensor_get_adcvalue(taos));
}
/* sysfs show: last raw red/green/blue/clear channel counts. */
static ssize_t lightsensor_raw_data_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct taos_data *taos = dev_get_drvdata(dev);

	return sprintf(buf, "%u,%u,%u,%u\n",
		       taos->reddata, taos->grndata, taos->bludata,
		       taos->clrdata);
}
/* ---- light sensor sysfs attributes ---- */
static struct device_attribute dev_attr_light_raw_data =
__ATTR(raw_data, S_IRUGO, lightsensor_raw_data_show, NULL);
static DEVICE_ATTR(adc, S_IRUGO, lightsensor_file_state_show, NULL);
static DEVICE_ATTR(lux, S_IRUGO, lightsensor_file_state_show, NULL);
static DEVICE_ATTR(poll_delay, S_IRUGO | S_IWUSR | S_IWGRP,
poll_delay_show, poll_delay_store);
static struct device_attribute dev_attr_light_enable =
__ATTR(enable, S_IRUGO | S_IWUSR | S_IWGRP,
light_enable_show, light_enable_store);
/* Attributes exposed on the light input device node. */
static struct attribute *light_sysfs_attrs[] = {
&dev_attr_light_enable.attr,
&dev_attr_poll_delay.attr,
NULL
};
static struct attribute_group light_attribute_group = {
.attrs = light_sysfs_attrs,
};
/* Factory attributes registered via sensors_register() for light. */
static struct device_attribute *lightsensor_additional_attributes[] = {
&dev_attr_adc,
&dev_attr_lux,
&dev_attr_vendor,
&dev_attr_name,
&dev_attr_light_raw_data,
NULL
};
/* ---- proximity sensor sysfs attributes ---- */
static struct device_attribute dev_attr_proximity_enable =
__ATTR(enable, S_IRUGO | S_IWUSR | S_IWGRP,
proximity_enable_show, proximity_enable_store);
static struct attribute *proximity_sysfs_attrs[] = {
&dev_attr_proximity_enable.attr,
NULL
};
static struct attribute_group proximity_attribute_group = {
.attrs = proximity_sysfs_attrs,
};
static struct device_attribute dev_attr_proximity_raw_data =
__ATTR(raw_data, S_IRUGO, proximity_state_show, NULL);
static DEVICE_ATTR(prox_cal, S_IRUGO | S_IWUSR, proximity_cal_show,
proximity_cal_store);
static DEVICE_ATTR(prox_avg, S_IRUGO|S_IWUSR, proximity_avg_show,
proximity_avg_store);
static DEVICE_ATTR(state, S_IRUGO, proximity_state_show, NULL);
static DEVICE_ATTR(prox_offset_pass, S_IRUGO,
prox_offset_pass_show, NULL);
static DEVICE_ATTR(prox_thresh, 0644, proximity_thresh_show,
proximity_thresh_store);
static DEVICE_ATTR(thresh_high, 0644, thresh_high_show,
thresh_high_store);
static DEVICE_ATTR(thresh_low, 0644, thresh_low_show,
thresh_low_store);
static DEVICE_ATTR(prox_trim, S_IRUGO| S_IWUSR | S_IWGRP,
prox_trim_show, prox_trim_store);
/* Factory attributes registered via sensors_register() for proximity. */
static struct device_attribute *prox_sensor_attrs[] = {
&dev_attr_state,
&dev_attr_prox_avg,
&dev_attr_vendor,
&dev_attr_name,
&dev_attr_proximity_raw_data,
&dev_attr_prox_cal,
&dev_attr_prox_offset_pass,
&dev_attr_prox_thresh,
&dev_attr_thresh_high,
&dev_attr_thresh_low,
&dev_attr_prox_trim,
NULL
};
/*
 * Return a smoothed lux value: keep the last ADC_BUFFER_NUM readings
 * in a ring, drop the single min and max, and average the rest.
 * The ring is seeded with the first value after the sensor is
 * (re-)enabled so the filter has no cold-start bias.
 */
static int lightsensor_get_adcvalue(struct taos_data *taos)
{
int i = 0;
int j = 0;
unsigned int adc_total = 0;
int adc_avr_value;
unsigned int adc_index = 0;
unsigned int adc_max = 0;
unsigned int adc_min = 0;
int value = 0;
/* get ADC */
value = taos_get_lux(taos);
adc_index = (taos->adc_index_count++) % ADC_BUFFER_NUM;
/*ADC buffer initialize (light sensor off ---> light sensor on) */
if (!taos->adc_buf_initialized) {
taos->adc_buf_initialized = true;
for (j = 0; j < ADC_BUFFER_NUM; j++)
taos->adc_value_buf[j] = value;
} else
taos->adc_value_buf[adc_index] = value;
/* Find extremes and sum over the whole ring. */
adc_max = taos->adc_value_buf[0];
adc_min = taos->adc_value_buf[0];
for (i = 0; i < ADC_BUFFER_NUM; i++) {
adc_total += taos->adc_value_buf[i];
if (adc_max < taos->adc_value_buf[i])
adc_max = taos->adc_value_buf[i];
if (adc_min > taos->adc_value_buf[i])
adc_min = taos->adc_value_buf[i];
}
/* Trimmed mean: exclude one min and one max sample. */
adc_avr_value = (adc_total-(adc_max+adc_min))/(ADC_BUFFER_NUM - 2);
if (taos->adc_index_count == ADC_BUFFER_NUM)
taos->adc_index_count = 0;
return adc_avr_value;
}
/*
 * Light poll work: read lux and colour temperature and push them to
 * the input subsystem (REL_MISC = lux, REL_WHEEL = cct).
 */
static void taos_work_func_light(struct work_struct *work)
{
struct taos_data *taos = container_of(work, struct taos_data,
work_light);
int adc = taos_get_lux(taos);
int cct = taos_get_cct(taos);
/* NOTE(review): lux is reported as adc + 1 - presumably to keep a
 * 0-lux reading nonzero so the input core does not drop the event;
 * confirm the HAL subtracts 1. */
input_report_rel(taos->light_input_dev, REL_MISC, adc + 1);
input_report_rel(taos->light_input_dev, REL_WHEEL, cct);
input_sync(taos->light_input_dev);
}
/*
 * Proximity interrupt bottom half: verify the chip responds, read the
 * ADC, decide near (STATE_CLOSE) vs far (STATE_FAR) from the active
 * threshold window, report it, reprogram the thresholds and clear the
 * chip's interrupt so the next one can fire.
 *
 * Fix: chipid was initialised to 0x69, which made the ID-poll while
 * loop dead code (its condition was false on entry), so the chip was
 * never actually probed and ret was always 0.  Initialise chipid to 0
 * so the loop polls until the chip answers with its ID or 10 tries
 * elapse.
 */
static void taos_work_func_prox(struct work_struct *work)
{
	struct taos_data *taos =
		container_of(work, struct taos_data, work_prox);
	int adc_data;
	int threshold_high;
	int threshold_low;
	u8 chipid = 0;
	int ret = 0;
	int i = 0;
	int proximity_value = 0;

	/* disable INT */
	disable_irq_nosync(taos->irq);

	/* Wait for the chip to answer with its ID (0x69), max 10 tries. */
	while (chipid != 0x69 && i < 10) {
		usleep_range(20000, 21000);
		ret = opt_i2c_read(taos, CHIPID, &chipid);
		i++;
	}
	if (ret < 0)
		gprintk("opt_i2c_read failed, err = %d\n", ret);

	/* change Threshold */
	mutex_lock(&taos->prox_mutex);
	adc_data = proximity_get_adc(taos);
	mutex_unlock(&taos->prox_mutex);
	threshold_high = taos_proximity_get_threshold(taos, PRX_MAXTHRESHLO);
	threshold_low = taos_proximity_get_threshold(taos, PRX_MINTHRESHLO);
	pr_err("%s: hi = %d, low = %d, adc_data = %d\n", __func__,
	       taos->threshold_high, taos->threshold_low, adc_data);

	if ((threshold_high == (taos->threshold_high)) &&
	    (adc_data >= (taos->threshold_high))) {
		/* Chip armed for "near" and the reading crossed it. */
		proximity_value = STATE_CLOSE;
		input_report_abs(taos->proximity_input_dev,
				 ABS_DISTANCE, proximity_value);
		input_sync(taos->proximity_input_dev);
		pr_info("[%s] prox value = %d\n", __func__, proximity_value);
	} else if ((threshold_high == (0xFFFF)) &&
		   (adc_data <= (taos->threshold_low))) {
		/* Chip armed for "far" (high thresh parked at 0xFFFF). */
		proximity_value = STATE_FAR;
		input_report_abs(taos->proximity_input_dev,
				 ABS_DISTANCE, proximity_value);
		input_sync(taos->proximity_input_dev);
		pr_info("[%s] prox value = %d\n", __func__, proximity_value);
	} else {
		pr_err("[%s]Error Case!adc=[%X], th_high=[%d], th_min=[%d]\n",
		       __func__, adc_data, threshold_high, threshold_low);
		goto exit;
	}
	taos->proximity_value = proximity_value;
	taos_thresh_set(taos);
	/* reset Interrupt pin */
	/* to active Interrupt, TMD2771x Interuupt pin shoud be reset. */
exit:
	i2c_smbus_write_byte(taos->i2c_client,
			     (CMD_REG|CMD_SPL_FN|CMD_PROXALS_INTCLR));
	/* enable INT */
	enable_irq(taos->irq);
}
/*
 * Periodic averaging work: take PROX_AVG_COUNT proximity samples
 * 40 ms apart and publish min/avg/max in taos->avg[] for the
 * prox_avg sysfs node.
 */
static void taos_work_func_prox_avg(struct work_struct *work)
{
struct taos_data *taos = container_of(work, struct taos_data,
work_prox_avg);
int proximity_value = 0;
int min = 0, max = 0, avg = 0;
int i = 0;
for (i = 0; i < PROX_AVG_COUNT; i++) {
mutex_lock(&taos->prox_mutex);
proximity_value = proximity_get_adc(taos);
mutex_unlock(&taos->prox_mutex);
if (proximity_value > TAOS_PROX_MIN) {
avg += proximity_value;
if (!i)
min = proximity_value;
if (proximity_value < min)
min = proximity_value;
if (proximity_value > max)
max = proximity_value;
} else {
proximity_value = TAOS_PROX_MIN;
}
msleep(40);
}
/* NOTE(review): avg is divided by the full sample count even though
 * readings <= TAOS_PROX_MIN are not accumulated, which biases the
 * average low when samples are skipped - confirm this is intended. */
avg /= i;
taos->avg[0] = min;
taos->avg[1] = avg;
taos->avg[2] = max;
}
/* This function is for light sensor. It operates every a few seconds.
 * It asks for work to be done on a thread because i2c needs a thread
 * context (slow and blocking) and then reschedules the timer to run again.
 */
static enum hrtimer_restart taos_timer_func(struct hrtimer *timer)
{
struct taos_data *taos = container_of(timer, struct taos_data, timer);
queue_work(taos->wq, &taos->work_light);
/* Re-arm relative to now at the configured light poll delay. */
hrtimer_forward_now(&taos->timer, taos->light_poll_delay);
return HRTIMER_RESTART;
}
/* Periodic trigger for the proximity averaging work (prox_avg node). */
static enum hrtimer_restart taos_prox_timer_func(struct hrtimer *timer)
{
struct taos_data *taos = container_of(timer, struct taos_data,
prox_avg_timer);
queue_work(taos->wq_avg, &taos->work_prox_avg);
hrtimer_forward_now(&taos->prox_avg_timer, taos->prox_polling_time);
return HRTIMER_RESTART;
}
/* interrupt happened due to transition/change of near/far proximity state */
irqreturn_t taos_irq_handler(int irq, void *data)
{
struct taos_data *ip = data;
if (ip->irq != -1) {
/* Hold a 3 s wakelock so the work can run before suspend. */
wake_lock_timeout(&ip->prx_wake_lock, 3*HZ);
queue_work(ip->wq, &ip->work_prox);
}
/* NOTE(review): pr_err on every interrupt is noisy for a normal
 * event - consider pr_debug; kept as-is here. */
pr_err("taos interrupt handler is called\n");
return IRQ_HANDLED;
}
/*
 * Claim the ALS/proximity interrupt GPIO, map it to an IRQ and install
 * taos_irq_handler as a threaded handler (falling edge, oneshot).
 * The IRQ is left disabled; enable paths turn it on when proximity is
 * activated.  On success taos->irq is set and 0 is returned; on
 * failure the GPIO is released and the errno returned.
 */
static int taos_setup_irq(struct taos_data *taos)
{
int rc = -EIO;
struct taos_platform_data *pdata = taos->pdata;
int irq;
taos_dbgmsg("start\n");
rc = gpio_request(pdata->als_int, "gpio_proximity_out");
if (rc < 0) {
pr_err("%s: gpio %d request failed (%d)\n",
__func__, pdata->als_int, rc);
return rc;
}
rc = gpio_direction_input(pdata->als_int);
if (rc < 0) {
pr_err("%s: failed to set gpio %d as input (%d)\n",
__func__, pdata->als_int, rc);
goto err_gpio_direction_input;
}
irq = gpio_to_irq(pdata->als_int);
rc = request_threaded_irq(irq, NULL,taos_irq_handler,
IRQF_TRIGGER_FALLING|IRQF_ONESHOT,
"proximity_int",taos);
if (rc < 0) {
pr_err("%s: request_irq(%d) failed for gpio %d (%d)\n",
__func__, irq,
pdata->als_int, rc);
goto err_request_irq;
}
/* start with interrupts disabled */
disable_irq(irq);
taos->irq = irq;
taos_dbgmsg("success\n");
goto done;
err_request_irq:
err_gpio_direction_input:
gpio_free(pdata->als_int);
done:
return rc;
}
/*
 * Fetch the persisted proximity offset at probe time.  If the offset
 * file cannot be read, fall back to 0 (and clear the cached value).
 */
static int taos_get_initial_offset(struct taos_data *taos)
{
	u8 stored;

	if (proximity_open_offset(taos) < 0) {
		taos->offset_value = 0;
		stored = 0;
	} else {
		stored = taos->offset_value;
	}

	pr_err("%s: initial offset = %d\n", __func__, stored);
	return stored;
}
#ifdef CONFIG_OF
/* device tree parsing function */
/*
 * Fill @pdata from the "taos,*" devicetree properties.  The interrupt
 * GPIO is mandatory; the enable GPIO is optional (-1 when absent).
 * NOTE(review): the return codes of the of_property_read_u32() calls
 * are collected in ret but never checked, so missing properties leave
 * fields zero-initialised - confirm that is acceptable for each one.
 */
static int taos_parse_dt(struct device *dev, struct taos_platform_data *pdata)
{
int ret = 0;
struct device_node *np = dev->of_node;
pdata->als_int = of_get_named_gpio_flags(np, "taos,irq_gpio",
0, &pdata->als_int_flags);
pdata->enable = of_get_named_gpio(np, "taos,en", 0);
if (pdata->enable < 0) {
pr_err("%s : get taos,en(%d) error\n", __func__, pdata->enable);
pdata->enable = -1;
}
ret = of_property_read_u32(np, "taos,prox_rawdata_trim",
&pdata->prox_rawdata_trim);
ret = of_property_read_u32(np, "taos,prox_thresh_hi",
&pdata->prox_thresh_hi);
ret = of_property_read_u32(np, "taos,prox_thresh_low",
&pdata->prox_thresh_low);
ret = of_property_read_u32(np, "taos,als_time",
&pdata->als_time);
ret = of_property_read_u32(np, "taos,intr_filter",
&pdata->intr_filter);
ret = of_property_read_u32(np, "taos,prox_pulsecnt",
&pdata->prox_pulsecnt);
ret = of_property_read_u32(np, "taos,als_gain",
&pdata->als_gain);
ret = of_property_read_u32(np, "taos,coef_atime",
&pdata->coef_atime);
ret = of_property_read_u32(np, "taos,ga",
&pdata->ga);
ret = of_property_read_u32(np, "taos,coef_a",
&pdata->coef_a);
ret = of_property_read_u32(np, "taos,coef_b",
&pdata->coef_b);
ret = of_property_read_u32(np, "taos,coef_c",
&pdata->coef_c);
ret = of_property_read_u32(np, "taos,coef_d",
&pdata->coef_d);
pr_info("%s irq_gpio:%d and enable gpio %d\n", __func__,
pdata->als_int, pdata->enable);
return 0;
}
#else
/*
 * Non-devicetree build: nothing to parse.
 * Fix: the original stub declared "struct taos_platform_data" with no
 * parameter name, which is invalid C and broke non-OF builds - give
 * the parameter its name to match the CONFIG_OF prototype.
 */
static int taos_parse_dt(struct device *dev,
		struct taos_platform_data *pdata)
{
	return -ENODEV;
}
#endif
/*
 * i2c probe: allocate driver state, obtain platform data (DT or
 * legacy), power the part up, verify the chip ID, then register the
 * proximity and light input devices with their sysfs/factory nodes,
 * create the work queues and hrtimers, and finally hook up the
 * interrupt.  Errors unwind in reverse order via the labels below.
 */
static int taos_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = -ENODEV,err;
struct input_dev *input_dev;
struct taos_data *taos;
struct taos_platform_data *pdata = NULL;
pr_info("%s: taos_i2c_probe Start\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
pr_err("%s: i2c functionality check failed!\n", __func__);
return ret;
}
taos = kzalloc(sizeof(struct taos_data), GFP_KERNEL);
if (!taos) {
pr_err("%s: failed to alloc memory for module data\n",
__func__);
ret = -ENOMEM;
goto done;
}
/* Platform data: from devicetree when available, else board file. */
if(client->dev.of_node) {
pdata = devm_kzalloc (&client->dev ,
sizeof(struct taos_platform_data ), GFP_KERNEL);
if(!pdata) {
dev_err(&client->dev, "Failed to allocate memory\n");
ret = -ENOMEM;
goto err_taos_data_free;
}
err = taos_parse_dt(&client->dev, pdata);
if(err)
goto err_devicetree;
} else
pdata = client->dev.platform_data;
if (!pdata) {
pr_err("%s: missing pdata!\n", __func__);
goto err_taos_data_free;
}
taos->pdata = pdata;
taos->i2c_client = client;
i2c_set_clientdata(client, taos);
taos->lux = 0;
/* Restore persisted calibration before thresholds are computed. */
taos->offset_value = taos_get_initial_offset(taos);
#ifdef CONFIG_PROX_WINDOW_TYPE
proximity_open_window_type(taos);
#endif
taos->set_manual_thd = false;
/* Power rails and LED-enable GPIO before talking to the chip. */
sensor_power_on_vdd(taos,1);
ret = tmd3782_setup_leden_gpio(taos);
if (ret) {
pr_err("%s: could not setup leden_gpio\n", __func__);
goto err_setup_leden_gpio;
}
tmd3782_leden_gpio_onoff(taos, 1);
/* ID Check */
ret = i2c_smbus_read_byte_data(client, CMD_REG | CHIPID);
if (ret != CHIP_ID) {
pr_err("%s: i2c read error [%X]\n", __func__, ret);
goto err_chip_id_or_i2c_error;
}
taos->threshold_high = taos->pdata->prox_thresh_hi + taos->offset_value;
taos->threshold_low = taos->pdata->prox_thresh_low + taos->offset_value;
mutex_init(&taos->prox_mutex);
/* wake lock init */
wake_lock_init(&taos->prx_wake_lock, WAKE_LOCK_SUSPEND,
"prx_wake_lock");
mutex_init(&taos->power_lock);
/* allocate proximity input_device */
input_dev = input_allocate_device();
if (!input_dev) {
pr_err("%s: could not allocate input device\n", __func__);
goto err_input_allocate_device_proximity;
}
taos->proximity_input_dev = input_dev;
input_set_drvdata(input_dev, taos);
input_dev->name = "proximity_sensor";
input_set_capability(input_dev, EV_ABS, ABS_DISTANCE);
input_set_abs_params(input_dev, ABS_DISTANCE, 0, 1, 0, 0);
taos_dbgmsg("registering proximity input device\n");
ret = input_register_device(input_dev);
if (ret < 0) {
pr_err("%s: could not register input device\n", __func__);
input_free_device(input_dev);
goto err_input_register_device_proximity;
}
ret = sensors_register(taos->proximity_dev, taos,
prox_sensor_attrs, MODULE_NAME_PROX);
/*factory attributs*/
if (ret < 0) {
pr_err("%s: could not registersensors_register\n", __func__);
input_unregister_device(input_dev);
goto err_input_register_device_proximity;
}
ret = sensors_create_symlink(&input_dev->dev.kobj, input_dev->name);
if (ret < 0) {
input_unregister_device(input_dev);
sensors_unregister(taos->proximity_dev, prox_sensor_attrs);
goto err_input_register_device_proximity;
}
ret = sysfs_create_group(&input_dev->dev.kobj,
&proximity_attribute_group);
if (ret < 0) {
pr_err("%s: could not create sysfs group\n", __func__);
input_unregister_device(input_dev);
sensors_unregister(taos->proximity_dev, prox_sensor_attrs);
sensors_remove_symlink(&input_dev->dev.kobj,
taos->proximity_input_dev->name);
goto err_input_register_device_proximity;
}
/* hrtimer settings. we poll for light values using a timer. */
hrtimer_init(&taos->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
taos->light_poll_delay = ns_to_ktime(200 * NSEC_PER_MSEC);
taos->timer.function = taos_timer_func;
/* the timer just fires off a work queue request. we need a thread
to read the i2c (can be slow and blocking). */
taos->wq = create_singlethread_workqueue("taos_wq");
if (!taos->wq) {
ret = -ENOMEM;
pr_err("%s: could not create workqueue\n", __func__);
goto err_create_workqueue;
}
taos->wq_avg = create_singlethread_workqueue("taos_wq_avg");
if (!taos->wq_avg) {
ret = -ENOMEM;
pr_err("%s: could not create workqueue\n", __func__);
goto err_create_avg_workqueue;
}
/* this is the thread function we run on the work queue */
INIT_WORK(&taos->work_light, taos_work_func_light);
INIT_WORK(&taos->work_prox, taos_work_func_prox);
INIT_WORK(&taos->work_prox_avg, taos_work_func_prox_avg);
taos->prox_avg_enable = 0;
hrtimer_init(&taos->prox_avg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
taos->prox_polling_time = ns_to_ktime(2000 * NSEC_PER_MSEC);
taos->prox_avg_timer.function = taos_prox_timer_func;
/* allocate lightsensor-level input_device */
input_dev = input_allocate_device();
if (!input_dev) {
pr_err("%s: could not allocate input device\n", __func__);
ret = -ENOMEM;
goto err_input_allocate_device_light;
}
input_set_drvdata(input_dev, taos);
input_dev->name = "light_sensor";
input_set_capability(input_dev, EV_REL, REL_MISC);
input_set_capability(input_dev, EV_REL, REL_WHEEL);
taos_dbgmsg("registering lightsensor-level input device\n");
ret = input_register_device(input_dev);
if (ret < 0) {
pr_err("%s: could not register input device\n", __func__);
input_free_device(input_dev);
goto err_input_register_device_light;
}
ret = sensors_register(taos->light_dev, taos,
lightsensor_additional_attributes, "light_sensor");
if (ret < 0) {
pr_err("%s: cound not register light sensor device(%d).\n",
__func__, ret);
input_unregister_device(input_dev);
goto err_input_register_device_light;
}
ret = sensors_create_symlink(&input_dev->dev.kobj, input_dev->name);
if (ret < 0) {
input_unregister_device(input_dev);
sensors_unregister(taos->light_dev,
lightsensor_additional_attributes);
goto out_sensor_register_failed1;
}
ret = sysfs_create_group(&input_dev->dev.kobj, &light_attribute_group);
if (ret < 0) {
pr_err("%s: could not create sysfs group\n", __func__);
input_unregister_device(input_dev);
sensors_unregister(taos->light_dev,
lightsensor_additional_attributes);
/* NOTE(review): taos->light_input_dev is only assigned below, so
 * it is still NULL on this error path - this dereference looks
 * like a NULL-pointer bug; confirm and use input_dev instead. */
sensors_remove_symlink(&taos->light_input_dev->dev.kobj,
taos->proximity_input_dev->name);
goto out_sensor_register_failed1;
}
taos->light_input_dev = input_dev;
ret = taos_setup_irq(taos);
if (ret < 0) {
pr_err("%s: could not setup irq\n", __func__);
goto err_setup_irq;
}
#ifndef CONFIG_SENSORS_TMD3782S_VDD_LEDA
tmd3782_leden_gpio_onoff(taos, OFF);
usleep_range(20000, 21000);
#endif
goto done;
/* error, unwind it all */
err_devicetree:
pr_info("%s: error in device tree\n", __func__);
out_sensor_register_failed1:
sensors_unregister(taos->light_dev, lightsensor_additional_attributes);
err_setup_irq:
err_input_register_device_light:
err_input_allocate_device_light:
destroy_workqueue(taos->wq_avg);
err_create_avg_workqueue:
destroy_workqueue(taos->wq);
err_create_workqueue:
sysfs_remove_group(&taos->proximity_input_dev->dev.kobj,
&proximity_attribute_group);
err_input_register_device_proximity:
err_input_allocate_device_proximity:
/* NOTE(review): taos->irq is 0/unset on paths entered before
 * taos_setup_irq() succeeded - confirm free_irq(0, ...) is benign
 * here or gate it on taos->irq. */
free_irq(taos->irq, 0);
gpio_free(taos->pdata->als_int);
mutex_destroy(&taos->power_lock);
wake_lock_destroy(&taos->prx_wake_lock);
err_chip_id_or_i2c_error:
err_setup_leden_gpio:
if (taos->pdata->enable >= 0)
gpio_free(taos->pdata->enable);
err_taos_data_free:
kfree(taos);
done:
return ret;
}
static int taos_suspend(struct device *dev)
{
/* We disable power only if proximity is disabled. If proximity
is enabled, we leave power on because proximity is allowed
to wake up device. We remove power without changing
taos->power_state because we use that state in resume.
*/
struct i2c_client *client = to_i2c_client(dev);
struct taos_data *taos = i2c_get_clientdata(client);
/* Always stop light polling while suspended. */
if (taos->power_state & LIGHT_ENABLED)
taos_light_disable(taos);
/* power_state == LIGHT_ENABLED means ONLY light is on (no
 * proximity wake source), so the chip can be powered down. */
if (taos->power_state == LIGHT_ENABLED)
taos_chip_off(taos);
return 0;
}
static int taos_resume(struct device *dev)
{
/* Turn power back on if we were before suspend. */
struct i2c_client *client = to_i2c_client(dev);
struct taos_data *taos = i2c_get_clientdata(client);
/* Mirror of taos_suspend(): re-power the chip only if it was
 * powered down there (light-only state), then restart polling. */
if (taos->power_state == LIGHT_ENABLED)
taos_chip_on(taos, false);
if (taos->power_state & LIGHT_ENABLED)
taos_light_enable(taos);
return 0;
}
static int taos_i2c_remove(struct i2c_client *client)
{
struct taos_data *taos = i2c_get_clientdata(client);
sensors_unregister(taos->proximity_dev, prox_sensor_attrs);
sensors_remove_symlink(&taos->proximity_input_dev->dev.kobj,
taos->proximity_input_dev->name);
sysfs_remove_group(&taos->proximity_input_dev->dev.kobj,
&proximity_attribute_group);
input_unregister_device(taos->light_input_dev);
sensors_unregister(taos->light_dev, lightsensor_additional_attributes);
sensors_remove_symlink(&taos->light_input_dev->dev.kobj,
taos->proximity_input_dev->name);
sysfs_remove_group(&taos->light_input_dev->dev.kobj,
&light_attribute_group);
input_unregister_device(taos->proximity_input_dev);
free_irq(taos->irq, NULL);
gpio_free(taos->pdata->als_int);
if (taos->power_state) {
taos->power_state = 0;
if (taos->power_state & LIGHT_ENABLED)
taos_light_disable(taos);
taos->pdata->power(false);
sensor_power_on_vdd(taos,0);
regulator_put(taos->vdd_2p85);
regulator_put(taos->lvs1_1p8);
tmd3782_leden_gpio_onoff(taos, 0);
if (taos->pdata->enable >= 0)
gpio_free(taos->pdata->enable);
}
destroy_workqueue(taos->wq);
destroy_workqueue(taos->wq_avg);
mutex_destroy(&taos->power_lock);
wake_lock_destroy(&taos->prx_wake_lock);
kfree(taos);
return 0;
}
/* Legacy i2c device-id table (non-devicetree instantiation). */
static const struct i2c_device_id taos_device_id[] = {
{"taos", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, taos_device_id);
/* System PM hooks; see taos_suspend()/taos_resume() for the policy. */
static const struct dev_pm_ops taos_pm_ops = {
.suspend = taos_suspend,
.resume = taos_resume
};
#ifdef CONFIG_OF
/* Devicetree match table ("taos,tmd3782"). */
static struct of_device_id tm3782_match_table[] = {
	{ .compatible = "taos,tmd3782",},
	{},
};
#else
/*
 * Fix: the stub must define tm3782_match_table - the i2c_driver below
 * references that symbol, but the original defined tm2672_match_table
 * (typo), breaking non-devicetree builds.
 */
#define tm3782_match_table NULL
#endif
/* Driver registration glue: binds probe/remove/PM to the i2c core. */
static struct i2c_driver taos_i2c_driver = {
.driver = {
.name = "taos",
.owner = THIS_MODULE,
.pm = &taos_pm_ops,
.of_match_table = tm3782_match_table,
},
.probe = taos_i2c_probe,
.remove = taos_i2c_remove,
.id_table = taos_device_id,
};
/* Module init/exit: plain i2c driver registration. */
static int __init taos_init(void)
{
return i2c_add_driver(&taos_i2c_driver);
}
static void __exit taos_exit(void)
{
i2c_del_driver(&taos_i2c_driver);
}
module_init(taos_init);
module_exit(taos_exit);
MODULE_AUTHOR("SAMSUNG");
MODULE_DESCRIPTION("Optical Sensor driver for taos");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ezterry/kernel-biff-testing | drivers/input/evdev.c | 1 | 21368 | /*
* Event char devices, giving access to raw input device events.
*
* Copyright (c) 1999-2002 Vojtech Pavlik
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
/* evdev char devices occupy 32 minors starting at 64. */
#define EVDEV_MINOR_BASE 64
#define EVDEV_MINORS 32
/* Per-client event ring sizing: at least 64 events; presumably scaled
 * by EVDEV_BUF_PACKETS * the device's packet size elsewhere - confirm
 * against the buffer-size computation later in this file. */
#define EVDEV_MIN_BUFFER_SIZE 64U
#define EVDEV_BUF_PACKETS 8
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/wakelock.h>
#include "input-compat.h"
/* One evdev character device wrapping a single input_handle. */
struct evdev {
int open;
int minor;
struct input_handle handle;
wait_queue_head_t wait; /* readers sleep here for new events */
struct evdev_client *grab; /* exclusive client, RCU-managed */
struct list_head client_list;
spinlock_t client_lock; /* protects client_list */
struct mutex mutex;
struct device dev;
bool exist; /* cleared when backing input device goes away */
};
/*
 * Per-open-file reader state: a ring buffer of events between the
 * input core and userspace read().  bufsize must be a power of two
 * (head/tail are masked with bufsize - 1 in evdev_pass_event()).
 */
struct evdev_client {
int head;
int tail;
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
struct wake_lock wake_lock; /* held while events are pending? confirm */
char name[28];
int bufsize;
struct input_event buffer[]; /* flexible ring storage */
};
/* Registered evdev devices indexed by minor; guarded by the mutex. */
static struct evdev *evdev_table[EVDEV_MINORS];
static DEFINE_MUTEX(evdev_table_mutex);
/*
 * Append one event to a client's ring buffer and signal SIGIO on
 * EV_SYN.  When the ring is full (head catches tail) the do/while
 * keeps advancing head, overwriting the oldest events rather than
 * dropping the new one.
 */
static void evdev_pass_event(struct evdev_client *client,
struct input_event *event)
{
/*
* Interrupts are disabled, just acquire the lock.
* Make sure we don't leave with the client buffer
* "empty" by having client->head == client->tail.
*/
spin_lock(&client->buffer_lock);
do {
client->buffer[client->head++] = *event;
/* bufsize is a power of two, so masking wraps the index. */
client->head &= client->bufsize - 1;
} while (client->head == client->tail);
spin_unlock(&client->buffer_lock);
if (event->type == EV_SYN)
kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
/*
 * Pass incoming event to all connected clients.
 *
 * Timestamps use the monotonic clock (ktime_get_ts) rather than wall
 * time.  If a client holds the grab, only it receives the event;
 * otherwise every client on the RCU-protected list gets a copy.
 * Finally, sleeping readers are woken.
 */
static void evdev_event(struct input_handle *handle,
unsigned int type, unsigned int code, int value)
{
struct evdev *evdev = handle->private;
struct evdev_client *client;
struct input_event event;
struct timespec ts;
ktime_get_ts(&ts);
event.time.tv_sec = ts.tv_sec;
event.time.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
event.type = type;
event.code = code;
event.value = value;
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
evdev_pass_event(client, &event);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
evdev_pass_event(client, &event);
rcu_read_unlock();
wake_up_interruptible(&evdev->wait);
}
/* ->fasync(): (de)register this client for SIGIO delivery (O_ASYNC). */
static int evdev_fasync(int fd, struct file *file, int on)
{
	struct evdev_client *client = file->private_data;
	return fasync_helper(fd, file, on, &client->fasync);
}
/*
 * ->flush(): forward the flush to the underlying input device.
 * Fails with -ENODEV once the device has been disconnected; may also
 * fail with -EINTR if the mutex wait is interrupted by a signal.
 */
static int evdev_flush(struct file *file, fl_owner_t id)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int ret;

	ret = mutex_lock_interruptible(&evdev->mutex);
	if (ret)
		return ret;

	ret = evdev->exist ? input_flush_device(&evdev->handle, file)
			   : -ENODEV;

	mutex_unlock(&evdev->mutex);
	return ret;
}
/*
 * Device-model release callback: runs when the last reference to
 * evdev->dev is dropped.  Releases our hold on the input device and
 * frees the evdev structure itself.
 */
static void evdev_free(struct device *dev)
{
	struct evdev *evdev = container_of(dev, struct evdev, dev);
	input_put_device(evdev->handle.dev);
	kfree(evdev);
}
/*
* Grabs an event device (along with underlying input device).
* This function is called with evdev->mutex taken.
*/
/*
 * Give @client exclusive access to the device (EVIOCGRAB path).
 * Returns -EBUSY if another client already holds the grab, or the
 * input-core error from input_grab_device().  synchronize_rcu()
 * guarantees concurrent evdev_event() readers see the new grab
 * pointer before we report success.
 */
static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
{
	int error;
	if (evdev->grab)
		return -EBUSY;
	error = input_grab_device(&evdev->handle);
	if (error)
		return error;
	rcu_assign_pointer(evdev->grab, client);
	synchronize_rcu();
	return 0;
}
/*
 * Release an exclusive grab.  Only the grabbing client may ungrab
 * (-EINVAL otherwise).  synchronize_rcu() makes sure no evdev_event()
 * reader still delivers to @client before the device is released.
 */
static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
	if (evdev->grab != client)
		return -EINVAL;
	rcu_assign_pointer(evdev->grab, NULL);
	synchronize_rcu();
	input_release_device(&evdev->handle);
	return 0;
}
/*
 * Add @client to the device's delivery list.  The spinlock orders the
 * insertion against list walkers; synchronize_rcu() lets in-flight
 * RCU readers finish before the caller proceeds.
 */
static void evdev_attach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_add_tail_rcu(&client->node, &evdev->client_list);
	spin_unlock(&evdev->client_lock);
	synchronize_rcu();
}
/*
 * Remove @client from the delivery list.  After synchronize_rcu()
 * returns, no evdev_event() reader can still hold a reference to the
 * client, so the caller may free it.
 */
static void evdev_detach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&evdev->client_lock);
	synchronize_rcu();
}
/*
 * Register one more opener; open the underlying input device on the
 * 0 -> 1 transition.  Returns -ENODEV after disconnect, the
 * input-core error (with the count rolled back) on open failure, or
 * 0 otherwise (retval is still 0 from mutex_lock_interruptible()).
 */
static int evdev_open_device(struct evdev *evdev)
{
	int retval;
	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;
	if (!evdev->exist)
		retval = -ENODEV;
	else if (!evdev->open++) {
		retval = input_open_device(&evdev->handle);
		if (retval)
			evdev->open--; /* undo the count on failure */
	}
	mutex_unlock(&evdev->mutex);
	return retval;
}
/*
 * Drop one opener; close the underlying input device on the 1 -> 0
 * transition.  Note the && short-circuits: once the device no longer
 * exists the count is left untouched — evdev_cleanup() already
 * flushed and closed the handle in that case.
 */
static void evdev_close_device(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	if (evdev->exist && !--evdev->open)
		input_close_device(&evdev->handle);
	mutex_unlock(&evdev->mutex);
}
/*
* Wake up users waiting for IO so they can disconnect from
* dead device.
*/
/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.  Sends SIGIO/POLL_HUP to async listeners and wakes
 * all sleepers on evdev->wait.
 */
static void evdev_hangup(struct evdev *evdev)
{
	struct evdev_client *client;
	spin_lock(&evdev->client_lock);
	list_for_each_entry(client, &evdev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&evdev->client_lock);
	wake_up_interruptible(&evdev->wait);
}
/*
 * ->release(): tear down one client.  Drops any grab held by this
 * client, unlinks it from the device, destroys its wakelock, frees
 * it, then drops our opener count and our device reference.
 */
static int evdev_release(struct inode *inode, struct file *file)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	mutex_lock(&evdev->mutex);
	if (evdev->grab == client)
		evdev_ungrab(evdev, client);
	mutex_unlock(&evdev->mutex);
	/* after this no evdev_event() reader can still see the client */
	evdev_detach_client(evdev, client);
	wake_lock_destroy(&client->wake_lock);
	kfree(client);
	evdev_close_device(evdev);
	put_device(&evdev->dev);
	return 0;
}
/*
 * Size the per-client event ring: at least EVDEV_MIN_BUFFER_SIZE,
 * otherwise EVDEV_BUF_PACKETS times the device's hinted packet size,
 * rounded up to a power of two (the ring indices rely on that).
 */
static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
	unsigned int wanted = dev->hint_events_per_packet * EVDEV_BUF_PACKETS;

	if (wanted < EVDEV_MIN_BUFFER_SIZE)
		wanted = EVDEV_MIN_BUFFER_SIZE;

	return roundup_pow_of_two(wanted);
}
/*
 * ->open(): look up the device by minor, allocate a client with its
 * event ring, attach it, and open the underlying input device.
 *
 * Fix: the err_free_client path used to kfree() the client without
 * calling wake_lock_destroy(), leaking the wakelock registered by
 * wake_lock_init() above (the release path does destroy it).
 */
static int evdev_open(struct inode *inode, struct file *file)
{
	struct evdev *evdev;
	struct evdev_client *client;
	int i = iminor(inode) - EVDEV_MINOR_BASE;
	unsigned int bufsize;
	int error;

	if (i >= EVDEV_MINORS)
		return -ENODEV;

	/* take a device reference under the table lock so it can't vanish */
	error = mutex_lock_interruptible(&evdev_table_mutex);
	if (error)
		return error;
	evdev = evdev_table[i];
	if (evdev)
		get_device(&evdev->dev);
	mutex_unlock(&evdev_table_mutex);

	if (!evdev)
		return -ENODEV;

	bufsize = evdev_compute_buffer_size(evdev->handle.dev);

	/* client struct plus its flexible-array event ring in one allocation */
	client = kzalloc(sizeof(struct evdev_client) +
				bufsize * sizeof(struct input_event),
			 GFP_KERNEL);
	if (!client) {
		error = -ENOMEM;
		goto err_put_evdev;
	}

	client->bufsize = bufsize;
	spin_lock_init(&client->buffer_lock);
	snprintf(client->name, sizeof(client->name), "%s-%d",
			dev_name(&evdev->dev), task_tgid_vnr(current));
	wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
	client->evdev = evdev;
	evdev_attach_client(evdev, client);

	error = evdev_open_device(evdev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	nonseekable_open(inode, file);

	return 0;

 err_free_client:
	evdev_detach_client(evdev, client);
	wake_lock_destroy(&client->wake_lock);	/* undo wake_lock_init() */
	kfree(client);
 err_put_evdev:
	put_device(&evdev->dev);
	return error;
}
/*
 * ->write(): inject events from userspace into the device.
 *
 * Fix: the loop condition used to be "retval < count", so for a
 * count that is not a multiple of input_event_size() the final
 * input_event_from_user() would read a full event struct past the
 * end of the user buffer.  Match the upstream fix: reject short
 * (but non-zero) buffers with -EINVAL and only iterate while a
 * whole event still fits.
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t evdev_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	int retval;

	/* a non-empty buffer must hold at least one whole event */
	if (count != 0 && count < input_event_size())
		return -EINVAL;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;
	if (!evdev->exist) {
		retval = -ENODEV;
		goto out;
	}
	/* consume only whole events; never read past the user buffer */
	while (retval + input_event_size() <= count) {
		if (input_event_from_user(buffer + retval, &event)) {
			retval = -EFAULT;
			goto out;
		}
		input_inject_event(&evdev->handle,
				   event.type, event.code, event.value);
		retval += input_event_size();
	}
 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}
/*
 * Pop one event from the client's ring buffer, if any.
 * Returns nonzero and fills *event when an event was available.
 * Uses the irq-disabling lock variant because the producer side
 * (evdev_pass_event) runs with interrupts off.
 */
static int evdev_fetch_next_event(struct evdev_client *client,
				  struct input_event *event)
{
	int pending;

	spin_lock_irq(&client->buffer_lock);

	pending = (client->head != client->tail);
	if (pending) {
		*event = client->buffer[client->tail];
		/* bufsize is a power of two, so masking wraps the index */
		client->tail = (client->tail + 1) & (client->bufsize - 1);
	}

	spin_unlock_irq(&client->buffer_lock);

	return pending;
}
/*
 * ->read(): copy queued events to userspace.
 * The buffer must hold at least one event.  Without O_NONBLOCK the
 * caller sleeps until an event arrives or the device disappears.
 * Note retval is reused: first as the wait result (0 on success),
 * then as the running byte count for the copy loop.
 */
static ssize_t evdev_read(struct file *file, char __user *buffer,
			  size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	int retval;
	if (count < input_event_size())
		return -EINVAL;
	/* nothing queued and non-blocking: bail out immediately */
	if (client->head == client->tail && evdev->exist &&
	    (file->f_flags & O_NONBLOCK))
		return -EAGAIN;
	retval = wait_event_interruptible(evdev->wait,
		client->head != client->tail || !evdev->exist);
	if (retval)
		return retval;
	if (!evdev->exist)
		return -ENODEV;
	/* drain as many whole events as both buffers allow */
	while (retval + input_event_size() <= count &&
	       evdev_fetch_next_event(client, &event)) {
		if (input_event_to_user(buffer + retval, &event))
			return -EFAULT;
		retval += input_event_size();
	}
	return retval;
}
/* No kernel lock - fine */
static unsigned int evdev_poll(struct file *file, poll_table *wait)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
unsigned int mask;
poll_wait(file, &evdev->wait, wait);
mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR;
if (client->head != client->tail)
mask |= POLLIN | POLLRDNORM;
return mask;
}
#ifdef CONFIG_COMPAT
#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
#define BITS_TO_LONGS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)
#ifdef __BIG_ENDIAN
/*
 * Copy a kernel bitmap to userspace, truncated to @maxlen bytes.
 * Big-endian compat variant: on a 64-bit BE kernel each native long
 * holds two compat longs in the opposite order a 32-bit reader
 * expects, so adjacent 32-bit words are emitted swapped — the source
 * index "i + 1 - ((i % 2) << 1)" maps 0,1,2,3... to 1,0,3,2...
 * Returns bytes copied or -EFAULT.
 */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;
	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;
		/* copy one compat word at a time, pair-swapped */
		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		/* native caller: layout already matches */
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;
		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}
	return len;
}
#else
/*
 * Copy a kernel bitmap to userspace, truncated to @maxlen bytes.
 * Little-endian variant: compat and native layouts match, so only
 * the length differs (compat longs vs native longs).
 * Returns bytes copied or -EFAULT.
 */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len;

	if (compat)
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
	else
		len = BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	if (copy_to_user(p, bits, len))
		return -EFAULT;

	return len;
}
#endif /* __BIG_ENDIAN */
#else
/*
 * Copy a kernel bitmap to userspace, truncated to @maxlen bytes.
 * !CONFIG_COMPAT build: @compat can never be set, so the native
 * long layout is always used.  Returns bytes copied or -EFAULT.
 */
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	if (copy_to_user(p, bits, len))
		return -EFAULT;

	return len;
}
#endif /* CONFIG_COMPAT */
/*
 * Copy a NUL-terminated kernel string to userspace, truncated to
 * @maxlen bytes.  Returns the number of bytes copied, -ENOENT if the
 * string is absent, or -EFAULT on a faulting copy.
 */
static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
{
	int len;

	if (!str)
		return -ENOENT;

	len = strlen(str) + 1;		/* include the terminating NUL */
	if (len > maxlen)
		len = maxlen;

	if (copy_to_user(p, str, len))
		return -EFAULT;

	return len;
}
#define OLD_KEY_MAX 0x1ff
/*
 * EVIOCGBIT: copy one of the device capability bitmaps to userspace.
 * @type selects the bitmap (0 means the EV_* type bitmap itself);
 * @size is the user buffer length in bytes.  Includes a rate-limited
 * workaround for old userspace that passed KEY_MAX bits as if it
 * were a byte count.
 */
static int handle_eviocgbit(struct input_dev *dev,
			    unsigned int type, unsigned int size,
			    void __user *p, int compat_mode)
{
	static unsigned long keymax_warn_time;
	unsigned long *bits;
	int len;
	switch (type) {
	case 0: bits = dev->evbit; len = EV_MAX; break;
	case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
	case EV_REL: bits = dev->relbit; len = REL_MAX; break;
	case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
	case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
	case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
	case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
	case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
	case EV_SW: bits = dev->swbit; len = SW_MAX; break;
	default: return -EINVAL;
	}
	/*
	 * Work around bugs in userspace programs that like to do
	 * EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len'
	 * should be in bytes, not in bits.
	 */
	if (type == EV_KEY && size == OLD_KEY_MAX) {
		len = OLD_KEY_MAX;
		if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
			printk(KERN_WARNING
				"evdev.c(EVIOCGBIT): Suspicious buffer size %u, "
				"limiting output to %zu bytes. See "
				"http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
				OLD_KEY_MAX,
				BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
	}
	return bits_to_user(bits, len, size, p, compat_mode);
}
#undef OLD_KEY_MAX
/*
 * Dispatch one evdev ioctl.  Called from evdev_ioctl_handler() with
 * evdev->mutex held and the device known to exist.  Fixed-size
 * commands are matched exactly; variable-size ones are matched with
 * their size bits masked out; EVIOCGBIT/EVIOCGABS/EVIOCSABS encode an
 * index in the low bits of the ioctl number.
 */
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
			   void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_dev *dev = evdev->handle.dev;
	struct input_absinfo abs;
	struct ff_effect effect;
	int __user *ip = (int __user *)p;
	unsigned int i, t, u, v;
	unsigned int size;
	int error;
	/* First we check for fixed-length commands */
	switch (cmd) {
	case EVIOCGVERSION:
		return put_user(EV_VERSION, ip);
	case EVIOCGID:
		if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
			return -EFAULT;
		return 0;
	case EVIOCGREP:
		/* key-repeat delay/period; only valid for EV_REP devices */
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (put_user(dev->rep[REP_DELAY], ip))
			return -EFAULT;
		if (put_user(dev->rep[REP_PERIOD], ip + 1))
			return -EFAULT;
		return 0;
	case EVIOCSREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (get_user(u, ip))
			return -EFAULT;
		if (get_user(v, ip + 1))
			return -EFAULT;
		input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
		input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);
		return 0;
	case EVIOCGKEYCODE:
		/* scancode in ip[0] -> keycode out in ip[1] */
		if (get_user(t, ip))
			return -EFAULT;
		error = input_get_keycode(dev, t, &v);
		if (error)
			return error;
		if (put_user(v, ip + 1))
			return -EFAULT;
		return 0;
	case EVIOCSKEYCODE:
		if (get_user(t, ip) || get_user(v, ip + 1))
			return -EFAULT;
		return input_set_keycode(dev, t, v);
	case EVIOCRMFF:
		/* effect id is passed in the argument itself, not a pointer */
		return input_ff_erase(dev, (int)(unsigned long) p, file);
	case EVIOCGEFFECTS:
		i = test_bit(EV_FF, dev->evbit) ?
				dev->ff->max_effects : 0;
		if (put_user(i, ip))
			return -EFAULT;
		return 0;
	case EVIOCGRAB:
		/* non-zero arg grabs, zero arg releases */
		if (p)
			return evdev_grab(evdev, client);
		else
			return evdev_ungrab(evdev, client);
	}
	size = _IOC_SIZE(cmd);
	/* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr)	((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
	switch (EVIOC_MASK_SIZE(cmd)) {
	case EVIOCGKEY(0):
		return bits_to_user(dev->key, KEY_MAX, size, p, compat_mode);
	case EVIOCGLED(0):
		return bits_to_user(dev->led, LED_MAX, size, p, compat_mode);
	case EVIOCGSND(0):
		return bits_to_user(dev->snd, SND_MAX, size, p, compat_mode);
	case EVIOCGSW(0):
		return bits_to_user(dev->sw, SW_MAX, size, p, compat_mode);
	case EVIOCGNAME(0):
		return str_to_user(dev->name, size, p);
	case EVIOCGPHYS(0):
		return str_to_user(dev->phys, size, p);
	case EVIOCGUNIQ(0):
		return str_to_user(dev->uniq, size, p);
	case EVIOC_MASK_SIZE(EVIOCSFF):
		if (input_ff_effect_from_user(p, size, &effect))
			return -EFAULT;
		error = input_ff_upload(dev, &effect, file);
		/* hand the (possibly newly assigned) effect id back */
		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
			return -EFAULT;
		return error;
	}
	/* Multi-number variable-length handlers */
	if (_IOC_TYPE(cmd) != 'E')
		return -EINVAL;
	if (_IOC_DIR(cmd) == _IOC_READ) {
		if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
			return handle_eviocgbit(dev,
						_IOC_NR(cmd) & EV_MAX, size,
						p, compat_mode);
		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
			if (!dev->absinfo)
				return -EINVAL;
			t = _IOC_NR(cmd) & ABS_MAX;	/* which axis */
			abs = dev->absinfo[t];
			if (copy_to_user(p, &abs, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;
			return 0;
		}
	}
	if (_IOC_DIR(cmd) == _IOC_WRITE) {
		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
			if (!dev->absinfo)
				return -EINVAL;
			t = _IOC_NR(cmd) & ABS_MAX;	/* which axis */
			if (copy_from_user(&abs, p, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;
			/* older userspace struct lacks the resolution field */
			if (size < sizeof(struct input_absinfo))
				abs.resolution = 0;
			/* We can't change number of reserved MT slots */
			if (t == ABS_MT_SLOT)
				return -EINVAL;
			/*
			 * Take event lock to ensure that we are not
			 * changing device parameters in the middle
			 * of event.
			 */
			spin_lock_irq(&dev->event_lock);
			dev->absinfo[t] = abs;
			spin_unlock_irq(&dev->event_lock);
			return 0;
		}
	}
	return -EINVAL;
}
/*
 * Common ioctl entry: take the device mutex, verify the device still
 * exists, then hand off to evdev_do_ioctl().  @compat_mode selects
 * 32-bit-on-64-bit bitmap layout in the helpers.
 */
static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
				void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (evdev->exist)
		retval = evdev_do_ioctl(file, cmd, p, compat_mode);
	else
		retval = -ENODEV;

	mutex_unlock(&evdev->mutex);
	return retval;
}
/* Native ioctl entry point: no compat translation (compat_mode = 0). */
static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
}
#ifdef CONFIG_COMPAT
/* 32-bit userspace on a 64-bit kernel: translate the pointer and
 * request compat bitmap layout (compat_mode = 1). */
static long evdev_ioctl_compat(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
}
#endif
/* File operations for /dev/input/eventN character devices. */
static const struct file_operations evdev_fops = {
	.owner		= THIS_MODULE,
	.read		= evdev_read,
	.write		= evdev_write,
	.poll		= evdev_poll,
	.open		= evdev_open,
	.release	= evdev_release,
	.unlocked_ioctl	= evdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= evdev_ioctl_compat,
#endif
	.fasync		= evdev_fasync,
	.flush		= evdev_flush
};
/* Publish the device in the minor table; always succeeds (returns 0). */
static int evdev_install_chrdev(struct evdev *evdev)
{
	/*
	 * No need to do any locking here as calls to connect and
	 * disconnect are serialized by the input core
	 */
	evdev_table[evdev->minor] = evdev;
	return 0;
}
/* Withdraw the device from the minor table so new opens fail. */
static void evdev_remove_chrdev(struct evdev *evdev)
{
	/*
	 * Lock evdev table to prevent race with evdev_open()
	 */
	mutex_lock(&evdev_table_mutex);
	evdev_table[evdev->minor] = NULL;
	mutex_unlock(&evdev_table_mutex);
}
/*
* Mark device non-existent. This disables writes, ioctls and
* prevents new users from opening the device. Already posted
* blocking reads will stay, however new ones will fail.
*/
/*
 * Mark device non-existent.  Taking the mutex around the store makes
 * sure no open/write/ioctl holding the mutex still sees exist == true
 * after this returns.
 */
static void evdev_mark_dead(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	evdev->exist = false;
	mutex_unlock(&evdev->mutex);
}
/*
 * Disconnect-time teardown: mark the device dead, kick waiting and
 * async readers, withdraw the minor, and close the input handle on
 * behalf of any remaining openers.
 */
static void evdev_cleanup(struct evdev *evdev)
{
	struct input_handle *handle = &evdev->handle;
	evdev_mark_dead(evdev);
	evdev_hangup(evdev);
	evdev_remove_chrdev(evdev);
	/* evdev is marked dead so no one else accesses evdev->open */
	if (evdev->open) {
		input_flush_device(handle, NULL);
		input_close_device(handle);
	}
}
/*
* Create new evdev device. Note that input core serializes calls
* to connect and disconnect so we don't need to lock evdev_table here.
*/
/*
 * Create new evdev device. Note that input core serializes calls
 * to connect and disconnect so we don't need to lock evdev_table here.
 * Picks the first free minor, initializes the evdev structure, then
 * registers the handle, publishes the minor, and adds the device —
 * unwinding in reverse order on failure.
 */
static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
			 const struct input_device_id *id)
{
	struct evdev *evdev;
	int minor;
	int error;
	/* find the first unused minor slot */
	for (minor = 0; minor < EVDEV_MINORS; minor++)
		if (!evdev_table[minor])
			break;
	if (minor == EVDEV_MINORS) {
		printk(KERN_ERR "evdev: no more free evdev devices\n");
		return -ENFILE;
	}
	evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
	if (!evdev)
		return -ENOMEM;
	INIT_LIST_HEAD(&evdev->client_list);
	spin_lock_init(&evdev->client_lock);
	mutex_init(&evdev->mutex);
	init_waitqueue_head(&evdev->wait);
	dev_set_name(&evdev->dev, "event%d", minor);
	evdev->exist = true;
	evdev->minor = minor;
	/* hold a reference on the input device; dropped in evdev_free() */
	evdev->handle.dev = input_get_device(dev);
	evdev->handle.name = dev_name(&evdev->dev);
	evdev->handle.handler = handler;
	evdev->handle.private = evdev;
	evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor);
	evdev->dev.class = &input_class;
	evdev->dev.parent = &dev->dev;
	evdev->dev.release = evdev_free;
	device_initialize(&evdev->dev);
	error = input_register_handle(&evdev->handle);
	if (error)
		goto err_free_evdev;
	error = evdev_install_chrdev(evdev);
	if (error)
		goto err_unregister_handle;
	error = device_add(&evdev->dev);
	if (error)
		goto err_cleanup_evdev;
	return 0;
 err_cleanup_evdev:
	evdev_cleanup(evdev);
 err_unregister_handle:
	input_unregister_handle(&evdev->handle);
 err_free_evdev:
	/* drops the last reference; evdev_free() does the kfree */
	put_device(&evdev->dev);
	return error;
}
/*
 * Input-core disconnect callback: remove the device node, tear the
 * device down, unregister from the input core, and drop our reference
 * (final free happens in evdev_free() once all clients are gone).
 */
static void evdev_disconnect(struct input_handle *handle)
{
	struct evdev *evdev = handle->private;
	device_del(&evdev->dev);
	evdev_cleanup(evdev);
	input_unregister_handle(handle);
	put_device(&evdev->dev);
}
/* evdev is a catch-all interface: match every input device. */
static const struct input_device_id evdev_ids[] = {
	{ .driver_info = 1 },	/* Matches all devices */
	{ },			/* Terminating zero entry */
};
MODULE_DEVICE_TABLE(input, evdev_ids);
/* Handler registered with the input core; owns minors from EVDEV_MINOR_BASE. */
static struct input_handler evdev_handler = {
	.event		= evdev_event,
	.connect	= evdev_connect,
	.disconnect	= evdev_disconnect,
	.fops		= &evdev_fops,
	.minor		= EVDEV_MINOR_BASE,
	.name		= "evdev",
	.id_table	= evdev_ids,
};
/* Module init: register the handler; the input core then calls
 * evdev_connect() for each existing and future input device. */
static int __init evdev_init(void)
{
	return input_register_handler(&evdev_handler);
}
/* Module exit: unregister; the core disconnects all our devices. */
static void __exit evdev_exit(void)
{
	input_unregister_handler(&evdev_handler);
}
module_init(evdev_init);
module_exit(evdev_exit);
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input driver event char devices");
MODULE_LICENSE("GPL");
| gpl-2.0 |
XCopter-HSU/XCopter | Sprint_2/USB_Controller_2/FT313_release_driver/FT313H_hcd_demo/ft313-q.c | 1 | 54087 | /*
* FT313 HCD qTD, qHead, iTD, siTD management.
*
* Copyright (C) 2011 Chang Yang <chang.yang@ftdichip.com>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
*
* Licensed under GPL version 2 only.
*/
/* this file is part of ft313-hcd.c */
/* fill a qtd, returning how much of the buffer we were able to queue up */
/* fill a qtd, returning how much of the buffer we were able to queue up */
/*
 * FT313 variant: unlike stock EHCI, transfer buffers live in the
 * controller's on-chip memory, so a communication buffer is allocated
 * via allocate_mem_blk() and OUT/SETUP payload is copied into it with
 * ft313_mem_write().  If a smaller-than-requested block is returned,
 * the length is trimmed to a multiple of @maxpacket (short packets may
 * only terminate a transfer).  Returns the byte count queued, or -1 on
 * allocation/pointer failure.
 */
static int
qtd_fill(struct ft313_hcd *ft313, struct ehci_qtd *qtd, void* buf,
	 size_t len, int token, int maxpacket)
{
	int i, count;
	u32 addr;
	size_t actual_len;
	struct ft313_mem_blk *mem_blk_ptr;
	FUN_ENTRY();
	DEBUG_MSG("qTD buffer ptr is at 0x%X with token as 0x%08X\n", buf, token);
	if (len != 0) {
		DEBUG_MSG("Try to allocate %d bytes buffer.\n", len);
		mem_blk_ptr = allocate_mem_blk(ft313, BUFFER, len); // Allocate comm buffer
		if (mem_blk_ptr == NULL) {
			printk("No more memory block available \n");
			return -1;
		}
		DEBUG_MSG("Got a buffer with size %d at 0x%X.\n", mem_blk_ptr->size, mem_blk_ptr->offset);
		actual_len = min(len, mem_blk_ptr->size);
		qtd->buffer_ft313 = mem_blk_ptr->offset;
		addr = qtd->buffer_ft313;
		qtd->hw_buf[0] = addr;
		if (mem_blk_ptr->size < len) { // Get a smaller buffer than needed
			// Check whether buffer got is too small or not
			if (mem_blk_ptr->size < maxpacket) {
				free_mem_blk(ft313, mem_blk_ptr->offset);
				ERROR_MSG("Cannot allocate minimal comm buffer for this urb, only %d bytes buffer found", mem_blk_ptr->size);
				return -1;
			}
			if (0 != (actual_len % maxpacket)) {
				// Adjust to be multiply of max packet size
				actual_len = (actual_len / maxpacket) * maxpacket;
			}
		}
	} else { // When no memory allocation is needed, set all as zero
		actual_len = 0;
		addr = 0;
		qtd->hw_buf[0] = 0;
	}
	/* copy OUT/SETUP payload into chip memory (IN data is read back later) */
	if ((0 != buf) && // 0 means qtd for status phase of control tranfer
	    (0 != actual_len) &&
	    (QTD_PID(token) != 1)) { // Not IN transfer
		if ((unsigned int)buf <= 0x10000) {
			ERROR_MSG("buf 0x%X pointer is not valid \n", buf);
			return -1;
		}
		ft313_mem_write(ft313, buf, actual_len, mem_blk_ptr->offset); // Write payload
	}
//	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (addr & 0x0fff);	/* rest of that page */
	if (likely (actual_len < count))	/* ... iff needed */
		count = actual_len;
	else {
		addr += 0x1000;
		addr &= ~0x0fff;
		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < actual_len && i < 5; i++) {
//			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ft313, (u32)addr);
//			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
//					(u32)(addr >> 32));
			addr += 0x1000;
			if ((count + 0x1000) < actual_len)
				count += 0x1000;
			else
				count = actual_len;
		}
		/* short packets may only terminate transfers */
		if (count != actual_len)
			count -= (count % maxpacket);
	}
	/* total-bytes field lives in bits 30:16 of the token dword */
	qtd->hw_token = cpu_to_hc32(ft313, (count << 16) | token);
	qtd->length = count;
	DEBUG_MSG("qTD length is %d\n", count);
	FUN_EXIT();
	return count;
}
/*-------------------------------------------------------------------------*/
/*
 * Point an idle qH's overlay at @qtd and reset the token, mirroring
 * each changed dword into FT313 on-chip memory with ft313_mem_write().
 * For non-control endpoints the data-toggle is (re)initialized from
 * the device's pseudo-toggle state, as in stock EHCI.
 *
 * Fix: the "Clear DT toggle" trace used qh->ft313 — no such member;
 * every other trace in this file uses qh->qh_ft313 (the on-chip
 * offset), so this failed to compile whenever DEBUG_MSG expands its
 * arguments.
 */
static inline void
qh_update (struct ft313_hcd *ft313, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;
	DEBUG_MSG("Enter++\n");
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);
	// Do not update from memory as content just updated by caller
	// and caller is solo!
	hw->hw_qtd_next = QTD_NEXT(ft313, qtd->qtd_ft313);
	ft313_mem_write(ft313,
			&(hw->hw_qtd_next),
			sizeof(hw->hw_qtd_next),
			qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_qtd_next));
	DEBUG_MSG("Updated Next qTD Pointer for qH 0x%X\n", qh->qh_ft313);
	hw->hw_alt_next = EHCI_LIST_END(ft313);
	ft313_mem_write(ft313,
			&(hw->hw_alt_next),
			sizeof(hw->hw_alt_next),
			qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_alt_next));
	DEBUG_MSG("Updated Alternate Next qTD Pointer for qH 0x%X\n", qh->qh_ft313);
	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ft313, 1 << 14))) {
		unsigned	is_out, epnum;
		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ft313, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ft313, QTD_TOGGLE);
			DEBUG_MSG("Clear DT toggle bit for qH 0x%X\n", qh->qh_ft313);
			ft313_mem_write(ft313,
					&(hw->hw_token),
					sizeof(hw->hw_token),
					qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_token));
			DEBUG_MSG("Updated Token DWord for qH 0x%X\n", qh->qh_ft313);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}
	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	hw->hw_token &= cpu_to_hc32(ft313, QTD_TOGGLE | QTD_STS_PING);
	ft313_mem_write(ft313,
			&(hw->hw_token),
			sizeof(hw->hw_token),
			qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_token));
	DEBUG_MSG("Updated Token DWord for qH 0x%X\n", qh->qh_ft313);
	DEBUG_MSG("Exit--\n");
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
/*
 * Load the qH overlay with the first unprocessed qTD (or the dummy if
 * the queue is empty), unless the controller's current-qTD pointer is
 * already on it.  qTD and qH shadows are refreshed from FT313 memory
 * before the comparison.
 *
 * Fix: the qtd declaration followed the FUN_ENTRY() statement, which
 * violates C90 / the kernel's -Wdeclaration-after-statement rule;
 * declarations now precede the first statement.
 */
static void
qh_refresh (struct ft313_hcd *ft313, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	FUN_ENTRY();

	if (list_empty (&qh->qtd_list)) {
		qtd = qh->dummy;
		DEBUG_MSG("qH 0x%X has an empty qTD list\n", qh->qh_ft313);
	}
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		ft313_mem_read(ft313, qtd, sizeof(struct ehci_qtd_hw), qtd->qtd_ft313);
		ft313_mem_read(ft313, qh->hw, sizeof(struct ehci_qh_hw), qh->qh_ft313);
		if (cpu_to_hc32(ft313, qtd->qtd_ft313) == qh->hw->hw_current) {
			DEBUG_MSG("qTD 0x%X is the same as qH 0x%X's current qTD Pointer\n",
				  qtd->qtd_ft313, qh->qh_ft313);
			DEBUG_MSG("No qh_update\n");
			qtd = NULL;	/* hardware already working on it */
		}
	}

	if (qtd)
		qh_update (ft313, qh, qtd);

	FUN_EXIT();
}
/*
 * Hub-driver callback invoked once a Clear-TT-Buffer request has
 * completed.  Clears the in-progress flag and, if the qh is idle with
 * work still queued and the HC is running, re-links it onto the async
 * schedule.
 */
static void ft313_clear_tt_buffer_complete(struct usb_hcd *hcd,
					   struct usb_host_endpoint *ep)
{
	struct ft313_hcd *ft313 = hcd_to_ft313(hcd);
	struct ehci_qh *qh = ep->hcpriv;
	unsigned long flags;
	spin_lock_irqsave(&ft313->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
	    && HC_IS_RUNNING(hcd->state))
		qh_link_async(ft313, qh);
	spin_unlock_irqrestore(&ft313->lock, flags);
}
/*
 * Ask the hub driver to clear a transaction translator's buffer after
 * a failed or unlinked async split transaction, unless a clear is
 * already pending.  Completion is reported through
 * ft313_clear_tt_buffer_complete().
 */
static void ft313_clear_tt_buffer(struct ft313_hcd *ft313, struct ehci_qh *qh,
				  struct urb *urb, u32 token)
{
	FUN_ENTRY();
	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
		/* a TT inside the root hub (TDI mode) is handled by hardware */
		if (!ehci_is_TDI(ft313)
		    || urb->dev->tt->hub !=
			ft313_to_hcd(ft313)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {
			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
	FUN_EXIT();
}
/*
 * Fold one finished qTD into its URB: for IN transfers, copy the
 * received bytes back from FT313 on-chip memory (@qtd_buffer_offset)
 * into the URB's buffer; update urb->actual_length; and translate the
 * hardware token's error bits into a Linux errno.
 * Returns -EINPROGRESS while the URB should continue, or the final
 * status otherwise.
 */
static int qtd_copy_status (
	struct ft313_hcd *ft313,
	struct urb *urb,
	size_t length,
	u32 token,
	u32 qtd_buffer_offset
)
{
	int status = -EINPROGRESS;
	u32 actual_rx_data_length = 0;
	void *buf = NULL;
	DEBUG_MSG("Enter++ with token as 0x%X\n", token);
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2)) {
		// for IN tranfer, need to copy data here!
		if (QTD_PID(token) == 1) {
			/* token's residual length tells how much actually arrived */
			actual_rx_data_length = length - QTD_LENGTH(token);
			if (0 != actual_rx_data_length) {// 0 means qTD for status phase for ctrl without data phase
				DEBUG_MSG("Receive %d bytes for urb 0x%X\n", actual_rx_data_length, urb);
				if (urb->transfer_buffer == NULL) {
					ERROR_MSG("Only DMA address is provided\n");
					buf = phys_to_virt(urb->transfer_dma) + urb->actual_length;
					ERROR_MSG("buffer ptr used for copy data IN will be 0x%X\n", buf);
				}
				else
					buf = urb->transfer_buffer + urb->actual_length;
				ft313_mem_read(ft313,
					       buf,
					       actual_rx_data_length,
					       qtd_buffer_offset);
			}
		}
		urb->actual_length += (length - QTD_LENGTH (token));
		DEBUG_MSG("urb 0x%X actual_length field become %d\n", urb, urb->actual_length);
	}
	/* don't modify error codes */
	if (unlikely(urb->unlinked)) {
		DEBUG_MSG("Exit-- due to urb unlinked with status is %d\n", status);
		return status;
	}
	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;
	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			ALERT_MSG("\n\n\n FT313 got USB babbing error!!! \n\n\n");
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			DEBUG_MSG("urb 0x%X is stalled\n", urb);
			status = -EPIPE;
		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ft313_dbg(ft313, "devpath %s ep%d%s 3strikes\n",
				  urb->dev->devpath,
				  usb_pipeendpoint(urb->pipe),
				  usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}
		/*
		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status); */
	}
	DEBUG_MSG("Exit-- with status as %d\n", status);
	return status;
}
/*
 * Hand a finished URB back to its submitter.  Updates periodic stats
 * for interrupt URBs, normalizes benign statuses to 0, and gives the
 * URB back with ft313->lock dropped around the callback (per the
 * __releases/__acquires annotations) since complete() may re-enter
 * this HCD.
 */
static void
ft313_urb_done(struct ft313_hcd *ft313, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	DEBUG_MSG("Enter++\n");
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
		/* S-mask in a QH means it's an interrupt urb */
		// FixMe: Is hw_info2 need update from FT313?
		if ((qh->hw->hw_info2 & cpu_to_hc32(ft313, QH_SMASK)) != 0) {
			/* ... update hc-wide periodic stats (for usbfs) */
			ft313_to_hcd(ft313)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}
	if (unlikely(urb->unlinked)) {
		COUNT(ft313->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ft313->stats.complete);
	}
#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif
	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ft313_to_hcd(ft313), urb);
	spin_unlock (&ft313->lock);
	usb_hcd_giveback_urb(ft313_to_hcd(ft313), urb, status);
	spin_lock (&ft313->lock);
	DEBUG_MSG("Exit--\n");
}
static void start_unlink_async (struct ft313_hcd *ft313, struct ehci_qh *qh);
static void unlink_async (struct ft313_hcd *ft313, struct ehci_qh *qh);
static int qh_schedule (struct ft313_hcd *ft313, struct ehci_qh *qh);
/*
* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
static unsigned
qh_completions (struct ft313_hcd *ft313, struct ehci_qh *qh)
{
	FUN_ENTRY();

	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;
	struct urb		*urb = NULL;
	struct ehci_qtd		*qtd = NULL;
	int			last_seg_still_active = 0;
	int			got_short_packet = 0;
	int			qh_in_unlink = 0;

	if (NULL == hw) {
		/* Fixed: BUG_ON() takes a condition; the original passed a
		 * string literal, which is always non-NULL and therefore
		 * panicked unconditionally, making the return below
		 * unreachable.  Log the error and bail out instead, which
		 * is what the following statement shows was intended. */
		ERROR_MSG("hw ptr is NULL!\n");
		return count;
	}

	/* Refresh our cached copy of the qH from FT313 local memory; the
	 * controller updates the overlay area as it works the queue. */
	DEBUG_MSG("Update qH 0x%X from FT313 memory\n", qh->qh_ft313);
	ft313_mem_read(ft313, hw, sizeof(*hw), qh->qh_ft313);

	if (unlikely (list_empty (&qh->qtd_list))) {
		DEBUG_MSG("qH 0x%X has empty qTD list\n", qh->qh_ft313);
		FUN_EXIT();
		return count;
	}

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_periodic().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);
	DEBUG_MSG("Value of stopped is %d as qH 0x%X status is %d\n", stopped, qh->qh_ft313, state);

rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		u32	token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		/* list_entry() is container_of() pointer arithmetic and
		 * cannot really yield NULL; keep the guard as
		 * belt-and-braces, but log instead of the original
		 * BUG_ON(<string literal>) which fired unconditionally. */
		if (NULL == qtd) {
			ERROR_MSG("qtd ptr is NULL\n");
			return count;
		}

		urb = qtd->urb;
		DEBUG_MSG("urb associated with qTD 0x%X is 0x%X\n", qtd->qtd_ft313, urb);

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				DEBUG_MSG("urb is done in loop\n");
				ft313_urb_done(ft313, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ft313_qtd_free (ft313, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end) {
			DEBUG_MSG("Reach last qTD\n");
			break;
		}

		/* hardware copies qtd out of qh overlay */
		rmb ();
		DEBUG_MSG("Update qTD 0x%X hw token from FT313 \n", qtd->qtd_ft313);
		ft313_mem_read(ft313,
			       &(qtd->hw_token),
			       sizeof(qtd->hw_token),
			       qtd->qtd_ft313 + offsetof(struct ehci_qtd_hw, hw_token));
		token = hc32_to_cpu(ft313, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {
			DEBUG_MSG("qTD active bit is off\n");

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {
				DEBUG_MSG("Halt bit is set for qTD 0x%X\n", qtd->qtd_ft313);

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
				    QTD_CERR(token) == 0 &&
				    ++qh->xacterrs < QH_XACTERR_MAX &&
				    !urb->unlinked) {
					DEBUG_MSG(
						"detected XactErr len %zu/%zu retry %d\n",
						qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
						 (EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ft313,
								    token);
					ft313_mem_write(ft313, &token, sizeof(token),
							qtd->qtd_ft313 + offsetof(struct ehci_qtd_hw, hw_token));
					wmb();
					hw->hw_token = cpu_to_hc32(ft313,
								   token);
					ft313_mem_write(ft313, &token, sizeof(token),
							qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_token));
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
				   && !(qtd->hw_alt_next
					& EHCI_LIST_END(ft313))) {
				DEBUG_MSG("qTD meet short packet\n");
				got_short_packet = 1;
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				   && HC_IS_RUNNING (ft313_to_hcd(ft313)->state))) {
			if ((usb_pipetype (urb->pipe) != PIPE_INTERRUPT)) {
				DEBUG_MSG("qTD Active bit in status is still on!\n");
				DEBUG_MSG("Stop process and jump out of loop\n");
			}
			last_seg_still_active = 1;
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;
			qh_in_unlink = 1;

			/* cancel everything if we halt, suspend, etc */
			if (!HC_IS_RUNNING(ft313_to_hcd(ft313)->state))
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
			    && cpu_to_hc32(ft313, qtd->qtd_ft313)
			       == hw->hw_current) {
				token = hc32_to_cpu(ft313, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ft313_clear_tt_buffer(ft313, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred. in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt. but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ft313, urb,
						      qtd->length, token, qtd->hw_buf[0]);
			if (last_status == -EREMOTEIO
			    && (qtd->hw_alt_next
				& EHCI_LIST_END(ft313)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
				     last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				/* Fixed: a stray ';' after this condition made
				 * the call below execute unconditionally,
				 * clearing the TT buffer even for -EPIPE,
				 * contrary to the comment above. */
				if (last_status != -EPIPE)
					ft313_clear_tt_buffer(ft313, qh, urb,
							      token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					   struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
			DEBUG_MSG("Update qTD 0x%X's hw_next ptr to 0x%X\n", last->qtd_ft313, qtd->hw_next);
			ft313_mem_write(ft313,
					&(qtd->hw_next),
					sizeof(qtd->hw_next),
					last->qtd_ft313 + offsetof(struct ehci_qtd_hw, hw_next));
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	} // End of qtd list loop

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		if ((usb_pipetype (urb->pipe) == PIPE_CONTROL) ||	// Control transfer
		    (usb_pipetype (urb->pipe) == PIPE_INTERRUPT) ||	// Interrupt transfer
		    (1 == got_short_packet) ||				// Got short packet case
		    (last_status != -EINPROGRESS) ||			// Error happens from copy_urb_status()
		    (last->urb->actual_length == last->urb->transfer_buffer_length)) { // Really complete
			DEBUG_MSG("urb 0x%X is done out of loop by qH 0x%X\n", last->urb, qh->qh_ft313);
			/* TODO(review): locking was planned here ("Set lock
			 * here!"); the unused "unsigned long flags" local
			 * has been removed until that is implemented. */
			if (!list_empty(&qh->urb_list)) {
				// There is urb waiting
				struct qh_urb_queue_item *qh_urb_q_item;
				qh_urb_q_item = list_entry(qh->urb_list.next,
							   struct qh_urb_queue_item,
							   urb_list);
				qh->urb = qh_urb_q_item->urb;
				qh->urb_pending = 1;
				qh->mem_flags = last->mem_flags;
				DEBUG_MSG("urb 0x%X is waiting already\n", qh->urb);
				list_del(&qh_urb_q_item->urb_list);
				kfree(qh_urb_q_item);
			} else {
				// Nothing to do
				qh->urb = NULL;
				qh->urb_pending = 0;
				DEBUG_MSG("qh 0x%X (0x%X) is idle already\n", qh->qh_ft313, qh);
			}
			ft313_urb_done(ft313, last->urb, last_status);
			count++;
			ft313_qtd_free (ft313, last);
		} else {
			if (0 == qh_in_unlink) { // qH is not in unlink process
				// Program next segment for the same urb
				if (0 == last_seg_still_active) { // This is used to prevent programing next segment
								  // if this function is called too early
					qh->urb_pending = 1;
					qh->urb = urb;
					qh->mem_flags = last->mem_flags;
					ft313_qtd_free(ft313, last);
					DEBUG_MSG("Program next segment of urb 0x%X\n", urb);
				} else {
					ALERT_MSG("Last segment is still Active, error case!\n");
					qh->urb_pending = 0;
				}
			} else {
				qh->urb_pending = 0;
				ft313_qtd_free(ft313, last);
			}
		}
	} else if ((1 == last_seg_still_active) &&
		   (usb_pipetype (urb->pipe) != PIPE_INTERRUPT)) {
		//DEBUG_MSG("Last segment is still Active when complete irq generated, may due to irq is from other qH\n");
		qh->urb_pending = 0;
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		DEBUG_MSG("qH 0x%X need rescan\n", qh->qh_ft313);
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ft313)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ft313, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result: HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			DEBUG_MSG("qH 0x%X needs rescan due to stopped flag set\n", qh->qh_ft313);
			break;
		/* otherwise, unlink already started */
		}
	}

	FUN_EXIT();
	return count;
}
/*-------------------------------------------------------------------------*/
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
/*
* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
/* Walk a qtd list, unlink each node and hand it back to the allocator.
 * The urb parameter is unused here but kept for interface compatibility
 * with callers. */
static void qtd_list_free (
	struct ft313_hcd	*ft313,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*cursor, *backup;

	FUN_ENTRY();

	/* use the "safe" iterator: each node is removed (and freed)
	 * while we are still walking the list */
	list_for_each_safe (cursor, backup, qtd_list) {
		struct ehci_qtd	*td;

		td = list_entry (cursor, struct ehci_qtd, qtd_list);
		list_del (&td->qtd_list);
		ft313_qtd_free (ft313, td);
	}

	FUN_EXIT();
}
/*
* create a list of filled qtds for this URB; won't link into qh.
*/
/*
 * Build the list of filled qTDs that implement this URB (or the next
 * segment of it); the list is not yet linked into any qH.
 *
 * @ft313:  host controller private data
 * @urb:    the URB to translate into qTDs
 * @head:   caller-supplied (usually empty) list to append qTDs to
 * @flags:  allocation flags for the qTD pool
 *
 * Returns @head on success, NULL on failure (any qTDs already placed on
 * @head are freed via the cleanup path).
 */
static struct list_head *
qh_urb_transaction (
	struct ft313_hcd	*ft313,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	void			*buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	FUN_ENTRY();

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ft313_qtd_alloc (ft313, flags);
	if (unlikely (!qtd)) {
		/* Fixed: this early return previously skipped FUN_EXIT(),
		 * leaving the entry/exit trace unbalanced. */
		FUN_EXIT();
		return NULL;
	}
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;
	qtd->mem_flags = flags;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	/* this function programs one segment at a time; the remaining
	 * payload is what the caller has not yet transferred */
	len = urb->transfer_buffer_length - urb->actual_length;
	DEBUG_MSG("urb 0x%X reminding payload size is %d\n", urb, len);

	if (usb_pipebulk(urb->pipe) &&
	    (len <= 0)) {
		ALERT_MSG("\n\n\nTry to programming %d length bulk packet!!!\n\n\n", len);
		goto cleanup;
	}

	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		DEBUG_MSG("Create qTD for SETUP token\n");
		/* SETUP pid */
		if (0 > qtd_fill(ft313, qtd, urb->setup_packet,
				 sizeof (struct usb_ctrlrequest),
				 token | (2 /* "setup" */ << 8), 8)) {
			ERROR_MSG("qTD filling for SETUP token failed\n");
			goto cleanup;
		}

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ft313_qtd_alloc (ft313, flags);
		if (unlikely (!qtd)) {
			ALERT_MSG("qTD allocation fail for ctrl transfer !!!\n");
			goto cleanup;
		}
		qtd->urb = urb;
//		qtd_prev->hw_next = QTD_NEXT(ft313, qtd->qtd_dma);
		qtd_prev->hw_next = QTD_NEXT(ft313, qtd->qtd_ft313);
		list_add_tail (&qtd->qtd_list, head);
		/* flush the completed SETUP qTD into FT313 local memory */
		ft313_mem_write(ft313, qtd_prev, sizeof (struct ehci_qtd_hw), qtd_prev->qtd_ft313);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0) {
			token |= (1 /* "in" */ << 8);
		}

		DEBUG_MSG("Data phase length is %d\n", len);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_sgs;
	if (len > 0 && i > 0) {
		/* FT313 cannot do scatter-gather; fail the submission.
		 * (The unreachable sg-mapping code that used to follow
		 * this goto has been removed.) */
		ALERT_MSG("FT313 HCD does not support scatter list\n");
		goto cleanup;
	} else {
		sg = NULL;
		//buf = urb->transfer_dma;
		if ((!usb_pipecontrol(urb->pipe)) &&
		    (urb->transfer_buffer == NULL)) {
			/* this driver does PIO into FT313 local memory, so a
			 * CPU-addressable buffer is mandatory */
			ALERT_MSG("Only DMA address is provided\n");
			goto cleanup;
		}
		else
			buf = urb->transfer_buffer + urb->actual_length;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
	qtd->maxpacket = maxpacket;

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	//for (;;) FixMe: One qTD only for FT313
	{
		int this_qtd_len;

		this_qtd_len = qtd_fill(ft313, qtd, buf, this_sg_len, token,
					maxpacket);
		if (this_qtd_len < 0) {
			ERROR_MSG("qTD 0x%X filling error\n", qtd->qtd_ft313);
			goto cleanup;
		}
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ft313->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) {
			token ^= QTD_TOGGLE;
		}
#if 0
		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ft313_qtd_alloc (ft313, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ft313, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
#endif
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
		    || usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ft313);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
			   && (urb->transfer_flags & URB_ZERO_PACKET)
			   && !(urb->transfer_buffer_length % maxpacket)) {
			ERROR_MSG("This urb requires additional Zero Length Packet\n");
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ft313_qtd_alloc (ft313, flags);
			if (unlikely (!qtd)) {
				ALERT_MSG("qTD allocation fail!\n");
				goto cleanup;
			}
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ft313, qtd->qtd_ft313);
			ft313_mem_write(ft313, qtd_prev, sizeof (struct ehci_qtd_hw), qtd_prev->qtd_ft313);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ft313, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) {
		DEBUG_MSG("Set IOC bit to token dword\n");
		qtd->hw_token |= cpu_to_hc32(ft313, QTD_IOC);
	} else {
		/* NOTE: both branches set IOC on purpose — this driver needs
		 * a completion interrupt for every URB, so URB_NO_INTERRUPT
		 * is deliberately overridden here. */
		DEBUG_MSG("URB_NO_INTERRUPT flag is set, force set IOC\n");
		qtd->hw_token |= cpu_to_hc32(ft313, QTD_IOC);
	}

	/* commit the final qTD to FT313 local memory */
	ft313_mem_write(ft313, qtd, sizeof (struct ehci_qtd_hw), qtd->qtd_ft313);

	FUN_EXIT();
	return head;

cleanup:
	qtd_list_free (ft313, urb, head);
	ALERT_MSG("qTD list creation failed!\n");
	FUN_EXIT();
	return NULL;
}
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
/*
 * Allocate and initialize a qH for this URB's endpoint.
 *
 * @ft313:  host controller private data
 * @urb:    URB describing device/endpoint/speed/maxpacket
 * @flags:  allocation flags passed to ft313_qh_alloc()
 *
 * Returns the new qH, or NULL on allocation failure or bogus
 * endpoint parameters (maxpacket > 1024, unknown device speed).
 */
static struct ehci_qh *
qh_make (
	struct ft313_hcd	*ft313,
	struct urb		*urb,
	gfp_t			flags
) {
	FUN_ENTRY();
	struct ehci_qh		*qh = ft313_qh_alloc (ft313, flags);
	u32			info1 = 0, info2 = 0;	/* qH dword 1 / dword 2 under construction */
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ft313_dbg(ft313, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}
	DEBUG_MSG("Max packet size is %d\n", max_packet(maxp));

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
						       is_input, 0,
						       hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;
		qh->stamp = ft313->periodic_stamp;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			DEBUG_MSG("High speed Interrupt transfer\n");
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			/* urb->interval is in uframes for HS; qh->period in frames */
			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ft313->periodic_size) {
				qh->period = ft313->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			DEBUG_MSG("Full/low speed Interrupt tranfer\n");
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
							    is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
						 usb_calc_bus_time (urb->dev->speed,
								    is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ft313->periodic_size) {
				qh->period = ft313->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		//if (ehci_has_fsl_portno_bug(ehci))
		//	info2 |= (urb->dev->ttport-1) << 23;
		//else
		info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ft313_to_hcd(ft313)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;

	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
		/* note: falls through into the error path below */
done:
		qh_put (qh);
		FUN_EXIT();
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ft313, info1);
	hw->hw_info2 = cpu_to_hc32(ft313, info2);
	qh->is_out = !is_input;
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	/* push the initialized qH into FT313 local memory, then sync the
	 * overlay/toggle state via qh_refresh() */
	ft313_mem_write(ft313, hw, sizeof(struct ehci_qh_hw), qh->qh_ft313);
	qh_refresh (ft313, qh);
	FUN_EXIT();
	return qh;
}
/*-------------------------------------------------------------------------*/
/* move qh (and its qtds) onto async queue; maybe enable queue. */
/* move qh (and its qtds) onto async queue; maybe enable queue.
 *
 * Splices the qH right after the async head qH, updating both the
 * shadow list and the hardware list held in FT313 local memory, and
 * (re)starts the async schedule if it was idle.
 */
static void qh_link_async (struct ft313_hcd *ft313, struct ehci_qh *qh)
{
	/* hardware link pointer for this qH within FT313 local memory
	 * (the qh_dma variant was computed but never used; removed) */
	__hc32		dma_ft313 = QH_NEXT(ft313, qh->qh_ft313);
	struct ehci_qh	*head;

	FUN_ENTRY();

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt)) {
		/* Fixed: this early return previously skipped FUN_EXIT(),
		 * leaving the entry/exit trace unbalanced. */
		FUN_EXIT();
		return;
	}

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* (re)start the async schedule? */
	head = ft313->async;
	timer_action_done (ft313, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ft313_reg_read32(ft313, &ft313->regs->command);

		if (!(cmd & ASCH_EN)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ft313, &ft313->regs->status,
					ASCH_STS, 0, 150);
			cmd |= ASCH_EN | RS;
			DEBUG_MSG("Start Async Schedule for qH 0x%X\n", qh->qh_ft313);
			ft313_reg_write32(ft313, cmd, &ft313->regs->command);
			ft313_to_hcd(ft313)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ft313, qh);

	/* splice right after start: first point the new qH at whatever
	 * the head currently points to ... */
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	ft313_mem_write(ft313, &(head->hw->hw_next), sizeof(qh->hw->hw_next),
			qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_next));
	wmb ();
	/* ... then make the head point at the new qH */
	head->qh_next.qh = qh;
	head->hw->hw_next = dma_ft313;
	ft313_mem_write(ft313, &(head->hw->hw_next), sizeof(head->hw->hw_next),
			head->qh_ft313 + offsetof(struct ehci_qh_hw, hw_next));

	qh_get(qh);
	qh->xacterrs = 0;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
	FUN_EXIT();
}
/*-------------------------------------------------------------------------*/
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds (
	struct ft313_hcd	*ft313,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ft313, 0x7f);

	FUN_ENTRY();

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		DEBUG_MSG("qH not availiable yet, create one\n");
		qh = qh_make (ft313, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		/* Fixed: this DEBUG_MSG used to run BEFORE the NULL check,
		 * dereferencing qh->qh_ft313 and oopsing when qh_make()
		 * failed under memory pressure. */
		DEBUG_MSG("qH 0x%X is used to serve EP 0x%X at Addr %d\n", qh->qh_ft313, epnum, usb_pipedevice(urb->pipe));

		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					  qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0) {
				qh->hw->hw_info1 &= ~qh_addr_mask;
				ft313_mem_write(ft313,
						&(qh->hw->hw_info1),
						sizeof(qh->hw->hw_info1),
						qh->qh_ft313 + offsetof(struct ehci_qh_hw, hw_info1));
			}
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			u32			dma_ft313;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ft313);
			ft313_mem_write(ft313, &(qtd->hw_token), sizeof(qtd->hw_token),
					qtd->qtd_ft313 + offsetof(struct ehci_qtd_hw, hw_token));
			wmb ();

			/* old dummy takes over the first qtd's contents, but
			 * keeps its own addresses */
			dummy = qh->dummy;
			dma = dummy->qtd_dma;
			dma_ft313 = dummy->qtd_ft313;
			*dummy = *qtd;
			DEBUG_MSG("Dummy qTD for qH 0x%X is at 0x%X originally\n", qh->qh_ft313, dma_ft313);
			ft313_mem_write(ft313, qtd, sizeof(struct ehci_qtd_hw), dma_ft313); //Copy in ft313 memory also
			dummy->qtd_dma = dma;
			dummy->qtd_ft313 = dma_ft313;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			/* first qtd of the new list becomes the new dummy */
			ft313_qtd_init(ft313, qtd, qtd->qtd_dma);
			DEBUG_MSG("qTD 0x%X become new dummy for qH 0x%X\n", qtd->qtd_ft313, qh->qh_ft313);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			dma_ft313 = qtd->qtd_ft313;
			qtd = list_entry (qh->qtd_list.prev,
					  struct ehci_qtd, qtd_list);
			//qtd->hw_next = QTD_NEXT(ehci, dma);
			qtd->hw_next = QTD_NEXT(ft313, dma_ft313);
			ft313_mem_write(ft313, &(qtd->hw_next), sizeof(qtd->hw_next),
					qtd->qtd_ft313 + offsetof(struct ehci_qtd_hw, hw_next));

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;
			ft313_mem_write(ft313, &(dummy->hw_token), sizeof(dummy->hw_token),
					dummy->qtd_ft313 + offsetof(struct ehci_qtd_hw, hw_token));

			urb->hcpriv = qh_get (qh); // FIXME: maybe only needed for first segment programming
			qh->urb = urb;
		}
	}
#if 0	// Debug only
	{
		struct list_head	*entry, *temp;
		DEBUG_MSG("Dummy qTD for qH 0x%X is 0x%X\n", qh->qh_ft313, qh->dummy->qtd_ft313);
		list_for_each_safe (entry, temp, &qh->qtd_list) {
			struct ehci_qtd	*qtd;
			qtd = list_entry (entry, struct ehci_qtd, qtd_list);
			//list_del (&qtd->qtd_list);
			//ft313_qtd_free (ft313, qtd);
			DEBUG_MSG("qTD 0x%X is here\n", qtd->qtd_ft313);
		}

		ft313_reg_read32(ft313, &ft313->regs->intr_enable);
		ft313_reg_read32(ft313, &ft313->regs->status);
		ft313_reg_read32(ft313, &ft313->regs->async_next);
	}
#endif
	FUN_EXIT();
	return qh;
}
/*-------------------------------------------------------------------------*/
/*
 * Submit a control/bulk URB (or its next segment) onto the async
 * schedule.  Takes ft313->lock.  Returns 0 on success or a negative
 * errno (-ESHUTDOWN, -ENOMEM, or the usb_hcd_link_urb_to_ep() result).
 */
static int
submit_async (
	struct ft313_hcd	*ft313,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	FUN_ENTRY();

	int epnum;
	unsigned long flags;
	struct ehci_qh *qh = NULL;
	/* Fixed: rc was uninitialized.  On the "program next segment" path
	 * (urb->actual_length != 0) nothing assigned it before a successful
	 * return, so a garbage status was returned to the caller. */
	int rc = 0;

	DEBUG_MSG("mem flags for ctrl or bulk is 0x%X\n", mem_flags);

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ft313->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ft313_to_hcd(ft313)))) {
		rc = -ESHUTDOWN;
		goto done;
	}

	if (urb->actual_length == 0) { // Only first segment need this!
		DEBUG_MSG("Link urb to ep \n");
		rc = usb_hcd_link_urb_to_ep(ft313_to_hcd(ft313), urb);
		if (unlikely(rc))
			goto done;
	} else {
		DEBUG_MSG("Program next segment!\n");
	}

	qh = qh_append_tds(ft313, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ft313_to_hcd(ft313), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ft313, qh);
	else {
		DEBUG_MSG("qh 0x%X is not idle but %d \n", qh->qh_ft313, qh->qh_state);
	}

#if 0	// Debug only
	display_async_list(ft313);
#endif

done:
	spin_unlock_irqrestore (&ft313->lock, flags);
	if (unlikely (qh == NULL)) {
		qtd_list_free (ft313, urb, qtd_list);
	}
	DEBUG_MSG("return value %d\n", rc);
	FUN_EXIT();
	return rc;
}
/*
 * Lock-free variant of submit_async() for programming the next segment
 * of an URB already in flight; the spin_lock calls are commented out,
 * so the caller is presumably expected to hold ft313->lock — TODO
 * confirm against call sites.
 */
static int
submit_async_next (
	struct ft313_hcd	*ft313,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	FUN_ENTRY();

	int epnum;
	unsigned long flags;	/* retained for the commented-out locking below */
	struct ehci_qh *qh = NULL;
	/* Fixed: rc was uninitialized.  On the "program next segment" path
	 * (urb->actual_length != 0) nothing assigned it before a successful
	 * return, so a garbage status was returned to the caller. */
	int rc = 0;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

//	spin_lock_irqsave (&ft313->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ft313_to_hcd(ft313)))) {
		rc = -ESHUTDOWN;
		goto done;
	}

	if (urb->actual_length == 0) { // Only first segment need this!
		DEBUG_MSG("Link urb to ep \n");
		rc = usb_hcd_link_urb_to_ep(ft313_to_hcd(ft313), urb);
		if (unlikely(rc))
			goto done;
	} else {
		DEBUG_MSG("Program next segment!\n");
	}

	qh = qh_append_tds(ft313, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ft313_to_hcd(ft313), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ft313, qh);
	else {
		DEBUG_MSG("qh 0x%X is not idle but %d \n", qh->qh_ft313, qh->qh_state);
	}

#if 0	// Debug only
	display_async_list(ft313);
#endif

done:
//	spin_unlock_irqrestore (&ft313->lock, flags);
	if (unlikely (qh == NULL)) {
		qtd_list_free (ft313, urb, qtd_list);
	}
	FUN_EXIT();
	return rc;
}
/*-------------------------------------------------------------------------*/
/* the async qh for the qtds being reclaimed are now unlinked from the HC */
static void end_unlink_async (struct ft313_hcd *ft313)
{
	FUN_ENTRY();
	struct ehci_qh		*qh = ft313->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ft313);

	// Debug only
	// display_async_list(ft313);

	//Adjust the asynchronous list register value in case it is the same as qH for unlink
	u32 current_async_addr = 0;
	current_async_addr = ft313_reg_read32(ft313, &ft313->regs->async_next);
	if (qh->qh_ft313 == current_async_addr) {
		/* The controller's async list pointer still references the
		 * qH we just unlinked; repoint it by hand before the qH is
		 * recycled. */
		ERROR_MSG("Current async list register point to an unlinked qH, have to set manually!\n");
		u32 cmd, status;
		// FIXME: Check whether asynchronous schedule bit is still on before modify async list addr register
		// This fix is not fully tested yet! Yang Chang on August 31, 2012
		status = ft313_reg_read32(ft313, &ft313->regs->status);
		cmd = ft313_reg_read32(ft313, &ft313->regs->command);
		if (0 != (ASCH_STS & status)) { //Asychronous schedule still on
			/* async_next must not be rewritten while the schedule
			 * runs; stop it and wait for the status bit to drop */
			ALERT_MSG("Hack Async List Addr register on the fly!!!\n");
			ALERT_MSG("Stop Async scheduling first\n");
			ft313_reg_write32(ft313, cmd & ~ASCH_EN, &ft313->regs->command);
			// Make sure asychrous schedule stopped
			handshake(ft313, &ft313->regs->status, ASCH_STS, 0, 150);
		}

		if (qh->qh_next.qh != NULL) {
			/* point at the qH that followed the unlinked one */
			struct ehci_qh *next_qh;
			next_qh = qh->qh_next.qh;
			ft313_reg_write32(ft313, next_qh->qh_ft313, &ft313->regs->async_next);
			DEBUG_MSG("Set async list reg as 0x%X\n", next_qh->qh_ft313);
		} else {
			/* no successor: fall back to the async head qH */
			ft313_reg_write32(ft313, ft313->async->qh_ft313, &ft313->regs->async_next);
			ERROR_MSG("Set async list reg as 0x%X\n", ft313->async->qh_ft313);
		}

		//FIXME: Restore Asyncronous schedule bit if stopped above
		if (0 != (ASCH_STS & status)) {
			ft313_reg_write32(ft313, cmd | ASCH_EN, &ft313->regs->command);
			handshake(ft313, &ft313->regs->status, ASCH_STS, ASCH_STS, 150);
			ALERT_MSG("Restore Async scheduling\n");
		}
	}

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	DEBUG_MSG("qH 0x%X is made state QH_STATE_IDLE\n", qh->qh_ft313);
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ft313->reclaim = next;
	qh->reclaim = NULL;

	/* give back any completed qtds, then relink or retire the qH */
	qh_completions (ft313, qh);

	if (!list_empty (&qh->qtd_list)
	    && HC_IS_RUNNING (ft313_to_hcd(ft313)->state))
		qh_link_async (ft313, qh);
	else {
		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ft313_to_hcd(ft313)->state)
		    && ft313->async->qh_next.qh == NULL)
			timer_action (ft313, TIMER_ASYNC_OFF);
	}
	qh_put(qh);			/* refcount from async list */

	if (next) {
		ft313->reclaim = NULL;
		start_unlink_async (ft313, next);
	}
	FUN_EXIT();
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */
static void start_unlink_async (struct ft313_hcd *ft313, struct ehci_qh *qh)
{
	FUN_ENTRY();

	int cmd = ft313_reg_read32(ft313, &ft313->regs->command);
	struct ehci_qh *prev;

#ifdef DEBUG
	assert_spin_locked(&ft313->lock);
	/* Only a linked (or unlink-waiting) qh may be unlinked, and only
	 * one reclaim may be in flight at a time. */
	if (ft313->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ft313->async)) {
		/* can't get here without STS_ASS set */
		if (ft313_to_hcd(ft313)->state != HC_STATE_HALT
				&& !ft313->reclaim) {
			/* ... and CMD_IAAD clear */
			ft313_reg_write32(ft313, cmd & ~ASCH_EN,
					  &ft313->regs->command);
			wmb ();
			DEBUG_MSG("Stop Async Scheduling\n");
			// Restore async reg to original value if not!
			u32 async_reg = 0;
			async_reg = ft313_reg_read32(ft313, &ft313->regs->async_next);
			if (async_reg != ft313->async->qh_ft313)
				ft313_reg_write32(ft313, ft313->async->qh_ft313, &ft313->regs->async_next);
			// handshake later, if we need to
			timer_action_done (ft313, TIMER_ASYNC_OFF);
		}
		FUN_EXIT();
		return;
	}

	/* Mark the qh for reclaim and take a reference that end_unlink_async
	 * (the reclaim path) will drop. */
	qh->qh_state = QH_STATE_UNLINK;
	DEBUG_MSG("qH 0x%X is set as UNLINK state\n", qh->qh_ft313);
	ft313->reclaim = qh = qh_get (qh);
	DEBUG_MSG("Set qh 0x%X (%p) as qH for reclaim\n", qh->qh_ft313, qh);

	/* Walk the software async ring to find the qh's predecessor. */
	prev = ft313->async;
	// DEBUG_MSG("From qh 0x%X -> ", prev->qh_ft313);
	while (prev->qh_next.qh != qh) {
		prev = prev->qh_next.qh;
		// printk("qh 0x%X -> ", prev->qh_ft313);
	}
	// printk(" qh 0x%X\n", qh->qh_ft313);
	// DEBUG_MSG("Before remove qH 0x%X\n", qh->qh_ft313);
	// display_async_list(ft313);

	/* Splice the qh out of both the hardware list (shadow copy plus the
	 * on-chip copy via ft313_mem_write) and the software list. */
	prev->hw->hw_next = qh->hw->hw_next;
	ft313_mem_write(ft313,
			&(qh->hw->hw_next),
			sizeof(qh->hw->hw_next),
			prev->qh_ft313 + offsetof(struct ehci_qh_hw, hw_next));
	prev->qh_next = qh->qh_next;
	if (ft313->qh_scan_next == qh)
		ft313->qh_scan_next = qh->qh_next.qh;
	wmb ();
	// DEBUG_MSG("After remove qH 0x%X\n", qh->qh_ft313);
	// display_async_list(ft313);
#if 0
	//Adjust the asynchronous list register value in case it is the same as qH for unlink
	u32 current_async_addr = 0;
	current_async_addr = ft313_reg_read32(ft313, &ft313->regs->async_next);
	if (qh->qh_ft313 == current_async_addr) {
		if (qh->qh_next.qh != NULL) {
			struct ehci_qh *next_qh;
			next_qh = qh->qh_next.qh;
			ft313_reg_write32(ft313, next_qh->qh_ft313, &ft313->regs->async_next);
		} else {
			ft313_reg_write32(ft313, ft313->async->qh_ft313, &ft313->regs->async_next);
		}
	}
#endif

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(!HC_IS_RUNNING(ft313_to_hcd(ft313)->state))) {
		/* if (unlikely (qh->reclaim != 0))
		 * this will recurse, probably not much
		 */
		DEBUG_MSG("HC is not running!\n");
		end_unlink_async (ft313);
		FUN_EXIT();
		return;
	}

	/* Ring the IAA doorbell so the controller tells us (via IRQ) when it
	 * has stopped caching the removed qh; the watchdog covers lost IRQs. */
	cmd |= INT_OAAD;
	DEBUG_MSG("Set Interrupt on Asynch Advance Doorbell\n");
	ft313_reg_write32(ft313, cmd, &ft313->regs->command);
	(void)ft313_reg_read32(ft313, &ft313->regs->command);
	iaa_watchdog_start(ft313);

	FUN_EXIT();
}
/*-------------------------------------------------------------------------*/
/* Scan every qh on the async schedule: reap completed qTDs, kick off any
 * pending URB continuation work, and (optionally) unlink qhs that have
 * stayed idle past their unlink_time.  Caller holds ft313->lock. */
static void scan_async (struct ft313_hcd *ft313)
{
	FUN_ENTRY();

	bool stopped;
	struct ehci_qh *qh;
	enum ft313_timer_action action = TIMER_IO_WATCHDOG;
	int qh_count = 0;

	timer_action_done (ft313, TIMER_ASYNC_SHRINK);
	stopped = !HC_IS_RUNNING(ft313_to_hcd(ft313)->state);

	ft313->qh_scan_next = ft313->async->qh_next.qh;
	while (ft313->qh_scan_next) {
		qh = ft313->qh_scan_next;
		ft313->qh_scan_next = qh->qh_next.qh;
	rescan:
		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			DEBUG_MSG("qH 0x%X qTD list is not empty\n", qh->qh_ft313);
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in start_unlink_async().
			 */
			qh = qh_get(qh);
			temp = qh_completions(ft313, qh);
			if (qh->needs_rescan)
				unlink_async(ft313, qh);
			qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
			qh_put(qh);
			qh_count++;
			/* nonzero means progress was made; rescan this qh */
			if (temp != 0)
				goto rescan;
		}

		/* NOTE(review): segmented-transfer continuation — program the
		 * next URB segment when the previous one finished in IRQ
		 * context; a failure is reported to the class driver as a
		 * protocol error. */
		if (0 != in_interrupt()) { //Assume called from ft313_work by ft313_irq()
			if (qh->urb_pending == 1) {
				if (qh->urb != NULL) {
					//spin_unlock(&ft313->lock);
					if (0 > ft313_urb_enqueue_next(ft313, qh->urb, GFP_ATOMIC)) {
						ALERT_MSG("Program next segment failed!\n");
						qh->urb_pending = 0;
						//spin_lock(&ft313->lock); // ft313_urb_done will release lock first!
						ft313_urb_done(ft313, qh->urb, -EPROTO); // report protocol error!
					} else {
						//qh->urb = NULL;
						qh->urb_pending = 0;
						// spin_lock(&ft313->lock);
					}
				}
			}
		}

		/* unlink idle entries, reducing DMA usage as well
		 * as HCD schedule-scanning costs.  delay for any qh
		 * we just scanned, there's a not-unusual case that it
		 * doesn't stay idle for long.
		 * (plus, avoids some kind of re-activation race.)
		 */
#ifdef ENABLE_DYN_UNLINK
		if (list_empty(&qh->qtd_list)
				&& qh->qh_state == QH_STATE_LINKED) {
			if (!ft313->reclaim && (stopped ||
					time_after_eq(jiffies, qh->unlink_time)))
				start_unlink_async(ft313, qh);
			else
				action = TIMER_ASYNC_SHRINK;
		}
#endif
	}

	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ft313, TIMER_ASYNC_SHRINK);

	DEBUG_MSG("Totally there are %d qh actually processed \n", qh_count);
	FUN_EXIT();
}
| gpl-2.0 |
muromec/qtopia-ezx | src/libraries/qtopia/qdrmcontentengine.cpp | 1 | 3864 | /****************************************************************************
**
** This file is part of the Qtopia Opensource Edition Package.
**
** Copyright (C) 2008 Trolltech ASA.
**
** Contact: Qt Extended Information (info@qtextended.org)
**
** This file may be used under the terms of the GNU General Public License
** versions 2.0 as published by the Free Software Foundation and appearing
** in the file LICENSE.GPL included in the packaging of this file.
**
** Please review the following information to ensure GNU General Public
** Licensing requirements will be met:
** http://www.fsf.org/licensing/licenses/info/GPLv2.html.
**
**
****************************************************************************/
#include <qtopia/private/qdrmcontentengine_p.h>
#include <qtopia/private/drmcontent_p.h>
#include <qtopiabase/qtopialog.h>
#include <qtopiabase/qtopianamespace.h>
/*!
\class QDrmContentEngine
\mainclass
\brief QDrmContentEngine is the default content engine for DRM protected content.
\internal
*/
/*!
Constructs a new unpopulated QDrmContentEngine.
*/
QDrmContentEngine::QDrmContentEngine()
	: QFSContentEngine( QLatin1String( "*/*" ) )  // wildcard engine mime type
{
}
/*!
Constructs a new QDrmContentPrivate where \a engineType is the engine mime type of an inheriting engine.
*/
QDrmContentEngine::QDrmContentEngine( const QString &engineType )
	: QFSContentEngine( engineType )  // subclass supplies its own engine mime type
{
}
/*!
Destroys a QDrmContentEngine.
*/
QDrmContentEngine::~QDrmContentEngine()
{
	// Nothing owned directly; base class handles cleanup.
}
/*!
\reimp
*/
QContentEngine *QDrmContentEngine::createCopy() const
{
    // Clone this engine's state and keep the same content id so the
    // copy refers to the same underlying record.
    QDrmContentEngine *clone = new QDrmContentEngine;

    clone->copy( *this );
    clone->setId( id() );

    return clone;
}
/*!
\reimp
*/
QIODevice *QDrmContentEngine::open( QIODevice::OpenMode mode )
{
    // Resolve the DRM plugin and an active license for this file; both
    // are required before a decoder can be constructed.
    QDrmContentPlugin *plugin = DrmContentPrivate::plugin( fileName() );

    if( !plugin )
        return 0;

    QDrmContentLicense *license = plugin->license( fileName() );

    if( !license )
        return 0;

    QIODevice *device = plugin->createDecoder( fileName(), license->permission() );

    if( device && device->open( mode ) )
        return device;

    // Decoder creation or open failed; discard the half-built device.
    delete device;

    return 0;
}
/*!
\reimp
*/
bool QDrmContentEngine::execute( const QStringList &arguments ) const
{
if( role() == QContent::Application )
{
qLog(DocAPI) << "QDrmContentEngine::execute" << fileName() << arguments;
Qtopia::execute( fileName(), arguments.count() ? arguments[0] : QString() );
return true;
}
else
{
QContent app = mimeType().application();
if( app.isValid() && const_cast< QDrmContentEngine * >( this )->activate( mimeType().permission(), 0 ) )
{
app.execute( QStringList() << arguments << fileName() );
return true;
}
}
return false;
}
/*!
\reimp
*/
QDrmRights::Permissions QDrmContentEngine::queryPermissions()
{
	// Delegate to the DRM backend for this file's current permissions.
	return DrmContentPrivate::permissions( fileName() );
}
/*!
\reimp
*/
QDrmRights QDrmContentEngine::rights( QDrmRights::Permission permission ) const
{
	// Delegate to the DRM backend for the rights attached to this file
	// under the given permission.
	return DrmContentPrivate::getRights( fileName(), permission );
}
/*!
\reimp
*/
bool QDrmContentEngine::canActivate() const
{
	// Delegate the activation query to the DRM backend.
	return DrmContentPrivate::canActivate( fileName() );
}
/*!
\reimp
*/
bool QDrmContentEngine::activate( QDrmRights::Permission permission, QWidget *parent )
{
	// Delegate activation (which may show UI parented to 'parent') to
	// the DRM backend; returns the backend's success/failure result.
	return DrmContentPrivate::activate( fileName(), permission, parent );
}
/*!
\reimp
*/
bool QDrmContentEngine::reactivate( QDrmRights::Permission permission, QWidget *parent )
{
	// Fire-and-forget: the backend's result is discarded and this
	// always reports success.
	DrmContentPrivate::reactivate( fileName(), permission, parent );

	return true;
}
/*!
\reimp
*/
QDrmContentLicense *QDrmContentEngine::requestLicense( QDrmRights::Permission permission, QDrmContent::LicenseOptions options )
{
	// Delegate license negotiation to the DRM backend, wrapping this
	// engine in a QContent handle for the request.
	return DrmContentPrivate::requestContentLicense( QContent( this ), permission, options );
}
| gpl-2.0 |
cournia/fob | fob/quaternion.cpp | 1 | 10293 | /*
-------------------------------------------------------------------------------
libfob - C++ interface to Ascension Technology Corporation's
Flock of Birds position and orientation measurement system.
Copyright (C) 2002 Nathan Cournia
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-------------------------------------------------------------------------------
*/
//FILE: quaternion.cpp
//AUTHOR: Nathan Cournia <nathan@cournia.com>
#include <cassert>
#include "fob/quaternion.h"
//////////////////////////////////////////////////////////////////////////
// Identity rotation: zero vector part, unit scalar part.
const math::quaternion math::quaternion::IDENTITY(
	0.0, 0.0, 0.0, //xyz
	1.0 //w
);
//////////////////////////////////////////////////////////////////////////
void
math::quaternion::get_rotation_matrix( math::matrix4& m ) const
{
	// Standard quaternion -> rotation expansion, written into the
	// upper-left 3x3 of a homogeneous 4x4 matrix.
	const real_t tx = 2.0 * m_v.x( );
	const real_t ty = 2.0 * m_v.y( );
	const real_t tz = 2.0 * m_v.z( );
	const real_t txx = tx * m_v.x( );
	const real_t txy = ty * m_v.x( );
	const real_t txz = tz * m_v.x( );
	const real_t txw = tx * m_w;
	const real_t tyy = ty * m_v.y( );
	const real_t tyz = tz * m_v.y( );
	const real_t tyw = ty * m_w;
	const real_t tzz = tz * m_v.z( );
	const real_t tzw = tz * m_w;

	// row 0
	m.set( 0, 0, 1.0 - ( tyy + tzz ) );
	m.set( 0, 1, ( txy - tzw ) );
	m.set( 0, 2, ( txz + tyw ) );
	m.set( 0, 3, 0.0 );

	// row 1
	m.set( 1, 0, ( txy + tzw ) );
	m.set( 1, 1, 1.0 - ( txx + tzz ) );
	m.set( 1, 2, ( tyz - txw ) );
	m.set( 1, 3, 0.0 );

	// row 2
	m.set( 2, 0, ( txz - tyw ) );
	m.set( 2, 1, ( tyz + txw ) );
	m.set( 2, 2, 1.0 - ( txx + tyy ) );
	m.set( 2, 3, 0.0 );

	// row 3 (homogeneous)
	m.set( 3, 0, 0.0 );
	m.set( 3, 1, 0.0 );
	m.set( 3, 2, 0.0 );
	m.set( 3, 3, 1.0 );
}
//////////////////////////////////////////////////////////////////////////
math::matrix4
math::quaternion::get_rotation_matrix( void ) const
{
	// Convenience overload: build the matrix and return it by value.
	math::matrix4 result;
	get_rotation_matrix( result );
	return result;
}
//////////////////////////////////////////////////////////////////////////
void
math::quaternion::get_transposed_rotation_matrix( math::matrix4& m ) const
{
	// Same expansion as get_rotation_matrix, but with the off-diagonal
	// sign pairs swapped, yielding the transpose (i.e. the inverse for
	// a unit quaternion).
	const real_t tx = 2.0 * m_v.x( );
	const real_t ty = 2.0 * m_v.y( );
	const real_t tz = 2.0 * m_v.z( );
	const real_t txx = tx * m_v.x( );
	const real_t txy = ty * m_v.x( );
	const real_t txz = tz * m_v.x( );
	const real_t txw = tx * m_w;
	const real_t tyy = ty * m_v.y( );
	const real_t tyz = tz * m_v.y( );
	const real_t tyw = ty * m_w;
	const real_t tzz = tz * m_v.z( );
	const real_t tzw = tz * m_w;

	// row 0
	m.set( 0, 0, 1.0 - ( tyy + tzz ) );
	m.set( 0, 1, ( txy + tzw ) );
	m.set( 0, 2, ( txz - tyw ) );
	m.set( 0, 3, 0.0 );

	// row 1
	m.set( 1, 0, ( txy - tzw ) );
	m.set( 1, 1, 1.0 - ( txx + tzz ) );
	m.set( 1, 2, ( tyz + txw ) );
	m.set( 1, 3, 0.0 );

	// row 2
	m.set( 2, 0, ( txz + tyw ) );
	m.set( 2, 1, ( tyz - txw ) );
	m.set( 2, 2, 1.0 - ( txx + tyy ) );
	m.set( 2, 3, 0.0 );

	// row 3 (homogeneous)
	m.set( 3, 0, 0.0 );
	m.set( 3, 1, 0.0 );
	m.set( 3, 2, 0.0 );
	m.set( 3, 3, 1.0 );
}
//////////////////////////////////////////////////////////////////////////
math::matrix4
math::quaternion::get_transposed_rotation_matrix( void ) const
{
	// Convenience overload: build the transposed matrix by value.
	math::matrix4 result;
	get_transposed_rotation_matrix( result );
	return result;
}
//////////////////////////////////////////////////////////////////////////
//not communative
math::quaternion
math::quaternion::operator* ( const math::quaternion& rhs ) const
{
	// Quaternion (Hamilton) product; as noted above, NOT commutative,
	// so the operand order here is significant.
	// NOTE(review): the component formulas below combine this->m_v/m_w
	// with rhs in a fixed order — verify against callers before
	// assuming which side is "applied first".
	return math::quaternion(
		rhs.m_w * m_v.x() + rhs.m_v.x() * m_w +
		rhs.m_v.z() * m_v.y() - rhs.m_v.y() * m_v.z(), //x
		rhs.m_w * m_v.y() + rhs.m_v.y() * m_w +
		rhs.m_v.x() * m_v.z() - rhs.m_v.z() * m_v.x(), //y
		rhs.m_w * m_v.z() + rhs.m_v.z() * m_w +
		rhs.m_v.y() * m_v.x() - rhs.m_v.x() * m_v.y(), //z
		rhs.m_w * m_w - rhs.m_v.x() * m_v.x() -
		rhs.m_v.y() * m_v.y() - rhs.m_v.z() * m_v.z() //w
	);
}
//////////////////////////////////////////////////////////////////////////
math::vector3
math::quaternion::operator* ( const math::vector3& rhs ) const
{
	// Rotate a vector by converting this quaternion to matrix form
	// and multiplying.
	math::matrix4 rot;
	get_rotation_matrix( rot );
	return rot * rhs;
}
//////////////////////////////////////////////////////////////////////////
void
math::quaternion::from_angle_axis( real_t radians, const math::vector3& axis )
{
	// Build the quaternion for a rotation of 'radians' about 'axis'.
	// The axis must already be normalized.
	assert( math::equals( axis.length( ), 1.0 ) );

	const real_t half = radians * 0.5;
	math::multiply( m_v, axis, sin( half ) );
	m_w = cos( half );
}
//////////////////////////////////////////////////////////////////////////
void
math::quaternion::get_angle_axis( real_t& radians, math::vector3& axis ) const
{
	// Recover the (angle, axis) pair; the quaternion must be normalized.
	assert( math::equals( magnitude( ), 1.0 ) );

	radians = acos( m_w ) * 2.0;

	// sin of the half angle; guard against dividing by zero when the
	// rotation is (numerically) the identity.
	real_t half_sin = sqrt( 1.0 - m_w * m_w );
	if( math::equals( half_sin, 0.0 ) ) {
		half_sin = 1.0;
	}

	axis.set(
		m_v.x( ) / half_sin,
		m_v.y( ) / half_sin,
		m_v.z( ) / half_sin
	);
}
//////////////////////////////////////////////////////////////////////////
//this method can probably be optimized
void
math::quaternion::from_angles( real_t x_rad, real_t y_rad,
	real_t z_rad )
{
	// Compose per-axis rotations (X, then Y, then Z) into one quaternion.
	math::quaternion result, axis_rot;

	result.from_angle_axis( x_rad, math::vector3::X_AXIS );
	axis_rot.from_angle_axis( y_rad, math::vector3::Y_AXIS );
	result = result * axis_rot;

	axis_rot.from_angle_axis( z_rad, math::vector3::Z_AXIS );
	result = result * axis_rot;

	result.normalize( ); // defensive; a product of unit quaternions should already be unit
	*this = result;
}
//////////////////////////////////////////////////////////////////////////
//modified from matrix faq
//http://www.j3d.org/matrix_faq/matrfaq_latest.html
//assumes 3x3 matrix in row major format
void
math::quaternion::from_matrix3( const real_t *mat )
{
	// Convert a row-major 3x3 rotation matrix to a quaternion using the
	// trace method (see the matrix FAQ referenced above).  The branch
	// taken depends on which diagonal element dominates, which keeps
	// the divisor 's' well away from zero.
	//find trace
	real_t s;
	real_t t = 1.0 + mat[ 0 ] + mat[ 4 ] + mat[ 8 ];
	if( t > 0.000001 ) {
		//perform "instant" calc (this happens most of the time?)
		s = sqrt( t ) * 2.0;
		m_v.x( (mat[ 7 ] - mat[ 5 ]) / s );
		m_v.y( (mat[ 2 ] - mat[ 6 ]) / s );
		m_v.z( (mat[ 3 ] - mat[ 1 ]) / s );
		m_w = 0.25 * s;
		return;
	}
	// Trace near zero: pick the largest diagonal element to anchor the
	// computation.
	if( mat[ 0 ] > mat[ 4 ] && mat[ 0 ] > mat[ 8 ] ) {
		// column 0 dominant
		s = sqrt( 1.0 + mat[ 0 ] - mat[ 4 ] - mat[ 8 ] ) * 2.0;
		m_v.x( 0.25 * s );
		m_v.y( (mat[ 3 ] + mat[ 1 ]) / s );
		m_v.z( (mat[ 2 ] + mat[ 6 ]) / s );
		m_w = (mat[ 7 ] - mat[ 5 ]) / s;
	} else if( mat[ 4 ] > mat[ 8 ] ) {
		// column 1 dominant
		s = sqrt( 1.0 + mat[ 4 ] - mat[ 0 ] - mat[ 8 ]) * 2;
		m_v.x( (mat[ 3 ] + mat[ 1 ]) / s );
		m_v.y( 0.25 * s );
		m_v.z( (mat[ 7 ] + mat[ 5 ]) / s );
		m_w = (mat[ 2 ] - mat[ 6 ]) / s;
	} else {
		// column 2 dominant
		s = sqrt( 1.0 + mat[ 8 ] - mat[ 0 ] - mat[ 4 ]) * 2;
		m_v.x( (mat[ 2 ] + mat[ 6 ]) / s );
		m_v.y( (mat[ 7 ] + mat[ 5 ]) / s );
		m_v.z( 0.25 * s );
		m_w = (mat[ 3 ] - mat[ 1 ]) / s;
	}
}
//////////////////////////////////////////////////////////////////////////
//modified from matrix faq
//http://www.j3d.org/matrix_faq/matrfaq_latest.html
//assumes 4x4 matrix in row major format
void
math::quaternion::from_matrix4( const real_t *mat )
{
	// Same trace-method conversion as from_matrix3, but with indices
	// adjusted for a row-major 4x4 matrix (stride 4 instead of 3).
	//find trace
	real_t s;
	real_t t = 1.0 + mat[ 0 ] + mat[ 5 ] + mat[ 10 ];
	if( t > 0.000001 ) {
		//perform "instant" calc (this happens most of the time?)
		s = sqrt( t ) * 2.0;
		m_v.x( (mat[ 9 ] - mat[ 6 ]) / s );
		m_v.y( (mat[ 2 ] - mat[ 8 ]) / s );
		m_v.z( (mat[ 4 ] - mat[ 1 ]) / s );
		m_w = 0.25 * s;
		return;
	}
	// Trace near zero: anchor on the largest diagonal element.
	if( mat[ 0 ] > mat[ 5 ] && mat[ 0 ] > mat[ 10 ] ) {
		// column 0 dominant
		s = sqrt( 1.0 + mat[ 0 ] - mat[ 5 ] - mat[ 10 ] ) * 2.0;
		m_v.x( 0.25 * s );
		m_v.y( (mat[ 4 ] + mat[ 1 ]) / s );
		m_v.z( (mat[ 2 ] + mat[ 8 ]) / s );
		m_w = (mat[ 9 ] - mat[ 6 ]) / s;
	} else if( mat[ 5 ] > mat[ 10 ] ) {
		// column 1 dominant
		s = sqrt( 1.0 + mat[ 5 ] - mat[ 0 ] - mat[ 10 ]) * 2;
		m_v.x( (mat[ 4 ] + mat[ 1 ]) / s );
		m_v.y( 0.25 * s );
		m_v.z( (mat[ 9 ] + mat[ 6 ]) / s );
		m_w = (mat[ 2 ] - mat[ 8 ]) / s;
	} else {
		// column 2 dominant
		s = sqrt( 1.0 + mat[ 10 ] - mat[ 0 ] - mat[ 5 ]) * 2;
		m_v.x( (mat[ 2 ] + mat[ 8 ]) / s );
		m_v.y( (mat[ 9 ] + mat[ 6 ]) / s );
		m_v.z( 0.25 * s );
		m_w = (mat[ 4 ] - mat[ 1 ]) / s;
	}
}
//////////////////////////////////////////////////////////////////////////
//modified from http://www.magic-software.com/Documentation/quat.pdf
math::quaternion
math::quaternion::slerp( real_t percent, const math::quaternion& qa,
	math::quaternion qb )
{
	// Spherical linear interpolation between qa and qb by 'percent'
	// (qb is taken by value so it can be negated locally).
	//quick check to see if quats are the same
	if( math::equals( qa.w( ), qb.w( ) ) && qa.v( ) == qb.v( ) ) {
		return qa;
	}

	// Cosine of the angle between the quaternions; negate qb if needed
	// so interpolation follows the shorter arc.
	real_t cos_a = qa.w( ) * qb.w( ) + math::dot( qa.v( ), qb.v( ) );
	if( cos_a < 0.0 ) {
		qb.m_v.negate( );
		qb.m_w *= -1.0;
		cos_a = -cos_a;
	}

	//set default scales to lerp
	real_t a = 1.0 - percent;
	real_t b = percent;

	//is angle great enough to do real slerp?
	// (for nearly-parallel quaternions the plain lerp weights above are
	// used instead, avoiding the unstable 1/sin of a tiny angle)
	if( 1 - cos_a > 0.1 ) {
		real_t angle = acos( cos_a );
		real_t inverse_sin = 1.0 / sin( angle );
		a = sin( (1.0 - percent) * angle ) * inverse_sin;
		b = sin( percent * angle) * inverse_sin;
	}

	return math::quaternion(
		a * qa.x( ) + b * qb.x( ),
		a * qa.y( ) + b * qb.y( ),
		a * qa.z( ) + b * qb.z( ),
		a * qa.w( ) + b * qb.w( )
	);
}
//////////////////////////////////////////////////////////////////////////
std::ostream&
math::operator<< ( std::ostream& o, const math::quaternion& q )
{
	// Print as "x y z w": vector part first, then the scalar part.
	o << q.m_v << " " << q.m_w;
	return o;
}
| gpl-2.0 |
angelbbs/linux-sunxi | drivers/mmc/host/sunxi-mci.c | 1 | 87687 | /*
* drivers/mmc/host/sunxi-mci.c
* (C) Copyright 2010-2015
* Reuuimlla Technology Co., Ltd. <www.reuuimllatech.com>
* Aaron.Maoye <leafy.myeh@reuuimllatech.com>
* James Deng <csjamesdeng@reuuimllatech.com>
*
* description for this code
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
//#include <asm/cacheflush.h>
//#include <asm/uaccess.h>
#include <mach/system.h>
//#include <plat/sys_config.h>
//
#include <mach/hardware.h>
//#include <mach/platform.h>
//#include <mach/gpio.h>
#include <mach/clock.h>
#if defined CONFIG_MMC_SUNXI || defined CONFIG_MMC_SUNXI_MODULE
#error Only one of the old and new SUNXI MMC drivers may be selected
#endif
/* number of MMC controllers on this SoC: sun5i exposes 3, others 4 */
#define sw_host_num (sunxi_is_sun5i() ? 3 : 4)
/* serializes deferred card-rescan requests against host registration */
static DEFINE_MUTEX(sw_host_rescan_mutex);
static int sw_host_rescan_pending[4] = { 0, };
/* registered host instances, indexed by controller id */
static struct sunxi_mmc_host* sw_host[4] = {NULL, NULL, NULL, NULL};
/* script/FEX pin-config names: clock, command, then data lines d0..d7 */
static const char * const mmc_para_io[10] = { "sdc_clk", "sdc_cmd", "sdc_d0",
	"sdc_d1", "sdc_d2", "sdc_d3", "sdc_d4", "sdc_d5", "sdc_d6", "sdc_d7" };
/* Compiled-out debug helpers: word-wise and byte-wise hex dumps of a
 * register window / memory region.  Flip "#if 0" to "#if 1" to enable
 * while debugging. */
#if 0
static void dumphex32(char* name, char* base, int len)
{
	u32 i;

	printk("dump %s registers:", name);
	for (i=0; i<len; i+=4) {
		if (!(i&0xf))
			printk("\n0x%p : ", base + i);
		printk("0x%08x ", readl(base + i));
	}
	printk("\n");
}

static void hexdump(char* name, char* base, int len)
{
	u32 i;

	printk("%s :", name);
	for (i=0; i<len; i++) {
		if (!(i&0x1f))
			printk("\n0x%p : ", base + i);
		if (!(i&0xf))
			printk(" ");
		printk("%02x ", readb(base + i));
	}
	printk("\n");
}
#endif
/* Bring the controller into a known state: hardware reset, program FIFO
 * thresholds / timeouts, clear and mask interrupts, then enable the
 * global interrupt bit.  Returns 0. */
static s32 sw_mci_init_host(struct sunxi_mmc_host* smc_host)
{
	u32 rval;

	SMC_DBG(smc_host, "MMC Driver init host %d\n", smc_host->pdev->id);

	/* reset controller */
	rval = mci_readl(smc_host, REG_GCTRL) | SDXC_HWReset;
	mci_writel(smc_host, REG_GCTRL, rval);

	mci_writel(smc_host, REG_FTRGL, 0x70008);       /* FIFO thresholds */
	mci_writel(smc_host, REG_TMOUT, 0xffffffff);    /* max data/response timeout */
	mci_writel(smc_host, REG_IMASK, 0);             /* mask everything ... */
	mci_writel(smc_host, REG_RINTR, 0xffffffff);    /* ... and clear stale status */
	mci_writel(smc_host, REG_DBGC, 0xdeb);
	mci_writel(smc_host, REG_FUNS, 0xceaa0000);
	rval = mci_readl(smc_host, REG_GCTRL)|SDXC_INTEnb;
	mci_writel(smc_host, REG_GCTRL, rval);

	smc_host->voltage = SDC_WOLTAGE_OFF;
	return 0;
}
/* Shut the controller down by asserting its hardware reset bit.
 * Returns 0. */
s32 sw_mci_exit_host(struct sunxi_mmc_host* smc_host)
{
	u32 rval;

	SMC_DBG(smc_host, "MMC Driver exit host %d\n", smc_host->pdev->id);
	smc_host->ferror = 0;
	smc_host->voltage = SDC_WOLTAGE_OFF;

	/* Read-modify-write GCTRL, matching sw_mci_init_host.  The
	 * original computed rval but then wrote the bare SDXC_HWReset
	 * constant (dead store), clobbering every other GCTRL bit. */
	rval = mci_readl(smc_host, REG_GCTRL) | SDXC_HWReset;
	mci_writel(smc_host, REG_GCTRL, rval);
	return 0;
}
/* Switch the card I/O voltage rail via the regulator framework.
 * 'vdd' is one of SDC_WOLTAGE_3V3/1V8/1V2/OFF.  A per-controller flag
 * tracks whether the regulator is currently enabled so enable/disable
 * are only issued on transitions.  No-op (returns 0) when the host has
 * no regulator. */
s32 sw_mci_set_vddio(struct sunxi_mmc_host* smc_host, u32 vdd)
{
	/* printable names indexed by the SDC_WOLTAGE_* value */
	static const char* const vddstr[] = {"3.3V", "1.8V", "1.2V", "OFF"};
	/* target microvolts for 3V3 / 1V8 / 1V2, same indexing */
	static const int uvolt[] = {3300000, 1800000, 1200000};
	static u32 on[4] = {0};
	u32 id = smc_host->pdev->id;

	if (smc_host->regulator == NULL)
		return 0;
	BUG_ON(vdd > SDC_WOLTAGE_OFF);

	if (vdd == SDC_WOLTAGE_OFF) {
		if (on[id]) {
			SMC_DBG(smc_host, "regulator off\n");
			regulator_force_disable(smc_host->regulator);
			on[id] = 0;
		}
	} else {
		/* all three live voltages get identical handling; the table
		 * replaces the triplicated switch cases of the original */
		regulator_set_voltage(smc_host->regulator, uvolt[vdd], uvolt[vdd]);
		if (!on[id]) {
			SMC_DBG(smc_host, "regulator on\n");
			regulator_enable(smc_host->regulator);
			on[id] = 1;
		}
	}

	SMC_MSG(smc_host, "sdc%d switch io voltage to %s\n", smc_host->pdev->id, vddstr[vdd]);
	return 0;
}
/* Issue the "update clock only" command and busy-wait (up to 1s) for
 * the controller to clear the start bit.  Returns 0 on success, -1 on
 * timeout (and sets smc_host->ferror). */
s32 sw_mci_update_clk(struct sunxi_mmc_host* smc_host)
{
	u32 rval;
	/* Fix: hold the deadline in unsigned long and compare with
	 * time_before() — the original's "s32 expire; jiffies < expire"
	 * truncates jiffies on 64-bit and misbehaves across jiffy wrap. */
	unsigned long expire = jiffies + msecs_to_jiffies(1000); /* 1000ms timeout */
	s32 ret = 0;

	rval = SDXC_Start|SDXC_UPCLKOnly|SDXC_WaitPreOver;
	if (smc_host->voltage_switching)
		rval |= SDXC_VolSwitch;
	mci_writel(smc_host, REG_CMDR, rval);

	/* hardware clears SDXC_Start when the clock update completes */
	do {
		rval = mci_readl(smc_host, REG_CMDR);
	} while (time_before(jiffies, expire) && (rval & SDXC_Start));

	if (rval & SDXC_Start) {
		smc_host->ferror = 1;
		SMC_ERR(smc_host, "update clock timeout, fatal error\n");
		ret = -1;
	}

	return ret;
}
/* UHS-I Operation Modes
* DS 25MHz 12.5MB/s 3.3V
* HS 50MHz 25MB/s 3.3V
* SDR12 25MHz 12.5MB/s 1.8V
* SDR25 50MHz 25MB/s 1.8V
* SDR50 100MHz 50MB/s 1.8V
* SDR104 208MHz 104MB/s 1.8V
* DDR50 50MHz 50MB/s 1.8V
* MMC Operation Modes
* DS 26MHz 26MB/s 3/1.8/1.2V
* HS 52MHz 52MB/s 3/1.8/1.2V
* HSDDR 52MHz 104MB/s 3/1.8/1.2V
* HS200 200MHz 200MB/s 1.8/1.2V
*
* Spec. Timing
* SD3.0
* Fcclk Tcclk Fsclk Tsclk Tis Tih odly RTis RTih
* 400K 2.5us 24M 41ns 5ns 5ns 1 2209ns 41ns
* 25M 40ns 600M 1.67ns 5ns 5ns 3 14.99ns 5.01ns
* 50M 20ns 600M 1.67ns 6ns 2ns 3 14.99ns 5.01ns
* 50MDDR 20ns 600M 1.67ns 6ns 0.8ns 2 6.67ns 3.33ns
* 104M 9.6ns 600M 1.67ns 3ns 0.8ns 1 7.93ns 1.67ns
* 208M 4.8ns 600M 1.67ns 1.4ns 0.8ns 1 3.33ns 1.67ns
* 25M 40ns 300M 3.33ns 5ns 5ns 2 13.34ns 6.66ns
* 50M 20ns 300M 3.33ns 6ns 2ns 2 13.34ns 6.66ns
* 50MDDR 20ns 300M 3.33ns 6ns 0.8ns 1 6.67ns 3.33ns
* 104M 9.6ns 300M 3.33ns 3ns 0.8ns 0 7.93ns 1.67ns
* 208M 4.8ns 300M 3.33ns 1.4ns 0.8ns 0 3.13ns 1.67ns
* eMMC4.5
* 400K 2.5us 24M 41ns 3ns 3ns 1 2209ns 41ns
* 25M 40ns 600M 1.67ns 3ns 3ns 3 14.99ns 5.01ns
* 50M 20ns 600M 1.67ns 3ns 3ns 3 14.99ns 5.01ns
* 50MDDR 20ns 600M 1.67ns 2.5ns 2.5ns 2 6.67ns 3.33ns
* 200M 5ns 600M 1.67ns 1.4ns 0.8ns 1 3.33ns 1.67ns
*/
/* Per-speed-mode clock delay tuning.  oclk_dly / sclk_dly are written
 * into the CCM MMC clock register by sw_mci_set_clk_dly() (bits 8-10
 * and 20-22 respectively); values follow the timing table above. */
struct sw_mmc_clk_dly {
	u32 mode;           /* one of the MMC_CLK_* speed modes below */
#define MMC_CLK_400K 0
#define MMC_CLK_25M 1
#define MMC_CLK_50M 2
#define MMC_CLK_50MDDR 3
#define MMC_CLK_50MDDR_8BIT 4
#define MMC_CLK_100M 5
#define MMC_CLK_200M 6
#define MMC_CLK_MOD_NUM 7
	u32 oclk_dly;       /* output clock delay field */
	u32 sclk_dly;       /* sample clock delay field */
} mmc_clk_dly [MMC_CLK_MOD_NUM] = {
	{MMC_CLK_400K, 0, 7},
	{MMC_CLK_25M, 0, 5},
	{MMC_CLK_50M, 3, 5},
	{MMC_CLK_50MDDR, 2, 4},
	{MMC_CLK_50MDDR_8BIT, 2, 4},
	{MMC_CLK_100M, 1, 4},
	{MMC_CLK_200M, 1, 4},
};
/* Program the output/sample clock delay fields of this controller's CCM
 * MMC clock register and remember the values on the host.  Returns 0. */
s32 sw_mci_set_clk_dly(struct sunxi_mmc_host* smc_host, u32 oclk_dly, u32 sclk_dly)
{
	u32 smc_no = smc_host->pdev->id;
	/* NOTE(review): hard-coded CCM SDMMCn clock register address
	 * (0x01c20088 + 4*n) — assumes the sun4i/sun5i register layout;
	 * confirm for other SoCs. */
	void __iomem *mclk_base = __io_address(0x01c20088 + 0x4 * smc_no);
	u32 rval;
	unsigned long iflags;

	spin_lock_irqsave(&smc_host->lock, iflags);
	rval = readl(mclk_base);
	/* output delay lives in bits 8-10, sample delay in bits 20-22 */
	rval &= ~((0x7U << 8) | (0x7U << 20));
	rval |= (oclk_dly << 8) | (sclk_dly << 20);
	writel(rval, mclk_base);
	spin_unlock_irqrestore(&smc_host->lock, iflags);

	smc_host->oclk_dly = oclk_dly;
	smc_host->sclk_dly = sclk_dly;
	SMC_DBG(smc_host, "oclk_dly %d, sclk_dly %d\n", oclk_dly, sclk_dly);
	return 0;
}
/* Gate the card clock on/off and set the low-power (clock auto-gating)
 * bit, then latch the change with an update-clock command.  Low power
 * mode is used whenever requested or when no card I/O is in flight. */
s32 sw_mci_oclk_onoff(struct sunxi_mmc_host* smc_host, u32 oclk_en, u32 pwr_save)
{
	u32 clkcr = mci_readl(smc_host, REG_CLKCR) &
			~(SDXC_CardClkOn | SDXC_LowPowerOn);

	if (oclk_en)
		clkcr |= SDXC_CardClkOn;
	if (pwr_save || !smc_host->io_flag)
		clkcr |= SDXC_LowPowerOn;

	mci_writel(smc_host, REG_CLKCR, clkcr);
	sw_mci_update_clk(smc_host);
	return 0;
}
/* Translate an mmc_command into the controller's CMDR/IMASK encoding,
 * record which completion event(s) the IRQ handler should wait for,
 * then fire the command.  Register writes are done under the host lock. */
static void sw_mci_send_cmd(struct sunxi_mmc_host* smc_host, struct mmc_command* cmd)
{
	u32 imask = SDXC_IntErrBit;
	u32 cmd_val = SDXC_Start|(cmd->opcode&0x3f);
	unsigned long iflags;
	u32 wait = SDC_WAIT_NONE;

	wait = SDC_WAIT_CMD_DONE;
	if (cmd->opcode == MMC_GO_IDLE_STATE) {
		/* CMD0 needs the 80-clock initialization sequence */
		cmd_val |= SDXC_SendInitSeq;
		imask |= SDXC_CmdDone;
	}

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		/* CMD11: wait for the voltage-change-done event instead */
		cmd_val |= SDXC_VolSwitch;
		imask |= SDXC_VolChgDone;
		smc_host->voltage_switching = 1;
		wait = SDC_WAIT_SWITCH1V8;
		/* switch controller to high power mode */
		sw_mci_oclk_onoff(smc_host, 1, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		cmd_val |= SDXC_RspExp;
		if (cmd->flags & MMC_RSP_136)
			cmd_val |= SDXC_LongRsp;
		if (cmd->flags & MMC_RSP_CRC)
			cmd_val |= SDXC_CheckRspCRC;

		if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
			/* data transfer command */
			cmd_val |= SDXC_DataExp | SDXC_WaitPreOver;
			wait = SDC_WAIT_DATA_OVER;
			if (cmd->data->flags & MMC_DATA_STREAM) {
				imask |= SDXC_AutoCMDDone;
				cmd_val |= SDXC_Seqmod | SDXC_SendAutoStop;
				wait = SDC_WAIT_AUTOCMD_DONE;
			}
			if (cmd->data->stop) {
				/* let the controller issue the stop command */
				imask |= SDXC_AutoCMDDone;
				cmd_val |= SDXC_SendAutoStop;
				wait = SDC_WAIT_AUTOCMD_DONE;
			} else
				imask |= SDXC_DataOver;

			if (cmd->data->flags & MMC_DATA_WRITE)
				cmd_val |= SDXC_Write;
			else
				/* reads must also wait for the DMA interrupt */
				wait |= SDC_WAIT_DMA_DONE;
		} else
			imask |= SDXC_CmdDone;
	} else
		imask |= SDXC_CmdDone;
	SMC_DBG(smc_host, "smc %d cmd %d(%08x) arg %x ie 0x%08x wt %x len %d\n",
		smc_host->pdev->id, cmd_val&0x3f, cmd->arg, cmd_val, imask, wait,
		smc_host->mrq->data ? smc_host->mrq->data->blksz * smc_host->mrq->data->blocks : 0);

	spin_lock_irqsave(&smc_host->lock, iflags);
	smc_host->wait = wait;
	smc_host->state = SDC_STATE_SENDCMD;
	mci_writew(smc_host, REG_IMASK, imask);
	mci_writel(smc_host, REG_CARG, cmd->arg);
	/* writing CMDR with SDXC_Start set launches the command */
	mci_writel(smc_host, REG_CMDR, cmd_val);
	smp_wmb();
	spin_unlock_irqrestore(&smc_host->lock, iflags);
}
/* Build the IDMA descriptor chain for a request in the preallocated
 * sg_cpu buffer.  Each scatterlist entry is split into chunks of at
 * most SDXC_DES_BUFFER_MAX_LEN bytes; descriptors are chained through
 * buf_addr_ptr2 (using physical addresses from sg_dma) and the last
 * one gets the last/end-of-ring flags with interrupt-on-completion. */
static void sw_mci_init_idma_des(struct sunxi_mmc_host* smc_host, struct mmc_data* data)
{
	struct sunxi_mmc_idma_des* pdes = (struct sunxi_mmc_idma_des*)smc_host->sg_cpu;
	struct sunxi_mmc_idma_des* pdes_pa = (struct sunxi_mmc_idma_des*)smc_host->sg_dma;
	u32 des_idx = 0;
	u32 buff_frag_num = 0;
	u32 remain;
	u32 i, j;
	u32 config;

	for (i=0; i<data->sg_len; i++) {
		/* number of max-size chunks, plus one partial chunk if any */
		buff_frag_num = data->sg[i].length >> SDXC_DES_NUM_SHIFT;
		remain = data->sg[i].length & (SDXC_DES_BUFFER_MAX_LEN-1);
		if (remain)
			buff_frag_num ++;
		else
			remain = SDXC_DES_BUFFER_MAX_LEN;

		for (j=0; j < buff_frag_num; j++, des_idx++) {
			memset((void*)&pdes[des_idx], 0, sizeof(struct sunxi_mmc_idma_des));
			/* chained mode, owned by the DMA engine, completion
			 * interrupt suppressed except on the final descriptor */
			config = SDXC_IDMAC_DES0_CH|SDXC_IDMAC_DES0_OWN|SDXC_IDMAC_DES0_DIC;

			if (buff_frag_num > 1 && j != buff_frag_num-1)
				pdes[des_idx].data_buf1_sz = SDXC_DES_BUFFER_MAX_LEN;
			else
				pdes[des_idx].data_buf1_sz = remain;

			pdes[des_idx].buf_addr_ptr1 = sg_dma_address(&data->sg[i])
							+ j * SDXC_DES_BUFFER_MAX_LEN;
			if (i==0 && j==0)
				config |= SDXC_IDMAC_DES0_FD;       /* first descriptor */

			if ((i == data->sg_len-1) && (j == buff_frag_num-1)) {
				/* last descriptor: allow its interrupt, mark end of ring */
				config &= ~SDXC_IDMAC_DES0_DIC;
				config |= SDXC_IDMAC_DES0_LD|SDXC_IDMAC_DES0_ER;
				pdes[des_idx].buf_addr_ptr2 = 0;
			} else {
				/* link to the next descriptor's physical address */
				pdes[des_idx].buf_addr_ptr2 = (u32)&pdes_pa[des_idx+1];
			}
			pdes[des_idx].config = config;
			SMC_INF(smc_host, "sg %d, frag %d, remain %d, des[%d](%08x): "
				"[0] = %08x, [1] = %08x, [2] = %08x, [3] = %08x\n", i, j, remain,
				des_idx, (u32)&pdes[des_idx],
				(u32)((u32*)&pdes[des_idx])[0], (u32)((u32*)&pdes[des_idx])[1],
				(u32)((u32*)&pdes[des_idx])[2], (u32)((u32*)&pdes[des_idx])[3]);
		}
	}
	/* descriptors must be visible to the DMA engine before it starts */
	smp_wmb();
	return;
}
static int sw_mci_prepare_dma(struct sunxi_mmc_host* smc_host, struct mmc_data* data)
{
u32 dma_len;
u32 i;
u32 temp;
struct scatterlist *sg;
if (smc_host->sg_cpu == NULL)
return -ENOMEM;
dma_len = dma_map_sg(mmc_dev(smc_host->mmc), data->sg, data->sg_len,
(data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (dma_len == 0) {
SMC_ERR(smc_host, "no dma map memory\n");
return -ENOMEM;
}
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3) {
SMC_ERR(smc_host, "unaligned scatterlist: os %x length %d\n",
sg->offset, sg->length);
return -EINVAL;
}
}
sw_mci_init_idma_des(smc_host, data);
temp = mci_readl(smc_host, REG_GCTRL);
temp |= SDXC_DMAEnb;
mci_writel(smc_host, REG_GCTRL, temp);
temp |= SDXC_DMAReset;
mci_writel(smc_host, REG_GCTRL, temp);
mci_writel(smc_host, REG_DMAC, SDXC_IDMACSoftRST);
temp = SDXC_IDMACFixBurst|SDXC_IDMACIDMAOn;
mci_writel(smc_host, REG_DMAC, temp);
temp = mci_readl(smc_host, REG_IDIE);
temp &= ~(SDXC_IDMACReceiveInt|SDXC_IDMACTransmitInt);
if (data->flags & MMC_DATA_WRITE)
temp |= SDXC_IDMACTransmitInt;
else
temp |= SDXC_IDMACReceiveInt;
mci_writel(smc_host, REG_IDIE, temp);
//write descriptor address to register
mci_writel(smc_host, REG_DLBA, smc_host->sg_dma);
mci_writel(smc_host, REG_FTRGL, smc_host->pdata->dma_tl);
return 0;
}
/* Manually issue CMD12 (STOP_TRANSMISSION) by polling, with interrupts
 * masked for the duration.  Stores the stop response into req->stop if
 * present.  Returns 0 on success, -1 on error or missing data phase. */
int sw_mci_send_manual_stop(struct sunxi_mmc_host* smc_host, struct mmc_request* req)
{
	struct mmc_data* data = req->data;
	u32 cmd_val = SDXC_Start | SDXC_RspExp | SDXC_StopAbortCMD
			| SDXC_CheckRspCRC | MMC_STOP_TRANSMISSION;
	u32 iflags = 0;
	u32 imask = 0;
	int ret = 0;
	/* Fix: keep the deadline in unsigned long and compare with
	 * time_before() — the original's "u32 expire; jiffies < expire"
	 * truncates jiffies on 64-bit and breaks across jiffy wrap. */
	unsigned long expire = jiffies + msecs_to_jiffies(1000);

	if (!data) {
		SMC_ERR(smc_host, "no data request\n");
		return -1;
	}

	/* disable interrupt */
	imask = mci_readw(smc_host, REG_IMASK);
	mci_writew(smc_host, REG_IMASK, 0);

	mci_writel(smc_host, REG_CARG, 0);
	mci_writel(smc_host, REG_CMDR, cmd_val);

	/* poll for command completion or an error */
	do {
		iflags = mci_readw(smc_host, REG_RINTR);
	} while (!(iflags & (SDXC_CmdDone | SDXC_IntErrBit)) &&
		 time_before(jiffies, expire));

	if (iflags & SDXC_IntErrBit) {
		SMC_ERR(smc_host, "sdc %d send stop command failed\n", smc_host->pdev->id);
		ret = -1;
	}

	if (req->stop)
		req->stop->resp[0] = mci_readl(smc_host, REG_RESP0);

	/* clear the raw status we consumed */
	mci_writew(smc_host, REG_RINTR, iflags);

	/* enable interrupt */
	mci_writew(smc_host, REG_IMASK, imask);

	return ret;
}
/* Decode the accumulated interrupt-status error bits of the current
 * request into one human-readable error line. */
void sw_mci_dump_errinfo(struct sunxi_mmc_host* smc_host)
{
	SMC_ERR(smc_host, "smc %d err, cmd %d, %s%s%s%s%s%s%s%s%s%s\n",
		smc_host->pdev->id, smc_host->mrq->cmd ? smc_host->mrq->cmd->opcode : -1,
		smc_host->int_sum & SDXC_RespErr ? " RE" : "",
		smc_host->int_sum & SDXC_RespCRCErr ? " RCE" : "",
		smc_host->int_sum & SDXC_DataCRCErr ? " DCE" : "",
		smc_host->int_sum & SDXC_RespTimeout ? " RTO" : "",
		smc_host->int_sum & SDXC_DataTimeout ? " DTO" : "",
		smc_host->int_sum & SDXC_DataStarve ? " DS" : "",
		smc_host->int_sum & SDXC_FIFORunErr ? " FE" : "",
		smc_host->int_sum & SDXC_HardWLocked ? " HL" : "",
		smc_host->int_sum & SDXC_StartBitErr ? " SBE" : "",
		smc_host->int_sum & SDXC_EndBitErr ? " EBE" : ""
		);
}
s32 sw_mci_request_done(struct sunxi_mmc_host* smc_host)
{
struct mmc_request* req = smc_host->mrq;
u32 temp;
s32 ret = 0;
if (smc_host->int_sum & SDXC_IntErrBit) {
/* if we got response timeout error information, we should check
if the command done status has been set. if there is no command
done information, we should wait this bit to be set */
if ((smc_host->int_sum & SDXC_RespTimeout) && !(smc_host->int_sum & SDXC_CmdDone)) {
u32 rint;
u32 expire = jiffies + 1;
do {
rint = mci_readl(smc_host, REG_RINTR);
} while (jiffies < expire && !(rint & SDXC_CmdDone));
}
sw_mci_dump_errinfo(smc_host);
if (req->data)
SMC_ERR(smc_host, "In data %s operation\n",
req->data->flags & MMC_DATA_WRITE ? "write" : "read");
ret = -1;
goto out;
}
if (req->cmd) {
if (req->cmd->flags & MMC_RSP_136) {
req->cmd->resp[0] = mci_readl(smc_host, REG_RESP3);
req->cmd->resp[1] = mci_readl(smc_host, REG_RESP2);
req->cmd->resp[2] = mci_readl(smc_host, REG_RESP1);
req->cmd->resp[3] = mci_readl(smc_host, REG_RESP0);
} else {
req->cmd->resp[0] = mci_readl(smc_host, REG_RESP0);
}
}
out:
if (req->data) {
struct mmc_data* data = req->data;
mci_writel(smc_host, REG_IDST, 0x337);
mci_writel(smc_host, REG_IDIE, 0);
mci_writel(smc_host, REG_DMAC, 0);
temp = mci_readl(smc_host, REG_GCTRL);
mci_writel(smc_host, REG_GCTRL, temp|SDXC_DMAReset);
temp &= ~SDXC_DMAEnb;
mci_writel(smc_host, REG_GCTRL, temp);
temp |= SDXC_FIFOReset;
mci_writel(smc_host, REG_GCTRL, temp);
dma_unmap_sg(mmc_dev(smc_host->mmc), data->sg, data->sg_len,
data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
mci_writew(smc_host, REG_IMASK, 0);
if (smc_host->int_sum & (SDXC_RespErr | SDXC_HardWLocked | SDXC_RespTimeout)) {
SMC_DBG(smc_host, "sdc %d abnormal status: %s\n", smc_host->pdev->id,
smc_host->int_sum & SDXC_HardWLocked ? "HardWLocked" : "RespErr");
}
mci_writew(smc_host, REG_RINTR, 0xffff);
SMC_DBG(smc_host, "smc %d done, resp %08x %08x %08x %08x\n", smc_host->pdev->id,
req->cmd->resp[0], req->cmd->resp[1], req->cmd->resp[2], req->cmd->resp[3]);
if (req->data && (smc_host->int_sum & SDXC_IntErrBit)) {
SMC_MSG(smc_host, "found data error, need to send stop command\n");
sw_mci_send_manual_stop(smc_host, req);
}
return ret;
}
/* static s32 sw_mci_set_clk(struct sunxi_mmc_host* smc_host, u32 clk);
 * set the clock rate and the phase of the output/input clock according
 * to the current timing condition
 */
/*
 * Configure the controller clock for the requested card clock `clk` (Hz):
 * reparent and rate-set the module clock, then program the output/sample
 * delay taps that match the resulting frequency band.
 * Returns 0 on success, -1 on any clock-framework failure.
 */
static int sw_mci_set_clk(struct sunxi_mmc_host* smc_host, u32 clk)
{
	struct clk *sclk = NULL;
	u32 mod_clk = 0;
	u32 src_clk = 0;
	u32 temp;
	u32 oclk_dly = 2;
	u32 sclk_dly = 2;
	struct sw_mmc_clk_dly* dly = NULL;
	s32 err;
	u32 rate;
	/* source select: HOSC for identification speeds (<=400kHz),
	 * PLL6 otherwise; the target rate itself comes from
	 * smc_host->mod_clk as set by the caller */
	if (clk <= 400000) {
		mod_clk = smc_host->mod_clk;
		sclk = clk_get(&smc_host->pdev->dev, MMC_SRCCLK_HOSC);
	} else {
		mod_clk = smc_host->mod_clk;
		sclk = clk_get(&smc_host->pdev->dev, MMC_SRCCLK_PLL6);
	}
	if (IS_ERR(sclk)) {
		SMC_ERR(smc_host, "Error to get source clock for clk %dHz\n", clk);
		return -1;
	}
	/* route the module clock to the chosen source and set its rate */
	err = clk_set_parent(smc_host->mclk, sclk);
	if (err) {
		SMC_ERR(smc_host, "sdc%d set mclk parent error\n", smc_host->pdev->id);
		clk_put(sclk);
		return -1;
	}
	err = clk_set_rate(smc_host->mclk, mod_clk);
	if (err) {
		SMC_ERR(smc_host, "sdc%d set mclk rate error, rate %dHz\n",
			smc_host->pdev->id, mod_clk);
		clk_put(sclk);
		return -1;
	}
	rate = clk_get_rate(smc_host->mclk);
	if (0 == rate) {
		SMC_ERR(smc_host, "sdc%d get mclk rate error\n",
			smc_host->pdev->id);
		clk_put(sclk);
		return -1;
	}
	src_clk = clk_get_rate(sclk);
	clk_put(sclk);
	/* record the rate the clock framework actually achieved */
	smc_host->mod_clk = smc_host->card_clk = rate;
	SMC_MSG(smc_host, "sdc%d set round clock %d, src %d\n", smc_host->pdev->id, rate, src_clk);
	/* gate the card clock while the divider and delays are reprogrammed */
	sw_mci_oclk_onoff(smc_host, 0, 0);
	/* clear internal divider */
	temp = mci_readl(smc_host, REG_CLKCR);
	temp &= ~0xff;
	mci_writel(smc_host, REG_CLKCR, temp);
	sw_mci_oclk_onoff(smc_host, 0, 0);
	/* pick the delay-tap table entry for the target frequency band */
	if (clk <= 400000) {
		dly = &mmc_clk_dly[MMC_CLK_400K];
	} else if (clk <= 25000000) {
		dly = &mmc_clk_dly[MMC_CLK_25M];
	} else if (clk <= 50000000) {
		if (smc_host->ddr) {
			if (smc_host->bus_width == 8)
				dly = &mmc_clk_dly[MMC_CLK_50MDDR_8BIT];
			else
				dly = &mmc_clk_dly[MMC_CLK_50MDDR];
		} else {
			dly = &mmc_clk_dly[MMC_CLK_50M];
		}
	} else if (clk <= 104000000) {
		dly = &mmc_clk_dly[MMC_CLK_100M];
	} else if (clk <= 208000000) {
		dly = &mmc_clk_dly[MMC_CLK_200M];
	} else
		dly = &mmc_clk_dly[MMC_CLK_50M];
	oclk_dly = dly->oclk_dly;
	sclk_dly = dly->sclk_dly;
	/* with PLL6 in the 300-400MHz band use one tap less — presumably
	 * because each tap is longer at these source rates; confirm
	 * against the SoC manual */
	if (src_clk >= 300000000 && src_clk <= 400000000) {
		if (oclk_dly)
			oclk_dly--;
		if (sclk_dly)
			sclk_dly--;
	}
	sw_mci_set_clk_dly(smc_host, oclk_dly, sclk_dly);
	/* re-enable the card clock */
	sw_mci_oclk_onoff(smc_host, 1, 0);
	return 0;
}
/* Apply the requested pad driver level `drv` to all MMC pins
 * (clk + cmd + the data lines, i.e. bus width + 2 pins). */
static void sw_mci_update_io_driving(struct sunxi_mmc_host *smc_host, u32 drv)
{
	struct sunxi_mmc_platform_data *pdata = smc_host->pdata;
	u32 smc_no = smc_host->pdev->id;
	int idx;

	for (idx = 0; idx < pdata->width + 2; idx++) {
		int err = gpio_set_one_pin_driver_level(pdata->mmcio[idx], drv,
							mmc_para_io[idx]);
		if (err != 0)
			SMC_ERR(smc_host, "sdc%u set %s drvlvl failed\n",
				smc_no, mmc_para_io[idx]);
	}
	SMC_DBG(smc_host, "sdc%u set mmcio driving to %d\n", smc_no, drv);
}
static int sw_mci_resource_request(struct sunxi_mmc_host *smc_host)
{
struct platform_device *pdev = smc_host->pdev;
u32 smc_no = pdev->id;
char hclk_name[16] = {0};
char mclk_name[8] = {0};
struct resource* res = NULL;
s32 ret;
/* io mapping */
res = request_mem_region(SMC_BASE(smc_no), SMC_BASE_OS, pdev->name);
if (!res) {
SMC_ERR(smc_host, "Failed to request io memory region.\n");
return -ENOENT;
}
smc_host->reg_base = ioremap(res->start, SMC_BASE_OS);
if (!smc_host->reg_base) {
SMC_ERR(smc_host, "Failed to ioremap() io memory region.\n");
ret = -EINVAL;
goto free_mem_region;
}
/* hclk */
sprintf(hclk_name, MMC_AHBCLK_PREFIX"%d", smc_no);
smc_host->hclk = clk_get(&pdev->dev, hclk_name);
if (IS_ERR(smc_host->hclk)) {
ret = PTR_ERR(smc_host->hclk);
SMC_ERR(smc_host, "Error to get ahb clk for %s\n", hclk_name);
goto iounmap;
}
/* mclk */
sprintf(mclk_name, MMC_MODCLK_PREFIX"%d", smc_no);
smc_host->mclk = clk_get(&pdev->dev, mclk_name);
if (IS_ERR(smc_host->mclk)) {
ret = PTR_ERR(smc_host->mclk);
SMC_ERR(smc_host, "Error to get clk for %s\n", mclk_name);
goto free_hclk;
}
/* alloc idma descriptor structure */
smc_host->sg_cpu = dma_alloc_writecombine(NULL, PAGE_SIZE,
&smc_host->sg_dma, GFP_KERNEL);
if (smc_host->sg_cpu == NULL) {
SMC_ERR(smc_host, "alloc dma des failed\n");
goto free_mclk;
}
/* get power regulator */
if (smc_host->pdata->regulator[0]) {
smc_host->regulator = regulator_get(NULL, smc_host->pdata->regulator);
if (!smc_host->regulator) {
SMC_ERR(smc_host, "Get regulator %s failed\n", smc_host->pdata->regulator);
goto free_sgbuff;
}
}
return 0;
free_sgbuff:
dma_free_coherent(NULL, PAGE_SIZE, smc_host->sg_cpu, smc_host->sg_dma);
smc_host->sg_cpu = NULL;
smc_host->sg_dma = 0;
free_mclk:
clk_put(smc_host->mclk);
smc_host->mclk = NULL;
free_hclk:
clk_put(smc_host->hclk);
smc_host->hclk = NULL;
iounmap:
iounmap(smc_host->reg_base);
free_mem_region:
release_mem_region(SMC_BASE(smc_no), SMC_BASE_OS);
return -1;
}
/*
 * Undo sw_mci_resource_request(): release the regulator, the IDMA
 * descriptor memory, both clocks, and the register mapping.
 * Always returns 0.
 */
static int sw_mci_resource_release(struct sunxi_mmc_host *smc_host)
{
	/* free power regulator */
	if (smc_host->regulator) {
		regulator_put(smc_host->regulator);
		smc_host->regulator = NULL;
	}
	/* free idma descriptor structure.
	 * NOTE(review): the buffer was allocated with dma_alloc_writecombine()
	 * but is freed with dma_free_coherent() — verify the two are
	 * interchangeable on this platform. */
	if (smc_host->sg_cpu) {
		dma_free_coherent(NULL, PAGE_SIZE,
			smc_host->sg_cpu, smc_host->sg_dma);
		smc_host->sg_cpu = NULL;
		smc_host->sg_dma = 0;
	}
	clk_put(smc_host->hclk);
	smc_host->hclk = NULL;
	clk_put(smc_host->mclk);
	smc_host->mclk = NULL;
	iounmap(smc_host->reg_base);
	release_mem_region(SMC_BASE(smc_host->pdev->id), SMC_BASE_OS);
	return 0;
}
/* Park every MMC pin (clk/cmd + data lines) in a disabled, no-pull
 * state for suspend; aborts on the first pin that fails. */
static void sw_mci_hold_io(struct sunxi_mmc_host* smc_host)
{
	struct sunxi_mmc_platform_data *pdata = smc_host->pdata;
	u32 idx;
	int err;

	for (idx = 0; idx < pdata->width + 2; idx++) {
		user_gpio_set_t cfg = pdata->mmcio_settings[idx];

		cfg.mul_sel = 0;	/* pin function: disabled */
		cfg.pull = 0;		/* no pull-up/down */
		err = gpio_set_one_pin_status(pdata->mmcio[idx], &cfg,
					      mmc_para_io[idx], 1);
		if (err != 0) {
			SMC_ERR(smc_host, "sdc%d hold mmcio%d failed\n",
				smc_host->pdev->id, idx);
			return;
		}
	}
	SMC_DBG(smc_host, "mmc %d suspend pins\n", smc_host->pdev->id);
	return;
}
/* Restore every MMC pin to its configured function on resume;
 * aborts on the first pin that fails. */
static void sw_mci_restore_io(struct sunxi_mmc_host* smc_host)
{
	struct sunxi_mmc_platform_data *pdata = smc_host->pdata;
	u32 idx;
	int err;

	for (idx = 0; idx < pdata->width + 2; idx++) {
		err = gpio_set_one_pin_status(pdata->mmcio[idx], NULL,
					      mmc_para_io[idx], 0);
		if (err) {
			SMC_ERR(smc_host, "sdc%d restore mmcio%d failed\n",
				smc_host->pdev->id, idx);
			return;
		}
	}
	SMC_DBG(smc_host, "mmc %d resume pins\n", smc_host->pdev->id);
}
/*
 * Tasklet-context completion path: if the IRQ handler flagged
 * SDC_WAIT_FINALIZE, collect the result, propagate errors to the
 * mmc_request, and hand the request back to the MMC core.
 */
static void sw_mci_finalize_request(struct sunxi_mmc_host *smc_host)
{
	struct mmc_request* mrq = smc_host->mrq;
	unsigned long iflags;
	/* re-check the wait state under the lock — the tasklet may run
	 * when there is nothing (or no longer anything) to finalize */
	spin_lock_irqsave(&smc_host->lock, iflags);
	if (smc_host->wait != SDC_WAIT_FINALIZE) {
		spin_unlock_irqrestore(&smc_host->lock, iflags);
		SMC_MSG(smc_host, "nothing finalize, wt %x, st %d\n",
			smc_host->wait, smc_host->state);
		return;
	}
	/* claim the request: reset per-transfer state before dropping the lock */
	smc_host->wait = SDC_WAIT_NONE;
	smc_host->state = SDC_STATE_IDLE;
	smc_host->trans_done = 0;
	smc_host->dma_done = 0;
	spin_unlock_irqrestore(&smc_host->lock, iflags);
	sw_mci_request_done(smc_host);
	if (smc_host->error) {
		/* any controller-reported error is mapped to -ETIMEDOUT */
		mrq->cmd->error = -ETIMEDOUT;
		if (mrq->data)
			mrq->data->error = -ETIMEDOUT;
		if (mrq->stop)
			mrq->stop->error = -ETIMEDOUT;
	} else {
		if (mrq->data)
			mrq->data->bytes_xfered = (mrq->data->blocks * mrq->data->blksz);
	}
	/* clear host bookkeeping before notifying the core so a new
	 * request can be issued immediately from the completion */
	smc_host->mrq = NULL;
	smc_host->error = 0;
	smc_host->int_sum = 0;
	smp_wmb();
	mmc_request_done(smc_host->mmc, mrq);
	return;
}
/* mmc_host_ops.get_ro handler: report write-protect state.
 * Returns 1 (and caches read_only) when the WP pin is asserted,
 * 0 otherwise; hosts without a WP pin are always writable. */
static s32 sw_mci_get_ro(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	struct sunxi_mmc_platform_data *pdata = smc_host->pdata;
	u32 wp_val;

	if (!pdata->wpmode) {
		/* no write-protect pin configured: always writable */
		smc_host->read_only = 0;
		return 0;
	}
	wp_val = gpio_read_one_pin_value(pdata->wp, "sdc_wp");
	SMC_DBG(smc_host, "sdc fetch card wp pin status: %d \n", wp_val);
	if (!wp_val) {
		smc_host->read_only = 0;
		return 0;
	}
	SMC_MSG(smc_host, "Card is write-protected\n");
	smc_host->read_only = 1;
	return 1;
}
/* Card-detect poll callback (timer/debounce): sample the detect GPIO
 * five times 1ms apart; only a unanimous reading changes the cached
 * present state and notifies the MMC core. Re-arms the poll timer in
 * CARD_DETECT_BY_GPIO_POLL mode. */
static void sw_mci_cd_cb(unsigned long data)
{
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	struct sunxi_mmc_platform_data *pdata = smc_host->pdata;
	u32 level_sum = 0;
	u32 present;
	u32 sample;

	for (sample = 0; sample < 5; sample++) {
		level_sum += gpio_read_one_pin_value(pdata->cd, "sdc_det");
		mdelay(1);
	}
	if (level_sum == 5)
		present = 0;		/* pin high on every sample: removed */
	else if (level_sum == 0)
		present = 1;		/* pin low on every sample: inserted */
	else
		goto modtimer;		/* bouncing — try again on the next poll */
	SMC_DBG(smc_host, "cd %d, host present %d, cur present %d\n",
		level_sum, smc_host->present, present);
	if (smc_host->present ^ present) {
		SMC_MSG(smc_host, "mmc %d detect change, present %d\n",
			smc_host->pdev->id, present);
		smc_host->present = present;
		smp_wmb();
		if (smc_host->present)
			mmc_detect_change(smc_host->mmc, msecs_to_jiffies(500));
		else
			mmc_detect_change(smc_host->mmc, msecs_to_jiffies(50));
	}
modtimer:
	if (smc_host->cd_mode == CARD_DETECT_BY_GPIO_POLL)
		mod_timer(&smc_host->cd_timer, jiffies + msecs_to_jiffies(300));
	return;
}
#if 0
/* GPIO-IRQ card-detect handler: simply forwards to the polling
 * callback. Currently compiled out — presumably kept for a future
 * IRQ-based card-detect mode; the driver uses the poll timer instead. */
static u32 sw_mci_cd_irq(void *data)
{
	sw_mci_cd_cb((unsigned long)data);
	return 0;
}
#endif
/* mmc_host_ops.get_cd handler: report the cached insertion state
 * maintained by the card-detect logic. */
static int sw_mci_card_present(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	return host->present;
}
/*
 * Top-half interrupt handler. Gathers raw/masked/IDMA status, handles
 * SDIO card interrupts immediately, accumulates transfer status into
 * int_sum, and schedules the finalize tasklet once the transfer (and
 * the DMA, when one is expected) has completed or errored out.
 */
static irqreturn_t sw_mci_irq(int irq, void *dev_id)
{
	struct sunxi_mmc_host *smc_host = dev_id;
	u32 sdio_int = 0;
	u32 raw_int;
	u32 msk_int;
	u32 idma_inte;
	u32 idma_int;
	spin_lock(&smc_host->lock);
	idma_int = mci_readl(smc_host, REG_IDST);
	idma_inte = mci_readl(smc_host, REG_IDIE);
	raw_int = mci_readl(smc_host, REG_RINTR);
	msk_int = mci_readl(smc_host, REG_MISTA);
	/* spurious interrupt: nothing pending in either status register */
	if (!msk_int && !idma_int) {
		SMC_MSG(smc_host, "sdc%d nop irq: ri %08x mi %08x ie %08x idi %08x\n",
			smc_host->pdev->id, raw_int, msk_int, idma_inte, idma_int);
		spin_unlock(&smc_host->lock);
		return IRQ_HANDLED;
	}
	/* accumulate raw status across the several interrupts one transfer
	 * can raise; consumed/cleared by the request-done path */
	smc_host->int_sum |= raw_int;
	SMC_INF(smc_host, "smc %d irq, ri %08x(%08x) mi %08x ie %08x idi %08x\n",
		smc_host->pdev->id, raw_int, smc_host->int_sum,
		msk_int, idma_inte, idma_int);
	/* SDIO card interrupt: ack it here, signal the core outside the lock */
	if (msk_int & SDXC_SDIOInt) {
		sdio_int = 1;
		mci_writel(smc_host, REG_RINTR, SDXC_SDIOInt);
		goto sdio_out;
	}
	/* interrupt with no request in flight: log and just ack below */
	if (smc_host->wait == SDC_WAIT_NONE && !sdio_int) {
		SMC_ERR(smc_host, "smc %x, nothing to complete, ri %08x, "
			"mi %08x\n", smc_host->pdev->id, raw_int, msk_int);
		goto irq_out;
	}
	/* any error bit (controller or IDMA) finishes the transfer now */
	if ((raw_int & SDXC_IntErrBit) || (idma_int & SDXC_IDMA_ERR)) {
		smc_host->error = raw_int & SDXC_IntErrBit;
		smc_host->wait = SDC_WAIT_FINALIZE;
		smc_host->state = SDC_STATE_CMDDONE;
		goto irq_out;
	}
	/* record DMA / transfer completion; both may be needed (see below) */
	if (idma_int & (SDXC_IDMACTransmitInt|SDXC_IDMACReceiveInt))
		smc_host->dma_done = 1;
	if (msk_int & (SDXC_AutoCMDDone|SDXC_DataOver|SDXC_CmdDone|SDXC_VolChgDone))
		smc_host->trans_done = 1;
	/* finalize when the awaited event arrived — for DMA transfers only
	 * when both the transfer and the DMA side have completed */
	if ((smc_host->trans_done && (smc_host->wait == SDC_WAIT_AUTOCMD_DONE
					|| smc_host->wait == SDC_WAIT_DATA_OVER
					|| smc_host->wait == SDC_WAIT_CMD_DONE
					|| smc_host->wait == SDC_WAIT_SWITCH1V8))
		|| (smc_host->trans_done && smc_host->dma_done && (smc_host->wait & SDC_WAIT_DMA_DONE))) {
		smc_host->wait = SDC_WAIT_FINALIZE;
		smc_host->state = SDC_STATE_CMDDONE;
	}
irq_out:
	/* ack everything we saw except the SDIO bit (handled above) */
	mci_writel(smc_host, REG_RINTR, msk_int&(~SDXC_SDIOInt));
	mci_writel(smc_host, REG_IDST, idma_int);
	if (smc_host->wait == SDC_WAIT_FINALIZE) {
		/* mask further interrupts; the tasklet completes the request */
		smp_wmb();
		mci_writew(smc_host, REG_IMASK, 0);
		tasklet_schedule(&smc_host->tasklet);
	}
sdio_out:
	spin_unlock(&smc_host->lock);
	/* must be called without the host lock held */
	if (sdio_int)
		mmc_signal_sdio_irq(smc_host->mmc);
	return IRQ_HANDLED;
}
/* Bottom half: complete the request the IRQ handler flagged for
 * finalization. */
static void sw_mci_tasklet(unsigned long data)
{
	sw_mci_finalize_request((struct sunxi_mmc_host *)data);
}
/*
 * mmc_host_ops.set_ios handler: apply power state, bus width, DDR
 * timing, and clock settings requested by the MMC core. Clock changes
 * are skipped when the requested rate equals the last one programmed
 * for this controller (tracked per-id in last_clock).
 */
static void sw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	char* bus_mode[] = {"", "OD", "PP"};
	char* pwr_mode[] = {"OFF", "UP", "ON"};
	char* vdd[] = {"3.3V", "1.8V", "1.2V"};
	char* timing[] = {"LEGACY(SDR12)", "MMC-HS(SDR20)", "SD-HS(SDR25)",
			"UHS-SDR50", "UHS-SDR104", "UHS-DDR50", "MMC-HS200"};
	char* drv_type[] = {"B", "A", "C", "D"};
	static u32 last_clock[4] = {0};
	u32 id = smc_host->pdev->id;
	u32 temp;
	s32 err;
	/* guard the debug-name lookups; note drv_type has no matching
	 * BUG_ON — presumably ios->drv_type is always < 4 here, confirm */
	BUG_ON(ios->bus_mode >= sizeof(bus_mode)/sizeof(bus_mode[0]));
	BUG_ON(ios->power_mode >= sizeof(pwr_mode)/sizeof(pwr_mode[0]));
	BUG_ON(ios->signal_voltage >= sizeof(vdd)/sizeof(vdd[0]));
	BUG_ON(ios->timing >= sizeof(timing)/sizeof(timing[0]));
	SMC_MSG(smc_host, "sdc%d set ios: "
		"clk %dHz bm %s pm %s vdd %s width %d timing %s dt %s\n",
		smc_host->pdev->id, ios->clock, bus_mode[ios->bus_mode],
		pwr_mode[ios->power_mode], vdd[ios->signal_voltage],
		1 << ios->bus_width, timing[ios->timing], drv_type[ios->drv_type]);
	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;
	case MMC_POWER_UP:
		/* power-up sequence: pins -> clocks -> reset release ->
		 * controller init -> irq enable */
		if (!smc_host->power_on) {
			SMC_MSG(smc_host, "sdc%d power on\n", smc_host->pdev->id);
			sw_mci_restore_io(smc_host);
			err = clk_enable(smc_host->hclk);
			if (err) {
				SMC_ERR(smc_host, "Failed to enable sdc%d hclk\n",
					smc_host->pdev->id);
			}
			err = clk_enable(smc_host->mclk);
			if (err) {
				SMC_ERR(smc_host, "Failed to enable sdc%d mclk\n",
					smc_host->pdev->id);
			}
			err = clk_reset(smc_host->mclk, AW_CCU_CLK_NRESET);
			if (err) {
				SMC_ERR(smc_host, "Failed to release sdc%d reset\n",
					smc_host->pdev->id);
			}
			mdelay(1);
			sw_mci_init_host(smc_host);
			enable_irq(smc_host->irq);
			smc_host->power_on = 1;
		}
		break;
	case MMC_POWER_OFF:
		/* power-down sequence: reverse of power-up */
		if (smc_host->power_on) {
			SMC_MSG(smc_host, "sdc%d power off\n", smc_host->pdev->id);
			disable_irq(smc_host->irq);
			sw_mci_exit_host(smc_host);
			err = clk_reset(smc_host->mclk, AW_CCU_CLK_RESET);
			if (err) {
				SMC_ERR(smc_host, "Failed to set sdc%d reset\n",
					smc_host->pdev->id);
			}
			clk_disable(smc_host->mclk);
			clk_disable(smc_host->hclk);
			sw_mci_hold_io(smc_host);
			smc_host->power_on = 0;
			smc_host->ferror = 0;
			last_clock[id] = 0;
		}
		break;
	}
	/* set bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		mci_writel(smc_host, REG_WIDTH, SDXC_WIDTH1);
		smc_host->bus_width = 1;
		break;
	case MMC_BUS_WIDTH_4:
		mci_writel(smc_host, REG_WIDTH, SDXC_WIDTH4);
		smc_host->bus_width = 4;
		break;
	case MMC_BUS_WIDTH_8:
		mci_writel(smc_host, REG_WIDTH, SDXC_WIDTH8);
		smc_host->bus_width = 8;
		break;
	}
	/* set ddr mode */
	temp = mci_readl(smc_host, REG_GCTRL);
	if (ios->timing == MMC_TIMING_UHS_DDR50) {
		temp |= SDXC_DDR_MODE;
		smc_host->ddr = 1;
		/* change io driving — DDR needs a stronger pad drive level */
		sw_mci_update_io_driving(smc_host, 3);
	} else {
		temp &= ~SDXC_DDR_MODE;
		smc_host->ddr = 0;
	}
	mci_writel(smc_host, REG_GCTRL, temp);
	/* set up clock */
	if (ios->clock && ios->clock != last_clock[id]) {
		/* in DDR mode the platform cap f_ddr_max overrides the
		 * requested clock */
		if (smc_host->ddr)
			ios->clock = smc_host->pdata->f_ddr_max;
		/* 8bit ddr, mod_clk = 2 * card_clk */
		if (smc_host->ddr && smc_host->bus_width == 8)
			smc_host->mod_clk = ios->clock << 1;
		else
			smc_host->mod_clk = ios->clock;
		smc_host->card_clk = ios->clock;
		/* hard cap on the module clock */
		if (smc_host->mod_clk > 45000000)
			smc_host->mod_clk = 45000000;
		sw_mci_set_clk(smc_host, smc_host->card_clk);
		last_clock[id] = ios->clock;
		/* let the new clock settle before the next command */
		usleep_range(50000, 55000);
	} else if (!ios->clock) {
		last_clock[id] = 0;
		sw_mci_update_clk(smc_host);
	}
}
/* mmc_host_ops.enable_sdio_irq handler: toggle the SDIO interrupt bit
 * in the interrupt mask register under the host lock. */
static void sw_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	unsigned long flags;
	u32 imask;

	spin_lock_irqsave(&smc_host->lock, flags);
	imask = mci_readl(smc_host, REG_IMASK);
	imask = enable ? (imask | SDXC_SDIOInt) : (imask & ~SDXC_SDIOInt);
	mci_writel(smc_host, REG_IMASK, imask);
	spin_unlock_irqrestore(&smc_host->lock, flags);
}
/* mmc_host_ops.hw_reset handler: pulse the hardware reset line.
 * Only controllers 2 and 3 act on this — presumably the ports wired
 * for eMMC; confirm against the board design. */
void sw_mci_hw_reset(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	u32 id = smc_host->pdev->id;

	if (id != 2 && id != 3)
		return;
	mci_writel(smc_host, REG_HWRST, 0);
	udelay(10);
	mci_writel(smc_host, REG_HWRST, 1);
	udelay(300);
}
/*
 * mmc_host_ops.request handler: check that the host/card can accept a
 * request, program the data-phase registers and IDMA descriptors when
 * there is data, then issue the command. Completion is asynchronous
 * via the IRQ handler and the finalize tasklet.
 */
static void sw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	struct mmc_command* cmd = mrq->cmd;
	struct mmc_data* data = mrq->data;
	u32 byte_cnt = 0;
	int ret;
	/* reject requests when no card is present, a fatal error was seen,
	 * the host is suspended, or power is off */
	if (sw_mci_card_present(mmc) == 0 || smc_host->ferror ||
		smc_host->suspend || !smc_host->power_on) {
		SMC_DBG(smc_host, "no medium present, ferr %d, suspend %d pwd %d\n",
			smc_host->ferror, smc_host->suspend, smc_host->power_on);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}
	smc_host->mrq = mrq;
	if (data) {
		/* program block size/count and hand the sg list to the IDMAC */
		byte_cnt = data->blksz * data->blocks;
		mci_writel(smc_host, REG_BLKSZ, data->blksz);
		mci_writel(smc_host, REG_BCNTR, byte_cnt);
		ret = sw_mci_prepare_dma(smc_host, data);
		if (ret < 0) {
			SMC_ERR(smc_host, "smc %d prepare DMA failed\n", smc_host->pdev->id);
			cmd->error = ret;
			cmd->data->error = ret;
			smp_wmb();
			mmc_request_done(smc_host->mmc, mrq);
			return;
		}
	}
	sw_mci_send_cmd(smc_host, cmd);
}
/*
 * mmc_host_ops.start_signal_voltage_switch handler: switch the card
 * I/O voltage between 3.3V and 1.8V following the UHS-I sequence
 * (clock stop, card drives DAT[3:0] low, supply switch, clock restart,
 * card releases DAT[3:0]). Returns 0 on success, -EAGAIN when the 1.8V
 * switch failed and the card was power-cycled back to 3.3V.
 */
static int sw_mci_do_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	if (smc_host->voltage != SDC_WOLTAGE_3V3 &&
	    ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		sw_mci_set_vddio(smc_host, SDC_WOLTAGE_3V3);
		/* give the supply ~1ms to settle */
		usleep_range(1000, 1500);
		smc_host->voltage = SDC_WOLTAGE_3V3;
		return 0;
	} else if (smc_host->voltage != SDC_WOLTAGE_1V8 &&
		   (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
		u32 data_down;
		/* clock off */
		sw_mci_oclk_onoff(smc_host, 0, 0);
		/* check whether data[3:0] is 0000 */
		data_down = mci_readl(smc_host, REG_STAS);
		if (!(data_down & SDXC_CardPresent)) {
			/* switch voltage of card vdd to 1.8V */
			sw_mci_set_vddio(smc_host, SDC_WOLTAGE_1V8);
			/* the standard defines the time limit is 5ms, here we
			   wait for 8ms to make sure that the card completes the
			   voltage switching */
			usleep_range(8000, 8500);
			/* clock on again */
			sw_mci_oclk_onoff(smc_host, 1, 0);
			/* wait ~2ms before sampling the data lines */
			usleep_range(2000, 2500);
			/* check whether data[3:0] is 1111 */
			data_down = mci_readl(smc_host, REG_STAS);
			if (data_down & SDXC_CardPresent) {
				u32 rval = mci_readl(smc_host, REG_RINTR);
				/* require BOTH voltage-change-done and
				 * command-done bits. The previous test used
				 * `&` between the two distinct flag bits,
				 * which collapses to 0 and made the check
				 * pass vacuously. */
				if ((rval & (SDXC_VolChgDone | SDXC_CmdDone))
				    == (SDXC_VolChgDone | SDXC_CmdDone)) {
					smc_host->voltage = SDC_WOLTAGE_1V8;
					mci_writew(smc_host, REG_RINTR,
						   SDXC_VolChgDone | SDXC_CmdDone);
					smc_host->voltage_switching = 0;
					return 0;
				}
			}
		}
		/*
		 * If we are here, that means the switch to 1.8V signaling
		 * failed. We power cycle the card, and retry initialization
		 * sequence by setting S18R to 0.
		 */
		usleep_range(5000, 5500);
		sw_mci_set_vddio(smc_host, SDC_WOLTAGE_OFF);
		usleep_range(1000, 1500);
		sw_mci_set_vddio(smc_host, SDC_WOLTAGE_3V3);
		SMC_ERR(smc_host, ": Switching to 1.8V signalling "
			"voltage failed, retrying with S18R set to 0\n");
		mci_writel(smc_host, REG_GCTRL, mci_readl(smc_host, REG_GCTRL)|SDXC_HWReset);
		mci_writew(smc_host, REG_RINTR, SDXC_VolChgDone | SDXC_CmdDone);
		sw_mci_oclk_onoff(smc_host, 1, 0);
		smc_host->voltage_switching = 0;
		return -EAGAIN;
	} else
		return 0;
}
/*
* Here we execute a tuning operation to find the sample window of MMC host.
* Then we select the best sampling point in the host for DDR50, SDR50, and
* SDR104 modes.
*/
/*
 * mmc_host_ops.execute_tuning handler: sweep the sample-clock delay
 * (sclk_dly 0..7), issuing the tuning command at each tap and comparing
 * the received block against the standard tuning pattern. The run of
 * consecutive passing taps forms the sample window; its midpoint is
 * programmed as the final sampling point. On total failure the bus is
 * dropped back to 1-bit legacy mode and -EIO is returned.
 */
static int sw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	/* standard 64-byte tuning block (4-bit bus) */
	static const char tuning_blk_4b[] = {
		0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
		0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
		0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
		0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
		0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
		0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
		0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
		0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde
	};
	/* standard 128-byte tuning block (8-bit bus, HS200) */
	static const char tuning_blk_8b[] = {
		0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
		0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
		0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
		0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
		0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
		0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
		0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
		0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
		0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
		0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
		0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
		0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
		0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
		0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
		0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
		0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee
	};
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	u32 sample_min = 1;
	u32 sample_max = 0;
	u32 sample_bak = smc_host->sclk_dly;	/* restore point on failure */
	u32 sample_dly = 0;
	u32 sample_win = 0;			/* width of passing-tap window */
	u32 loops = 64;
	u32 tuning_done = 0;
	char* rcv_pattern = (char*)kmalloc(128, GFP_KERNEL|GFP_DMA);
	char* std_pattern = NULL;
	int err = 0;
	if (!rcv_pattern) {
		SMC_ERR(smc_host, "sdc%d malloc tuning pattern buffer failed\n",
			smc_host->pdev->id);
		return -EIO;
	}
	SMC_MSG(smc_host, "sdc%d executes tuning operation\n", smc_host->pdev->id);
	/*
	 * The Host Controller needs tuning only in case of SDR104 mode
	 * and for SDR50 mode. Issue CMD19 repeatedly till get all of the
	 * sample points or the number of loops reaches 40 times or a
	 * timeout of 150ms occurs.
	 * NOTE(review): `loops` is initialized to 64, not 40, and no
	 * 150ms timeout is implemented — confirm which is intended.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_data data = {0};
		struct mmc_request mrq = {0};
		struct scatterlist sg;
		/* try the next sample delay tap, keeping the output delay */
		sw_mci_set_clk_dly(smc_host, smc_host->oclk_dly, sample_dly);
		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		/* pick pattern/length by bus width (HS200 only defines 4/8-bit).
		 * NOTE(review): with CMD21 on a 1-bit bus neither branch runs,
		 * leaving sg.length uninitialized and std_pattern NULL —
		 * presumably unreachable; confirm. */
		if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
				sg.length = 128;
				data.blksz = 128;
				std_pattern = (char*)tuning_blk_8b;
			} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
				sg.length = 64;
				data.blksz = 64;
				std_pattern = (char*)tuning_blk_4b;
			}
		} else {
			sg.length = 64;
			data.blksz = 64;
			std_pattern = (char*)tuning_blk_4b;
		}
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.sg = &sg;
		data.sg_len = 1;
		sg_init_one(&sg, rcv_pattern, sg.length);
		mrq.cmd = &cmd;
		mrq.data = &data;
		mmc_wait_for_req(mmc, &mrq);
		/*
		 * If no error happened in the transmission, compare data with
		 * the tuning pattern. If there is no error, record the minimal
		 * and the maximal value of the sampling clock delay to find
		 * the best sampling point in the sampling window.
		 */
		if (!cmd.error && !data.error) {
			if (!memcmp(rcv_pattern, std_pattern, data.blksz)) {
				SMC_MSG(smc_host, "sdc%d tuning ok, sclk_dly %d\n",
					smc_host->pdev->id, sample_dly);
				if (!sample_win)
					sample_min = sample_dly;	/* first passing tap */
				sample_win++;
				if (sample_dly == 7) {
					SMC_MSG(smc_host, "sdc%d tuning reach to max sclk_dly 7\n",
						smc_host->pdev->id);
					tuning_done = 1;
					sample_max = sample_dly;
					break;
				}
			} else if (sample_win) {
				/* pattern mismatch right after a passing run:
				 * window is closed */
				SMC_MSG(smc_host, "sdc%d tuning data failed, sclk_dly %d\n",
					smc_host->pdev->id, sample_dly);
				tuning_done = 1;
				sample_max = sample_dly-1;
				break;
			}
		} else if (sample_win) {
			/* transfer error after a passing run: window closed */
			SMC_MSG(smc_host, "sdc%d tuning trans fail, sclk_dly %d\n",
				smc_host->pdev->id, sample_dly);
			tuning_done = 1;
			sample_max = sample_dly-1;
			break;
		}
		sample_dly++;
		/* if sclk_dly reach to 7(maximum), down the clock and tuning again */
		if (sample_dly == 8 && loops)
			break;
	} while (!tuning_done && loops--);
	/* select the best sampling point from the sampling window */
	if (sample_win) {
		sample_dly = sample_min + sample_win/2;
		SMC_MSG(smc_host, "sdc%d sample_window:[%d, %d], sample_point %d\n",
			smc_host->pdev->id, sample_min, sample_max, sample_dly);
		sw_mci_set_clk_dly(smc_host, smc_host->oclk_dly, sample_dly);
		err = 0;
	} else {
		/* no tap worked: restore the old delay and force the core
		 * to fall back to 1-bit legacy timing */
		SMC_ERR(smc_host, "sdc%d cannot find a sample point\n", smc_host->pdev->id);
		sw_mci_set_clk_dly(smc_host, smc_host->oclk_dly, sample_bak);
		mmc->ios.bus_width = MMC_BUS_WIDTH_1;
		mmc->ios.timing = MMC_TIMING_LEGACY;
		err = -EIO;
	}
	kfree(rcv_pattern);
	return err;
}
/*
 * Here we provide a function to scan the card, for some SDIO cards
 * that may stay in busy status after write operations. The MMC host
 * does not wait for ready by itself, so the driver of this kind of
 * card should call this function to check the real status of the card.
 */
/* Force a card rescan on controller `id` with the given attach state.
 * If the host has not probed yet, the request is parked in
 * sw_host_rescan_pending[] and picked up at probe time. */
void sunximmc_rescan_card(unsigned id, unsigned insert)
{
	struct sunxi_mmc_host *smc_host = NULL;

	if (id > 3) {
		pr_err("%s: card id more than 3.\n", __func__);
		return;
	}
	/* under the mutex: grab the live host or queue the request */
	mutex_lock(&sw_host_rescan_mutex);
	smc_host = sw_host[id];
	if (!smc_host)
		sw_host_rescan_pending[id] = insert;
	mutex_unlock(&sw_host_rescan_mutex);
	if (!smc_host)
		return;
	smc_host->present = insert ? 1 : 0;
	mmc_detect_change(smc_host->mmc, 0);
}
EXPORT_SYMBOL_GPL(sunximmc_rescan_card);
/*
 * Poll the card-busy bit (DAT0 held low) for up to `ms` milliseconds so
 * callers can wait for a card to leave its R1 busy state after a write.
 * Returns 0 when the card is ready, -1 on timeout.
 */
int sw_mci_check_r1_ready(struct mmc_host* mmc, unsigned ms)
{
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);
	/* wrap-safe deadline: unsigned long + time_before() instead of a
	 * truncating `unsigned` compare against raw jiffies */
	unsigned long expire = jiffies + msecs_to_jiffies(ms);

	do {
		if (!(mci_readl(smc_host, REG_STAS) & SDXC_CardDataBusy))
			break;
	} while (time_before(jiffies, expire));
	if ((mci_readl(smc_host, REG_STAS) & SDXC_CardDataBusy)) {
		SMC_MSG(smc_host, "wait r1 rdy %d ms timeout\n", ms);
		return -1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(sw_mci_check_r1_ready);
/* mmc_host_ops table: hooks connecting the MMC core to this driver */
static struct mmc_host_ops sw_mci_ops = {
	.request = sw_mci_request,
	.set_ios = sw_mci_set_ios,
	.get_ro = sw_mci_get_ro,
	.get_cd = sw_mci_card_present,
	.enable_sdio_irq= sw_mci_enable_sdio_irq,
	.hw_reset = sw_mci_hw_reset,
	.start_signal_voltage_switch = sw_mci_do_voltage_switch,
	.execute_tuning = sw_mci_execute_tuning,
};
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
/* procfs read: report the driver version string. */
static int sw_mci_proc_drvversion(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	int len = sprintf(page, "%s\n", DRIVER_VERSION);

	return len;
}
/*
 * procfs read: dump a human-readable summary of the host state.
 * Delay values are computed in tenths of a nanosecond (Tmclk_ns is
 * 10000/MHz, i.e. 10x the clock period in ns) and printed as "%d.%dns".
 */
static int sw_mci_proc_hostinfo(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	char *p = page;
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	struct device* dev = &smc_host->pdev->dev;
	char* cd_mode[] = {"None", "GPIO Check", "GPIO IRQ", "Always In", "Manual"};
	char* state[] = {"Idle", "Sending CMD", "CMD Done"};
	char* vol[] = {"3.3V", "1.8V", "1.2V", "off"};
	u32 Fmclk_MHz = (smc_host->mod_clk == 24000000 ? 24000000 : 600000000)/1000000;
	u32 Tmclk_ns = Fmclk_MHz ? 10000/Fmclk_MHz : 0;
	/* a zero tap is treated as half a period */
	u32 odly = smc_host->oclk_dly ? Tmclk_ns*smc_host->oclk_dly : Tmclk_ns >> 1;
	u32 sdly = smc_host->sclk_dly ? Tmclk_ns*smc_host->sclk_dly : Tmclk_ns >> 1;
	p += sprintf(p, " %s Host Info:\n", dev_name(dev));
	p += sprintf(p, " REG Base : %p\n", smc_host->reg_base);
	p += sprintf(p, " DMA Desp : %p(%08x)\n", smc_host->sg_cpu, smc_host->sg_dma);
	p += sprintf(p, " Mod Clock : %d\n", smc_host->mod_clk);
	p += sprintf(p, " Card Clock: %d\n", smc_host->card_clk);
	p += sprintf(p, " Oclk Delay: %d(%d.%dns)\n", smc_host->oclk_dly, odly/10, odly%10);
	/* fixed copy-paste bug: fractional part previously printed odly%10 */
	p += sprintf(p, " Sclk Delay: %d(%d.%dns)\n", smc_host->sclk_dly, sdly/10, sdly%10);
	p += sprintf(p, " Bus Width : %d\n", smc_host->bus_width);
	p += sprintf(p, " DDR Mode : %d\n", smc_host->ddr);
	p += sprintf(p, " Voltage : %s\n", vol[smc_host->voltage]);
	p += sprintf(p, " Present : %d\n", smc_host->present);
	p += sprintf(p, " CD Mode : %s\n", cd_mode[smc_host->cd_mode]);
	p += sprintf(p, " Read Only : %d\n", smc_host->read_only);
	p += sprintf(p, " State : %s\n", state[smc_host->state]);
	p += sprintf(p, " Regulator : %s\n", smc_host->pdata->regulator);
	return p - page;
}
/* procfs read: hex-dump the controller, clock-control, and GPIO
 * register windows, 16 bytes per line. */
static int sw_mci_proc_read_regs(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	char *p = page;
	u32 reg;

	/* host controller registers */
	p += sprintf(p, "Dump smc regs:\n");
	for (reg = 0; reg < 0x100; reg += 4) {
		if (!(reg & 0xf))
			p += sprintf(p, "\n0x%08x : ", (u32)(smc_host->reg_base + reg));
		p += sprintf(p, "%08x ", readl(smc_host->reg_base + reg));
	}
	p += sprintf(p, "\n");
	/* clock controller registers */
	p += sprintf(p, "Dump ccmu regs:\n");
	for (reg = 0; reg < 0x170; reg += 4) {
		if (!(reg & 0xf))
			p += sprintf(p, "\n0x%08x : ", SW_VA_CCM_IO_BASE + reg);
		p += sprintf(p, "%08x ", readl(SW_VA_CCM_IO_BASE + reg));
	}
	p += sprintf(p, "\n");
	/* pin controller registers */
	p += sprintf(p, "Dump gpio regs:\n");
	for (reg = 0; reg < 0x120; reg += 4) {
		if (!(reg & 0xf))
			p += sprintf(p, "\n0x%08x : ", SW_VA_PORTC_IO_BASE + reg);
		p += sprintf(p, "%08x ", readl(SW_VA_PORTC_IO_BASE + reg));
	}
	p += sprintf(p, "\n");
	/* pin controller interrupt registers */
	p += sprintf(p, "Dump gpio irqc:\n");
	for (reg = 0x200; reg < 0x300; reg += 4) {
		if (!(reg & 0xf))
			p += sprintf(p, "\n0x%08x : ", SW_VA_PORTC_IO_BASE + reg);
		p += sprintf(p, "%08x ", readl(SW_VA_PORTC_IO_BASE + reg));
	}
	p += sprintf(p, "\n");
	return p - page;
}
/* procfs read: show the legend and the current debug verbosity. */
static int sw_mci_proc_read_dbglevel(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	char *p = page;

	p += sprintf(p, "Debug-Level : 0- msg&err, 1- +info, 2- +dbg, 3- all\n");
	p += sprintf(p, "current debug-level : %d\n", smc_host->debuglevel);
	return p - page;
}
/* procfs write: set the runtime debug verbosity.
 * NOTE(review): `buffer` is a __user pointer but is parsed directly with
 * simple_strtoul() instead of being copied in with copy_from_user() first —
 * looks unsafe; confirm against the procfs interface this kernel targets.
 * Also returns sizeof(u32) rather than `count`, so writes longer than
 * 4 bytes appear short to the caller. */
static int sw_mci_proc_write_dbglevel(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	u32 smc_debug;
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	smc_debug = simple_strtoul(buffer, NULL, 10);
	smc_host->debuglevel = smc_debug;
	return sizeof(smc_debug);
}
/* procfs read: show the current card-detect mode. */
static int sw_mci_proc_read_cdmode(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	int len = sprintf(page, "card detect mode: %d\n", smc_host->cd_mode);

	return len;
}
/* procfs write: switch the card-detect mode at runtime.
 * NOTE(review): `buffer` is a __user pointer parsed directly with
 * simple_strtoul() without copy_from_user(); also no range check on the
 * resulting mode and returns sizeof(u32) rather than `count` — verify. */
static int sw_mci_proc_write_cdmode(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	u32 cdmode;
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	cdmode = simple_strtoul(buffer, NULL, 10);
	smc_host->cd_mode = cdmode;
	return sizeof(cdmode);
}
/*
 * procfs read: show usage help and the current card attach status for
 * the manual-detect "insert" node.
 */
static int sw_mci_proc_read_insert_status(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	char *p = page;
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	p += sprintf(p, "Usage: \"echo 1 > insert\" to scan card and "
		     "\"echo 0 > insert\" to remove card\n");
	if (smc_host->cd_mode != CARD_DETECT_BY_FS)
		/* fixed typo in user-visible text: "if only" -> "is only" */
		p += sprintf(p, "Sorry, this node is only for manual "
			     "attach mode(cd mode 4)\n");
	p += sprintf(p, "card attach status: %s\n",
		     smc_host->present ? "inserted" : "removed");
	return p - page;
}
/* procfs write: manually force card insert/remove (CARD_DETECT_BY_FS mode).
 * NOTE(review): `buffer` is a __user pointer parsed directly with
 * simple_strtoul() without copy_from_user(), and the return value is
 * sizeof(u32) rather than `count` — verify against the targeted kernel. */
static int sw_mci_proc_card_insert_ctrl(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	u32 insert = simple_strtoul(buffer, NULL, 10);
	struct sunxi_mmc_host *smc_host = (struct sunxi_mmc_host *)data;
	u32 present = insert ? 1 : 0;
	/* only notify the MMC core when the state actually changes */
	if (smc_host->present ^ present) {
		smc_host->present = present;
		mmc_detect_change(smc_host->mmc, msecs_to_jiffies(300));
	}
	return sizeof(insert);
}
/*
 * Create the per-host procfs tree: driver/<dev>/{drv-version, hostinfo,
 * register, debug-level, cdmode, insert}.
 *
 * Fixes over the original:
 *  - proc_mkdir()/create_proc_entry()/create_proc_read_entry() return
 *    NULL on failure, not an ERR_PTR, so IS_ERR() never fired; every
 *    result is now NULL-checked.
 *  - the read/write_proc pointers are only installed when the entry was
 *    actually created (the old code dereferenced a possibly-NULL entry).
 *  - the "drv-version" check tested proc_root by copy-paste mistake, and
 *    the "register" failure message said "hostinfo".
 */
void sw_mci_procfs_attach(struct sunxi_mmc_host *smc_host)
{
	struct device *dev = &smc_host->pdev->dev;
	char sw_mci_proc_rootname[32] = {0};

	/* make mmc dir in proc fs path */
	snprintf(sw_mci_proc_rootname, sizeof(sw_mci_proc_rootname),
		"driver/%s", dev_name(dev));
	smc_host->proc_root = proc_mkdir(sw_mci_proc_rootname, NULL);
	if (!smc_host->proc_root) {
		SMC_MSG(smc_host, "%s: failed to create procfs \"driver/mmc\".\n", dev_name(dev));
		/* without the parent dir the children would land in /proc */
		return;
	}

	smc_host->proc_drvver = create_proc_read_entry("drv-version", 0444,
		smc_host->proc_root, sw_mci_proc_drvversion, NULL);
	if (!smc_host->proc_drvver)
		SMC_MSG(smc_host, "%s: failed to create procfs \"drv-version\".\n", dev_name(dev));

	smc_host->proc_hostinfo = create_proc_read_entry("hostinfo", 0444,
		smc_host->proc_root, sw_mci_proc_hostinfo, smc_host);
	if (!smc_host->proc_hostinfo)
		SMC_MSG(smc_host, "%s: failed to create procfs \"hostinfo\".\n", dev_name(dev));

	smc_host->proc_regs = create_proc_read_entry("register", 0444,
		smc_host->proc_root, sw_mci_proc_read_regs, smc_host);
	if (!smc_host->proc_regs)
		SMC_MSG(smc_host, "%s: failed to create procfs \"register\".\n", dev_name(dev));

	smc_host->proc_dbglevel = create_proc_entry("debug-level", 0644, smc_host->proc_root);
	if (smc_host->proc_dbglevel) {
		smc_host->proc_dbglevel->data = smc_host;
		smc_host->proc_dbglevel->read_proc = sw_mci_proc_read_dbglevel;
		smc_host->proc_dbglevel->write_proc = sw_mci_proc_write_dbglevel;
	} else {
		SMC_MSG(smc_host, "%s: failed to create procfs \"debug-level\".\n", dev_name(dev));
	}

	smc_host->proc_cdmode = create_proc_entry("cdmode", 0644, smc_host->proc_root);
	if (smc_host->proc_cdmode) {
		smc_host->proc_cdmode->data = smc_host;
		smc_host->proc_cdmode->read_proc = sw_mci_proc_read_cdmode;
		smc_host->proc_cdmode->write_proc = sw_mci_proc_write_cdmode;
	} else {
		SMC_MSG(smc_host, "%s: failed to create procfs \"cdmode\".\n", dev_name(dev));
	}

	smc_host->proc_insert = create_proc_entry("insert", 0644, smc_host->proc_root);
	if (smc_host->proc_insert) {
		smc_host->proc_insert->data = smc_host;
		smc_host->proc_insert->read_proc = sw_mci_proc_read_insert_status;
		smc_host->proc_insert->write_proc = sw_mci_proc_card_insert_ctrl;
	} else {
		SMC_MSG(smc_host, "%s: failed to create procfs \"insert\".\n", dev_name(dev));
	}
}
/*
 * Tear down every procfs node created for this host, children first,
 * then the driver/<dev> directory itself.
 */
void sw_mci_procfs_remove(struct sunxi_mmc_host *smc_host)
{
	/* names are removed in the reverse order of creation */
	static const char *node_names[] = {
		"io-drive", "insert", "cdmode", "debug-level",
		"register", "hostinfo", "drv-version",
	};
	struct device *dev = &smc_host->pdev->dev;
	char root_name[32] = {0};
	unsigned int n;

	snprintf(root_name, sizeof(root_name), "driver/%s", dev_name(dev));
	for (n = 0; n < sizeof(node_names) / sizeof(node_names[0]); n++)
		remove_proc_entry(node_names[n], smc_host->proc_root);
	remove_proc_entry(root_name, NULL);
}
#else
/* Stub used when procfs support is compiled out: no nodes are created. */
void sw_mci_procfs_attach(struct sunxi_mmc_host *smc_host) { }
/* Stub used when procfs support is compiled out: nothing to remove. */
void sw_mci_procfs_remove(struct sunxi_mmc_host *smc_host) { }
#endif //PROC_FS
/*
 * Probe one sunxi MMC controller instance.
 *
 * Allocates the mmc_host, maps resources, brings up clocks, installs the
 * IRQ handler, configures card detection, and registers with the MMC
 * core.  Failures unwind through the goto ladder at the bottom.
 *
 * Fixes over the original:
 *  - memset() used sizeof(smc_host) (a pointer), zeroing only 4/8 bytes
 *    of the private struct; it now clears the whole structure.
 *  - the resource-request failure path left ret == 0, so probe reported
 *    success after freeing the host; it now returns -ENOENT.
 *  - the unsupported GPIO-IRQ card-detect path jumped to
 *    probe_free_resource, leaking the already-requested IRQ; it now goes
 *    through probe_free_irq.
 */
static int __devinit sw_mci_probe(struct platform_device *pdev)
{
	struct sunxi_mmc_host *smc_host = NULL;
	struct mmc_host *mmc = NULL;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
	if (!mmc) {
		SMC_ERR(smc_host, "mmc alloc host failed\n");
		ret = -ENOMEM;
		goto probe_out;
	}
	smc_host = mmc_priv(mmc);
	/* clear the whole private area, not just pointer-size bytes */
	memset((void*)smc_host, 0, sizeof(struct sunxi_mmc_host));
	smc_host->mmc = mmc;
	smc_host->pdev = pdev;
	smc_host->pdata = pdev->dev.platform_data;
	smc_host->cd_mode = smc_host->pdata->cdmode;
	smc_host->io_flag = smc_host->pdata->isiodev ? 1 : 0;
	smc_host->debuglevel = CONFIG_MMC_PRE_DBGLVL_SUNXI;
	spin_lock_init(&smc_host->lock);
	tasklet_init(&smc_host->tasklet, sw_mci_tasklet, (unsigned long) smc_host);

	if (sw_mci_resource_request(smc_host)) {
		SMC_ERR(smc_host, "%s: Failed to get resouce.\n", dev_name(&pdev->dev));
		ret = -ENOENT;	/* was missing: probe "succeeded" with ret == 0 */
		goto probe_free_host;
	}

	/* identification happens at 400KHz; the real bus speed is set later */
	smc_host->mod_clk = 400000;
	if (sw_mci_set_clk(smc_host, 400000)) {
		SMC_ERR(smc_host, "Failed to set clock to 400KHz\n");
		ret = -ENOENT;
		goto probe_free_resource;
	}
	clk_enable(smc_host->mclk);
	clk_enable(smc_host->hclk);
	sw_mci_init_host(smc_host);
	sw_mci_procfs_attach(smc_host);

	smc_host->irq = SMC_IRQNO(pdev->id);
	if (request_irq(smc_host->irq, sw_mci_irq, 0, DRIVER_NAME, smc_host)) {
		SMC_ERR(smc_host, "Failed to request smc card interrupt.\n");
		ret = -ENOENT;
		goto probe_free_resource;
	}
	/* enabled again when the MMC core starts issuing requests */
	disable_irq(smc_host->irq);

	if (smc_host->cd_mode == CARD_ALWAYS_PRESENT) {
		smc_host->present = 1;
	} else if (smc_host->cd_mode == CARD_DETECT_BY_GPIO_IRQ) {
#if 0 // FIXME
		u32 cd_hdle;
		cd_hdle = sw_gpio_irq_request(smc_host->pdata->cd.gpio, TRIG_EDGE_DOUBLE,
				&sw_mci_cd_irq, smc_host);
		if (!cd_hdle) {
			SMC_ERR(smc_host, "Failed to get gpio irq for card detection\n");
		}
		smc_host->cd_hdle = cd_hdle;
		smc_host->present = !__gpio_get_value(smc_host->pdata->cd.gpio);
#else
		SMC_ERR(smc_host, "irq based card detect not supported\n");
		ret = -ENOENT;
		goto probe_free_irq;	/* was probe_free_resource, leaking the IRQ */
#endif
	} else if (smc_host->cd_mode == CARD_DETECT_BY_GPIO_POLL) {
		/* poll the card-detect GPIO once per second */
		init_timer(&smc_host->cd_timer);
		smc_host->cd_timer.expires = jiffies + 1*HZ;
		smc_host->cd_timer.function = &sw_mci_cd_cb;
		smc_host->cd_timer.data = (unsigned long)smc_host;
		add_timer(&smc_host->cd_timer);
		smc_host->present = 0;
	}

	/* describe the host's capabilities to the MMC core */
	mmc->ops = &sw_mci_ops;
	mmc->ocr_avail = smc_host->pdata->ocr_avail;
	mmc->caps = smc_host->pdata->caps;
	mmc->caps2 = smc_host->pdata->caps2;
	mmc->pm_caps = MMC_PM_KEEP_POWER|MMC_PM_WAKE_SDIO_IRQ;
	mmc->f_min = smc_host->pdata->f_min;
	mmc->f_max = smc_host->pdata->f_max;
	mmc->max_blk_count = 8192;
	mmc->max_blk_size = 4096;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_segs = 128;
	if (smc_host->io_flag)
		mmc->pm_flags = MMC_PM_IGNORE_PM_NOTIFY;

	ret = mmc_add_host(mmc);
	if (ret) {
		SMC_ERR(smc_host, "Failed to add mmc host.\n");
		goto probe_free_irq;
	}
	platform_set_drvdata(pdev, mmc);

	/* honor a rescan that was requested before this host existed */
	mutex_lock(&sw_host_rescan_mutex);
	if (sw_host_rescan_pending[pdev->id]) {
		smc_host->present = 1;
		mmc_detect_change(smc_host->mmc, msecs_to_jiffies(300));
	}
	sw_host[pdev->id] = smc_host;
	mutex_unlock(&sw_host_rescan_mutex);

	SMC_MSG(smc_host, "sdc%d Probe: base:0x%p irq:%u sg_cpu:%p(%x) ret %d.\n",
		pdev->id, smc_host->reg_base, smc_host->irq,
		smc_host->sg_cpu, smc_host->sg_dma, ret);
	goto probe_out;

probe_free_irq:
	if (smc_host->irq)
		free_irq(smc_host->irq, smc_host);
probe_free_resource:
	sw_mci_resource_release(smc_host);
probe_free_host:
	mmc_free_host(mmc);
probe_out:
	return ret;
}
/*
 * Tear down one controller instance.
 *
 * Ordering matters here: the controller is quiesced and the procfs
 * nodes dropped before the host is unregistered from the MMC core,
 * then the IRQ/timer/resources are released and the host freed.
 */
static int __devexit sw_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *smc_host = mmc_priv(mmc);

	SMC_MSG(smc_host, "%s: Remove.\n", dev_name(&pdev->dev));
	sw_mci_exit_host(smc_host);
	sw_mci_procfs_remove(smc_host);
	mmc_remove_host(mmc);
	tasklet_disable(&smc_host->tasklet);
	free_irq(smc_host->irq, smc_host);
	if (smc_host->cd_mode == CARD_DETECT_BY_GPIO_POLL)
		del_timer(&smc_host->cd_timer);
#if 0
	else if (smc_host->cd_mode == CARD_DETECT_BY_GPIO_IRQ)
		sw_gpio_irq_free(smc_host->cd_hdle);
#endif
	sw_mci_resource_release(smc_host);
	/* smc_host lives inside mmc's allocation; invalid after this call */
	mmc_free_host(mmc);
	sw_host[pdev->id] = NULL;
	return 0;
}
#ifdef CONFIG_PM
/*
 * Snapshot the controller registers that do not survive a powered-down
 * suspend; counterpart of sw_mci_regs_restore().  Used on the
 * keep-power suspend path (see sw_mci_suspend()).
 */
void sw_mci_regs_save(struct sunxi_mmc_host* smc_host)
{
	struct sunxi_mmc_ctrl_regs* bak_regs = &smc_host->bak_regs;

	bak_regs->gctrl = mci_readl(smc_host, REG_GCTRL);
	bak_regs->clkc = mci_readl(smc_host, REG_CLKCR);
	bak_regs->timeout = mci_readl(smc_host, REG_TMOUT);
	bak_regs->buswid = mci_readl(smc_host, REG_WIDTH);
	bak_regs->waterlvl = mci_readl(smc_host, REG_FTRGL);
	bak_regs->funcsel = mci_readl(smc_host, REG_FUNS);
	bak_regs->debugc = mci_readl(smc_host, REG_DBGC);
	bak_regs->idmacc = mci_readl(smc_host, REG_DMAC);
}
/*
 * Write back the register snapshot taken by sw_mci_regs_save().
 * Called on resume before the clock is re-programmed
 * (see sw_mci_resume()).
 */
void sw_mci_regs_restore(struct sunxi_mmc_host* smc_host)
{
	struct sunxi_mmc_ctrl_regs* bak_regs = &smc_host->bak_regs;

	mci_writel(smc_host, REG_GCTRL, bak_regs->gctrl );
	mci_writel(smc_host, REG_CLKCR, bak_regs->clkc );
	mci_writel(smc_host, REG_TMOUT, bak_regs->timeout );
	mci_writel(smc_host, REG_WIDTH, bak_regs->buswid );
	mci_writel(smc_host, REG_FTRGL, bak_regs->waterlvl);
	mci_writel(smc_host, REG_FUNS , bak_regs->funcsel );
	mci_writel(smc_host, REG_DBGC , bak_regs->debugc );
	mci_writel(smc_host, REG_DMAC , bak_regs->idmacc );
}
/*
 * PM suspend callback: suspend the MMC host; when the card keeps power
 * across suspend, save the controller registers and gate the clocks.
 */
static int sw_mci_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *host;
	int err;

	if (!mmc)
		return 0;

	host = mmc_priv(mmc);
	err = mmc_suspend_host(mmc);
	/* record suspended state only when the core accepted the suspend */
	host->suspend = !err;
	if (!err && mmc_card_keep_power(mmc)) {
		sw_mci_regs_save(host);
		/* gate clock for lower power */
		clk_disable(host->hclk);
		clk_disable(host->mclk);
	}
	SMC_MSG(NULL, "smc %d suspend\n", pdev->id);
	return err;
}
/*
 * PM resume callback: ungate the clocks and restore the controller
 * registers when the card kept power, re-check card presence on the
 * GPIO-IRQ detect mode, then resume the MMC host.
 */
static int sw_mci_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *host;
	int err;

	if (!mmc)
		return 0;

	host = mmc_priv(mmc);
	host->suspend = 0;
	if (mmc_card_keep_power(mmc)) {
		/* enable clock for resotre */
		clk_enable(host->mclk);
		clk_enable(host->hclk);
		sw_mci_regs_restore(host);
		sw_mci_update_clk(host);
	}
	if (host->cd_mode == CARD_DETECT_BY_GPIO_IRQ)
		sw_mci_cd_cb((unsigned long)host);
	err = mmc_resume_host(mmc);
	/* suspend flag stays set if the resume failed */
	host->suspend = err ? 1 : 0;
	SMC_MSG(NULL, "smc %d resume\n", pdev->id);
	return err;
}
/* Dev PM ops: only plain suspend/resume are implemented. */
static const struct dev_pm_ops sw_mci_pm = {
	.suspend = sw_mci_suspend,
	.resume = sw_mci_resume,
};
/* Pointer installed into the platform driver below; NULL without PM. */
#define sw_mci_pm_ops &sw_mci_pm
#else /* CONFIG_PM */
#define sw_mci_pm_ops NULL
#endif /* CONFIG_PM */
/*
 * Default per-controller platform data, indexed by controller id.
 * Most fields are overridden from sys_config.fex by
 * sw_mci_get_mmcinfo() at module init.
 */
static struct sunxi_mmc_platform_data sw_mci_pdata[4] = {
	/* SDC0: 4-bit, removable, UHS-capable, up to 50 MHz */
	[0] = {
		.ocr_avail = MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32
			| MMC_VDD_32_33 | MMC_VDD_33_34,
		.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED
			| MMC_CAP_SDIO_IRQ
			| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
			| MMC_CAP_UHS_DDR50
			| MMC_CAP_SET_XPC_330 | MMC_CAP_DRIVER_TYPE_A,
		.f_min = 400000,
		.f_max = 50000000,
		.f_ddr_max = 47000000,
		.dma_tl= 0x20070008,
	},
	/* SDC1: 4-bit, removable, no UHS */
	[1] = {
		.ocr_avail = MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32
			| MMC_VDD_32_33 | MMC_VDD_33_34,
		.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED
			| MMC_CAP_SDIO_IRQ,
		.f_min = 400000,
		.f_max = 50000000,
		.dma_tl= 0x20070008,
	},
	/* SDC2: non-removable, 1.8V-capable, HS200, 8-bit except on FPGA */
	[2] = {
		.ocr_avail = MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32
			| MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195,
		.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NONREMOVABLE
			| MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED
			| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
			| MMC_CAP_UHS_DDR50
			| MMC_CAP_1_8V_DDR
#ifndef CONFIG_AW_FPGA_PLATFORM
			| MMC_CAP_8_BIT_DATA
#endif
			| MMC_CAP_SDIO_IRQ
			| MMC_CAP_SET_XPC_330 | MMC_CAP_DRIVER_TYPE_A,
		.caps2 = MMC_CAP2_HS200_1_8V_SDR,
		.f_min = 400000,
		.f_max = 120000000,
		.f_ddr_max = 50000000,
		.dma_tl= 0x20070008,
	},
	/* SDC3: non-removable, 8-bit, HS200, board-specific DMA threshold */
	[3] = {
		.ocr_avail = MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32
			| MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195,
		.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE
			| MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED
			| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
			| MMC_CAP_UHS_DDR50
			| MMC_CAP_1_8V_DDR
			| MMC_CAP_8_BIT_DATA
			| MMC_CAP_SDIO_IRQ
			| MMC_CAP_SET_XPC_330 | MMC_CAP_DRIVER_TYPE_A,
		.caps2 = MMC_CAP2_HS200_1_8V_SDR,
		.f_min = 400000,
		.f_max = 120000000,
		.f_ddr_max = 50000000,
		.dma_tl= MMC3_DMA_TL,
	},
};
/* One platform device per controller; .id indexes sw_mci_pdata. */
static struct platform_device sw_mci_device[4] = {
	[0] = {.name = DRIVER_NAME, .id = 0, .dev.platform_data = &sw_mci_pdata[0]},
	[1] = {.name = DRIVER_NAME, .id = 1, .dev.platform_data = &sw_mci_pdata[1]},
	[2] = {.name = DRIVER_NAME, .id = 2, .dev.platform_data = &sw_mci_pdata[2]},
	[3] = {.name = DRIVER_NAME, .id = 3, .dev.platform_data = &sw_mci_pdata[3]},
};
/* Platform driver glue; pm ops resolve to NULL when !CONFIG_PM. */
static struct platform_driver sw_mci_driver = {
	.driver.name = DRIVER_NAME,
	.driver.owner = THIS_MODULE,
	.driver.pm = sw_mci_pm_ops,
	.probe = sw_mci_probe,
	.remove = __devexit_p(sw_mci_remove),
};
/*
 * Populate sw_mci_pdata[i] from the "mmc<i>_para" section of
 * sys_config.fex (script.bin).
 *
 * Reads: used flag, card-detect mode (plus detect GPIO when needed),
 * bus width, the data/clk/cmd GPIO list, write-protect config, the
 * optional eMMC reset GPIO, SDIO-device flag, and the regulator name
 * (UHS modes are disabled when no regulator is configured).
 *
 * Fix: the regulator check read `if (r |= 0 || ...)` — an assignment
 * typo for `r != 0` that silently rewrote r; it is now a comparison.
 *
 * Returns 0 on success (or when the controller is unused), -1 on a
 * parse failure (the controller is then marked unused).
 */
static int __init sw_mci_get_mmcinfo(int i)
{
	int j, r, val;
	char p[16];
	struct sunxi_mmc_platform_data* mmcinfo;
	script_parser_value_type_t type;

	mmcinfo = &sw_mci_pdata[i];
	sprintf(p, "mmc%d_para", i);

	/* get used information */
	r = script_parser_fetch(p, "sdc_used", &val, 1);
	if (r != 0) {
		SMC_MSG(NULL, "get mmc%d's used failed\n", i);
		goto fail;
	}
	mmcinfo->used = val;
	if (!mmcinfo->used)
		return 0;

	/* get cdmode information */
	r = script_parser_fetch(p, "sdc_detmode", &val, 1);
	if (r != 0) {
		SMC_MSG(NULL, "get mmc%d's detmode failed\n", i);
		goto fail;
	}
	mmcinfo->cdmode = val;
	if (mmcinfo->cdmode == CARD_DETECT_BY_GPIO_POLL ||
	    mmcinfo->cdmode == CARD_DETECT_BY_GPIO_IRQ) {
		mmcinfo->cd = gpio_request_ex(p, "sdc_det");
		if (!mmcinfo->cd) {
			SMC_MSG(NULL, "get mmc%d's IO(det) failed\n", i);
			goto fail;
		}
	}

	/* get buswidth information */
	r = script_parser_fetch(p, "sdc_buswidth", &val, 1);
	if (r == 0) {
		mmcinfo->width = val;
	} else {
		/* No bus_width info, use old driver hardcoded defaults */
		mmcinfo->width = 4;
	}

	/* get mmc IOs information: data lines plus clk and cmd */
	for (j = 0; j < mmcinfo->width + 2; j++) {
		mmcinfo->mmcio[j] = gpio_request_ex(p, mmc_para_io[j]);
		if (!mmcinfo->mmcio[j]) {
			SMC_MSG(NULL, "get mmc%d's IO(%s) failed\n", i,
				mmc_para_io[j]);
			goto fail;
		}
		r = gpio_get_one_pin_status(mmcinfo->mmcio[j],
			&mmcinfo->mmcio_settings[j], mmc_para_io[j], 0);
		if (r != 0) {
			SMC_MSG(NULL, "get mmc%d's IO(%s) settings failed\n",
				i, mmc_para_io[j]);
			goto fail;
		}
	}

	/* get wpmode information */
	r = script_parser_fetch(p, "sdc_use_wp", &val, 1);
	if (r == 0) {
		mmcinfo->wpmode = val;
	} else {
		SMC_MSG(NULL, "get mmc%d's use_wp failed\n", i);
		mmcinfo->wpmode = 0;
	}
	if (mmcinfo->wpmode) {
		/* if wpmode==1 but cann't get the wp IO, we assume there is no
		   write protect detection */
		mmcinfo->wp = gpio_request_ex(p, "sdc_wp");
		if (!mmcinfo->wp) {
			SMC_MSG(NULL, "get mmc%d's IO(sdc_wp) failed\n", i);
			mmcinfo->wpmode = 0;
		}
	}

	/* get emmc-rst information (optional, no failure path) */
	mmcinfo->hwrst = gpio_request_ex(p, "emmc_rst");
	mmcinfo->has_hwrst = mmcinfo->hwrst != 0;

	/* get sdio information */
	r = script_parser_fetch(p, "sdc_isio", &val, 1);
	if (r == 0) {
		mmcinfo->isiodev = val;
	} else {
		/* No sdio info, use old driver hardcoded defaults */
		int default_iodev = sunxi_is_sun5i() ? 1 : 3;
		mmcinfo->isiodev = i == default_iodev;
	}

	/* get regulator information */
	type = SCRIPT_PARSER_VALUE_TYPE_STRING;
	r = script_parser_fetch_ex(p, "sdc_regulator",
		(int *)&mmcinfo->regulator, &type,
		sizeof(mmcinfo->regulator)/sizeof(int));
	/* was `r |= 0 || ...`: an assignment typo for the comparison below */
	if (r != 0 || type != SCRIPT_PARSER_VALUE_TYPE_STRING ||
	    strcmp(mmcinfo->regulator, "none") == 0) {
		/* No regulator, clear all of the UHS features support */
		mmcinfo->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
				   MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50);
		mmcinfo->regulator[0] = 0;
	} else
		mmcinfo->regulator[sizeof(mmcinfo->regulator) - 1] = 0;
	return 0;

fail:
	SMC_MSG(NULL, "Not using mmc%d due to script.bin parse failure\n", i);
	mmcinfo->used = 0;
	return -1;
}
/*
 * Module init: load per-controller configuration from sys_config.fex,
 * register the platform devices -- boot (always-present) cards first so
 * the boot medium becomes mmcblk0 and Android can rely on fixed
 * mmcblk0p* partition names -- then register the platform driver.
 */
static int __init sw_mci_init(void)
{
	struct sunxi_mmc_platform_data *info;
	int id;
	int used_mask = 0;
	int boot_mask = 0;
	int io_mask = 0;

	SMC_MSG(NULL, "sw_mci_init\n");

	/* pull every controller's settings out of sys_config.fex */
	for (id = 0; id < sw_host_num; id++)
		sw_mci_get_mmcinfo(id);

	/* classify the configured controllers into bitmasks */
	for (id = 0; id < sw_host_num; id++) {
		info = &sw_mci_pdata[id];
		if (!info->used)
			continue;
		used_mask |= 1 << id;
		if (info->cdmode == CARD_ALWAYS_PRESENT)
			boot_mask |= 1 << id;
		if (info->isiodev)
			io_mask |= 1 << id;
	}
	SMC_MSG(NULL, "MMC host used card: 0x%x, boot card: 0x%x, io_card %d\n",
		used_mask, boot_mask, io_mask);

	/* boot cards first, so they claim the lowest mmcblk index */
	for (id = 0; id < sw_host_num; id++)
		if (boot_mask & (1 << id))
			platform_device_register(&sw_mci_device[id]);

	/* then every remaining configured controller */
	for (id = 0; id < sw_host_num; id++)
		if ((used_mask & (1 << id)) && !(boot_mask & (1 << id)))
			platform_device_register(&sw_mci_device[id]);

	return platform_driver_register(&sw_mci_driver);
}
/*
 * Module exit: unregister the platform driver.  Note the platform
 * devices registered in sw_mci_init() are not unregistered here.
 */
static void __exit sw_mci_exit(void)
{
	SMC_MSG(NULL, "sw_mci_exit\n");
	platform_driver_unregister(&sw_mci_driver);
}
/* Module entry/exit hooks and metadata. */
module_init(sw_mci_init);
module_exit(sw_mci_exit);
MODULE_DESCRIPTION("Winner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Aaron.maoye<leafy.myeh@reuuimllatech.com>");
MODULE_ALIAS("platform:sunxi-mmc");
| gpl-2.0 |
hustcalm/coreboot-hacking | src/mainboard/asus/p2b-d/romstage.c | 1 | 1673 | /*
* This file is part of the coreboot project.
*
* Copyright (C) 2009 Uwe Hermann <uwe@hermann-uwe.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <device/pci_def.h>
#include <arch/io.h>
#include <device/pnp_def.h>
#include <arch/romcc_io.h>
#include <arch/hlt.h>
#include <stdlib.h>
#include <console/console.h>
#include "southbridge/intel/i82371eb/i82371eb.h"
#include "northbridge/intel/i440bx/raminit.h"
#include "drivers/pc80/udelay_io.c"
#include "lib/delay.c"
#include "cpu/x86/bist.h"
#include "superio/winbond/w83977tf/early_serial.c"
#include <lib.h>
/* Serial port 1 of the Winbond W83977TF Super I/O at config port 0x3f0. */
#define SERIAL_DEV PNP_DEV(0x3f0, W83977TF_SP1)

/*
 * Read one byte of SPD data over SMBus.
 * @device  SMBus address of the DIMM's SPD EEPROM
 * @address byte offset within the SPD data
 * Returns whatever smbus_read_byte() returns for that transfer.
 */
int spd_read_byte(unsigned int device, unsigned int address)
{
	return smbus_read_byte(device, address);
}
/*
 * Romstage entry point for the ASUS P2B-D.
 *
 * Brings up the Super I/O UART and console first so later steps can
 * log, reports any BIST failure, then enables SMBus and programs the
 * i440BX SDRAM controller from the DIMMs' SPD data.
 */
void main(unsigned long bist)
{
	w83977tf_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
	console_init();
	/* halt on a CPU built-in self-test failure */
	report_bist_failure(bist);
	enable_smbus();
	dump_spd_registers();
	sdram_set_registers();
	sdram_set_spd_registers();
	sdram_enable();
}
| gpl-2.0 |
alexbousso/kernel_2.4.18-14 | fs/jfs/jfs_extent.c | 1 | 17419 | /*
* Copyright (c) International Business Machines Corp., 2000-2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include "jfs_incore.h"
#include "jfs_dmap.h"
#include "jfs_extent.h"
#include "jfs_debug.h"
/*
* forward references
*/
static int extBalloc(struct inode *, s64, s64 *, s64 *);
static int extBrealloc(struct inode *, s64, s64, s64 *, s64 *);
int extRecord(struct inode *, xad_t *);
static s64 extRoundDown(s64 nb);
/*
* external references
*/
extern int dbExtend(struct inode *, s64, s64, s64);
extern int jfs_commit_inode(struct inode *, int);
/*
 * Legacy debug print helpers (not referenced by the visible code).
 * NOTE(review): the format strings contain the literal text "(a)" --
 * the argument name is not stringified -- and the `<< 32` in the else
 * branches of DPL/DPL1 looks suspect for a value that fits in 32 bits;
 * confirm before relying on their output.
 */
#define DPD(a) (printk("(a): %d\n",(a)))
#define DPC(a) (printk("(a): %c\n",(a)))
#define DPL1(a) \
{ \
	if ((a) >> 32) \
		printk("(a): %x%08x ",(a)); \
	else \
		printk("(a): %x ",(a) << 32); \
}
#define DPL(a) \
{ \
	if ((a) >> 32) \
		printk("(a): %x%08x\n",(a)); \
	else \
		printk("(a): %x\n",(a) << 32); \
}
#define DPD1(a) (printk("(a): %d ",(a)))
#define DPX(a) (printk("(a): %08x\n",(a)))
#define DPX1(a) (printk("(a): %08x ",(a)))
#define DPS(a) (printk("%s\n",(a)))
#define DPE(a) (printk("\nENTERING: %s\n",(a)))
#define DPE1(a) (printk("\nENTERING: %s",(a)))
#define DPS1(a) (printk(" %s ",(a)))
/*
* NAME: extAlloc()
*
* FUNCTION: allocate an extent for a specified page range within a
* file.
*
* PARAMETERS:
* ip - the inode of the file.
* xlen - requested extent length.
* pno - the starting page number with the file.
* xp - pointer to an xad. on entry, xad describes an
* extent that is used as an allocation hint if the
* xaddr of the xad is non-zero. on successful exit,
* the xad describes the newly allocated extent.
* abnr - boolean_t indicating whether the newly allocated extent
* should be marked as allocated but not recorded.
*
* RETURN VALUES:
* 0 - success
* EIO - i/o error.
* ENOSPC - insufficient disk resources.
*/
int
extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	s64 nxlen, nxaddr, xoff, hint, xaddr = 0;
	int rc, nbperpage;
	int xflag;

	/* This blocks if we are low on resources */
	txBeginAnon(ip->i_sb);

	/* validate extent length */
	if (xlen > MAXXLEN)
		xlen = MAXXLEN;

	/* get the number of blocks per page */
	nbperpage = sbi->nbperpage;

	/* get the page's starting extent offset */
	xoff = pno << sbi->l2nbperpage;

	/* check if an allocation hint was provided */
	if ((hint = addressXAD(xp))) {
		/* get the size of the extent described by the hint */
		nxlen = lengthXAD(xp);

		/* check if the hint is for the portion of the file
		 * immediately previous to the current allocation
		 * request and if hint extent has the same abnr
		 * value as the current request.  if so, we can
		 * extend the hint extent to include the current
		 * extent if we can allocate the blocks immediately
		 * following the hint extent.
		 */
		if (offsetXAD(xp) + nxlen == xoff &&
		    abnr == ((xp->flag & XAD_NOTRECORDED) ? TRUE : FALSE))
			/* xaddr != 0 below means "try to grow the hint" */
			xaddr = hint + nxlen;

		/* adjust the hint to the last block of the extent */
		hint += (nxlen - 1);
	}

	/* allocate the disk blocks for the extent.  initially, extBalloc()
	 * will try to allocate disk blocks for the requested size (xlen).
	 * if this fails (xlen contigious free blocks not avaliable), it'll
	 * try to allocate a smaller number of blocks (producing a smaller
	 * extent), with this smaller number of blocks consisting of the
	 * requested number of blocks rounded down to the next smaller
	 * power of 2 number (i.e. 16 -> 8).  it'll continue to round down
	 * and retry the allocation until the number of blocks to allocate
	 * is smaller than the number of blocks per page.
	 */
	nxlen = xlen;
	if ((rc =
	     extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) {
		return (rc);
	}

	/* determine the value of the extent flag */
	xflag = (abnr == TRUE) ? XAD_NOTRECORDED : 0;

	/* if we can extend the hint extent to cover the current request,
	 * extend it.  otherwise, insert a new extent to
	 * cover the current request.
	 */
	if (xaddr && xaddr == nxaddr)
		rc = xtExtend(0, ip, xoff, (int) nxlen, 0);
	else
		rc = xtInsert(0, ip, xflag, xoff, (int) nxlen, &nxaddr, 0);

	/* if the extend or insert failed,
	 * free the newly allocated blocks and return the error.
	 */
	if (rc) {
		dbFree(ip, nxaddr, nxlen);
		return (rc);
	}

	/* update the number of blocks allocated to the file */
	ip->i_blocks += LBLK2PBLK(ip->i_sb, nxlen);

	/* set the results of the extent allocation */
	XADaddress(xp, nxaddr);
	XADlength(xp, nxlen);
	XADoffset(xp, xoff);
	xp->flag = xflag;

	mark_inode_dirty(ip);
	/*
	 * COMMIT_SyncList flags an anonymous tlock on page that is on
	 * sync list.
	 * We need to commit the inode to get the page written disk.
	 */
	if (test_and_clear_cflag(COMMIT_Synclist,ip))
		jfs_commit_inode(ip, 0);
	return (0);
}
/*
* NAME: extRealloc()
*
* FUNCTION: extend the allocation of a file extent containing a
* partial back last page.
*
* PARAMETERS:
* ip - the inode of the file.
* cp - cbuf for the partial backed last page.
* xlen - request size of the resulting extent.
* xp - pointer to an xad. on successful exit, the xad
* describes the newly allocated extent.
* abnr - boolean_t indicating whether the newly allocated extent
* should be marked as allocated but not recorded.
*
* RETURN VALUES:
* 0 - success
* EIO - i/o error.
* ENOSPC - insufficient disk resources.
*/
int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
{
	struct super_block *sb = ip->i_sb;
	s64 xaddr, xlen, nxaddr, delta, xoff;
	s64 ntail, nextend, ninsert;
	int rc, nbperpage = JFS_SBI(sb)->nbperpage;
	int xflag;

	/* This blocks if we are low on resources */
	txBeginAnon(ip->i_sb);

	/* validate extent length */
	if (nxlen > MAXXLEN)
		nxlen = MAXXLEN;

	/* get the extend (partial) page's disk block address and
	 * number of blocks.
	 */
	xaddr = addressXAD(xp);
	xlen = lengthXAD(xp);
	xoff = offsetXAD(xp);

	/* if the extend page is abnr and if the request is for
	 * the extent to be allocated and recorded,
	 * make the page allocated and recorded.
	 */
	if ((xp->flag & XAD_NOTRECORDED) && !abnr) {
		xp->flag = 0;
		if ((rc = xtUpdate(0, ip, xp)))
			return (rc);
	}

	/* try to allocated the request number of blocks for the
	 * extent.  dbRealloc() first tries to satisfy the request
	 * by extending the allocation in place.  otherwise, it will
	 * try to allocate a new set of blocks large enough for the
	 * request.  in satisfying a request, dbReAlloc() may allocate
	 * less than what was request but will always allocate enough
	 * space as to satisfy the extend page.
	 */
	if ((rc = extBrealloc(ip, xaddr, xlen, &nxlen, &nxaddr)))
		return (rc);

	/* delta = blocks gained beyond the original extent */
	delta = nxlen - xlen;

	/* check if the extend page is not abnr but the request is abnr
	 * and the allocated disk space is for more than one page.  if this
	 * is the case, there is a miss match of abnr between the extend page
	 * and the one or more pages following the extend page.  as a result,
	 * two extents will have to be manipulated.  the first will be that
	 * of the extent of the extend page and will be manipulated thru
	 * an xtExtend() or an xtTailgate(), depending upon whether the
	 * disk allocation occurred as an inplace extension.  the second
	 * extent will be manipulated (created) through an xtInsert() and
	 * will be for the pages following the extend page.
	 */
	if (abnr && (!(xp->flag & XAD_NOTRECORDED)) && (nxlen > nbperpage)) {
		ntail = nbperpage;
		nextend = ntail - xlen;
		ninsert = nxlen - nbperpage;
		xflag = XAD_NOTRECORDED;
	} else {
		ntail = nxlen;
		nextend = delta;
		ninsert = 0;
		xflag = xp->flag;
	}

	/* if we were able to extend the disk allocation in place,
	 * extend the extent.  otherwise, move the extent to a
	 * new disk location.
	 */
	if (xaddr == nxaddr) {
		/* extend the extent */
		if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
			dbFree(ip, xaddr + xlen, delta);
			return (rc);
		}
	} else {
		/*
		 * move the extent to a new location:
		 *
		 * xtTailgate() accounts for relocated tail extent;
		 */
		if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
			dbFree(ip, nxaddr, nxlen);
			return (rc);
		}
	}

	/* check if we need to also insert a new extent */
	if (ninsert) {
		/* perform the insert.  if it fails, free the blocks
		 * to be inserted and make it appear that we only did
		 * the xtExtend() or xtTailgate() above.
		 */
		xaddr = nxaddr + ntail;
		if (xtInsert (0, ip, xflag, xoff + ntail, (int) ninsert,
			      &xaddr, 0)) {
			dbFree(ip, xaddr, (s64) ninsert);
			delta = nextend;
			nxlen = ntail;
			xflag = 0;
		}
	}

	/* update the inode with the number of blocks allocated */
	ip->i_blocks += LBLK2PBLK(sb, delta);

	/* set the return results */
	XADaddress(xp, nxaddr);
	XADlength(xp, nxlen);
	XADoffset(xp, xoff);
	xp->flag = xflag;

	mark_inode_dirty(ip);
	return (0);
}
/*
* NAME: extHint()
*
* FUNCTION: produce an extent allocation hint for a file offset.
*
* PARAMETERS:
* ip - the inode of the file.
* offset - file offset for which the hint is needed.
* xp - pointer to the xad that is to be filled in with
* the hint.
*
* RETURN VALUES:
* 0 - success
* EIO - i/o error.
*/
int extHint(struct inode *ip, s64 offset, xad_t * xp)
{
	struct super_block *sb = ip->i_sb;
	xadlist_t xadl;
	lxdlist_t lxdl;
	lxd_t lxd;
	s64 prev;
	int rc, nbperpage = JFS_SBI(sb)->nbperpage;

	/* init the hint as "no hint provided" */
	XADaddress(xp, 0);

	/* determine the starting extent offset of the page previous
	 * to the page containing the offset.
	 */
	prev = ((offset & ~POFFSET) >> JFS_SBI(sb)->l2bsize) - nbperpage;

	/* if the offsets in the first page of the file,
	 * no hint provided.
	 */
	if (prev < 0)
		return (0);

	/* prepare to lookup the previous page's extent info */
	lxdl.maxnlxd = 1;
	lxdl.nlxd = 1;
	lxdl.lxd = &lxd;
	/* NOTE(review): no ';' after LXDoffset() -- the macro must expand
	 * to a complete statement for this to compile; verify against the
	 * LXDoffset definition in the JFS headers.
	 */
	LXDoffset(&lxd, prev)
	LXDlength(&lxd, nbperpage);
	xadl.maxnxad = 1;
	xadl.nxad = 0;
	xadl.xad = xp;

	/* perform the lookup */
	if ((rc = xtLookupList(ip, &lxdl, &xadl, 0)))
		return (rc);

	/* check if not extent exists for the previous page.
	 * this is possible for sparse files.
	 */
	if (xadl.nxad == 0) {
//		assert(ISSPARSE(ip));
		return (0);
	}

	/* only preserve the abnr flag within the xad flags
	 * of the returned hint.
	 */
	xp->flag &= XAD_NOTRECORDED;

	assert(xadl.nxad == 1);
	assert(lengthXAD(xp) == nbperpage);
	return (0);
}
/*
* NAME: extRecord()
*
* FUNCTION: change a page with a file from not recorded to recorded.
*
* PARAMETERS:
* ip - inode of the file.
* cp - cbuf of the file page.
*
* RETURN VALUES:
* 0 - success
* EIO - i/o error.
* ENOSPC - insufficient disk resources.
*/
int extRecord(struct inode *ip, xad_t * xp)
{
	int rc;

	/* This blocks if we are low on transaction resources */
	txBeginAnon(ip->i_sb);

	/* update the extent: clears the not-recorded state on disk */
	if ((rc = xtUpdate(0, ip, xp)))
		return (rc);

#ifdef _STILL_TO_PORT
	/* no longer abnr */
	cp->cm_abnr = FALSE;

	/* mark the cbuf as modified */
	cp->cm_modified = TRUE;
#endif				/*  _STILL_TO_PORT */

	return (0);
}
/*
* NAME: extFill()
*
* FUNCTION: allocate disk space for a file page that represents
* a file hole.
*
* PARAMETERS:
* ip - the inode of the file.
* cp - cbuf of the file page represent the hole.
*
* RETURN VALUES:
* 0 - success
* EIO - i/o error.
* ENOSPC - insufficient disk resources.
*/
int extFill(struct inode *ip, xad_t * xp)
{
	int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
	/* NOTE(review): this shifts by i_blksize (the block size in bytes,
	 * e.g. 4096), not by a log2 block-size value -- it looks like the
	 * shift count should be the inode's block-size bits; confirm
	 * against other offset->block conversions in JFS.
	 */
	s64 blkno = offsetXAD(xp) >> ip->i_blksize;

//	assert(ISSPARSE(ip));

	/* initialize the extent allocation hint */
	XADaddress(xp, 0);

	/* allocate an extent to fill the hole */
	if ((rc = extAlloc(ip, nbperpage, blkno, xp, FALSE)))
		return (rc);

	assert(lengthPXD(xp) == nbperpage);

	return (0);
}
/*
* NAME: extBalloc()
*
* FUNCTION: allocate disk blocks to form an extent.
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
* contigious free blocks not avaliable), we'll try to allocate
* a smaller number of blocks (producing a smaller extent), with
* this smaller number of blocks consisting of the requested
* number of blocks rounded down to the next smaller power of 2
* number (i.e. 16 -> 8). we'll continue to round down and
* retry the allocation until the number of blocks to allocate
* is smaller than the number of blocks per page.
*
* PARAMETERS:
* ip - the inode of the file.
* hint - disk block number to be used as an allocation hint.
* *nblocks - pointer to an s64 value. on entry, this value specifies
* the desired number of block to be allocated. on successful
* exit, this value is set to the number of blocks actually
* allocated.
* blkno - pointer to a block address that is filled in on successful
* return with the starting block number of the newly
* allocated block range.
*
* RETURN VALUES:
* 0 - success
* EIO - i/o error.
* ENOSPC - insufficient disk resources.
*/
static int
extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
{
	s64 nb, nblks, daddr, max;
	int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
	bmap_t *mp = JFS_SBI(ip->i_sb)->bmap;

	/* get the number of blocks to initially attempt to allocate.
	 * we'll first try the number of blocks requested unless this
	 * number is greater than the maximum number of contigious free
	 * blocks in the map.  in that case, we'll start off with the
	 * maximum free.
	 */
	max = (s64) 1 << mp->db_maxfreebud;
	if (*nblocks >= max && *nblocks > nbperpage)
		nb = nblks = (max > nbperpage) ? max : nbperpage;
	else
		nb = nblks = *nblocks;

	/* try to allocate blocks, halving (power-of-2 rounding down) the
	 * request on each ENOSPC until it would no longer cover a page.
	 */
	while ((rc = dbAlloc(ip, hint, nb, &daddr))) {
		/* if something other than an out of space error,
		 * stop and return this error.
		 */
		if (rc != ENOSPC)
			return (rc);

		/* decrease the allocation request size */
		nb = min(nblks, extRoundDown(nb));

		/* give up if we cannot cover a page */
		if (nb < nbperpage)
			return (rc);
	}

	*nblocks = nb;
	*blkno = daddr;

	return (0);
}
/*
 * NAME: extBrealloc()
 *
 * FUNCTION: attempt to extend an extent's allocation.
 *
 *	first try to grow the extent in place via dbExtend(); if that
 *	fails with ENOSPC, fall back to relocating the extent to a new
 *	set of blocks through extBalloc() (which may shrink the request,
 *	see extBalloc() for the round-down retry policy).
 *
 * PARAMETERS:
 *	ip	 - the inode of the file.
 *	blkno	 - starting block number of the extent's current allocation.
 *	nblks	 - number of blocks in the extent's current allocation.
 *	newnblks - in: desired new extent size in blocks; out: the
 *		   actual new size on success.
 *	newblkno - out: starting block number of the new allocation.
 *
 * RETURN VALUES:
 *	0 - success
 *	EIO - i/o error.
 *	ENOSPC - insufficient disk resources.
 */
static int
extBrealloc(struct inode *ip,
	    s64 blkno, s64 nblks, s64 * newnblks, s64 * newblkno)
{
	int rc = dbExtend(ip, blkno, nblks, *newnblks - nblks);

	if (rc == 0) {
		/* grown in place; the extent keeps its starting block */
		*newblkno = blkno;
		return (0);
	}
	if (rc != ENOSPC)
		return (rc);

	/* in place extension not possible: move to a fresh set of blocks */
	return (extBalloc(ip, blkno, newnblks, newblkno));
}
/*
 * NAME: extRoundDown()
 *
 * FUNCTION: round down a specified number of blocks to the next
 *	smallest power of 2 number.
 *
 * PARAMETERS:
 *	nb - the number of blocks to round down (must be non-zero).
 *
 * RETURN VALUES:
 *	next smallest power of 2 number; an exact power of 2 is
 *	stepped down to the next lower power (i.e. 16 -> 8).
 */
static s64 extRoundDown(s64 nb)
{
	u64 mask = (u64) 1 << 63;
	int bit = 63;

	/* locate the highest set bit of nb */
	while (bit >= 0 && !(mask & nb)) {
		mask >>= 1;
		bit--;
	}

	/* if only the top bit is set, nb is already a power of 2:
	 * return the next smaller power instead of nb itself.
	 */
	return (((mask - 1) & nb) ? (s64) mask : (s64) (mask >> 1));
}
| gpl-2.0 |
Orion116/kernel_samsung_lt03wifi_rebase | arch/arm/mm/init.c | 1 | 19595 | /*
* linux/arch/arm/mm/init.c
*
* Copyright (C) 1995-2005 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mm.h"
static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

/*
 * Parse "initrd=<start>,<size>" from the kernel command line.
 * Anything not matching the "start,size" form is silently ignored.
 */
static int __init early_initrd(char *p)
{
	char *endp;
	phys_addr_t start;
	unsigned long size;

	start = memparse(p, &endp);
	if (*endp != ',')
		return 0;

	size = memparse(endp + 1, NULL);
	phys_initrd_start = start;
	phys_initrd_size = size;
	return 0;
}
early_param("initrd", early_initrd);
/* Legacy ATAG_INITRD: the bootloader passes a virtual address. */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	/* convert to physical; all later bookkeeping is in phys space */
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}
__tagtable(ATAG_INITRD, parse_tag_initrd);
/* ATAG_INITRD2: the bootloader passes a physical address directly. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
/* Record the initrd range handed over by the flattened device tree. */
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map. It is populated by arm_add_memory().
 */
struct meminfo meminfo;
/*
 * Dump a page-accounting summary (total/free/reserved/slab/shared/
 * swap-cached) by classifying every page of every registered bank.
 */
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;
	printk("Mem-info:\n");
	show_free_areas(filter);
	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;
	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;
		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);
		page = pfn_to_page(pfn1);
		/* one-past-the-last page of the bank */
		end = pfn_to_page(pfn2 - 1) + 1;
		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				/* count extra references beyond the first user */
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
/*
 * Compute the pfn limits of the system: lowest pfn, end of lowmem,
 * and end of all memory. Relies on meminfo being sorted by address
 * (done earlier in bootmem_init()) with highmem banks last.
 */
static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int bank;

	/* first bank holds the lowest pfn */
	*min = bank_pfn_start(&mi->bank[0]);

	/* walk up to the first highmem bank; everything before is lowmem */
	for (bank = 0; bank < mi->nr_banks; bank++)
		if (mi->bank[bank].highmem)
			break;

	*max_low = bank_pfn_end(&mi->bank[bank - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
/*
 * Set up the bootmem allocator for the lowmem pfn range
 * [start_pfn, end_pfn), seeding its free/reserved state from the
 * memblock "memory" and "reserved" region lists.
 */
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	/*
	 * Allocate the bootmem bitmap page. This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
		__pfn_to_phys(end_pfn));
	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);
		/* clamp to lowmem; regions are sorted, so stop once past it */
		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);
		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;
		reserve_bootmem(__pfn_to_phys(start),
			(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
#ifdef CONFIG_ZONE_DMA
/* machine-specific DMA zone size in bytes; 0 means no DMA zone */
unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);
/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;
/*
 * Carve the first dma_size pages out of zone 0 into ZONE_DMA,
 * moving the remainder (and the accumulated hole count) into
 * ZONE_NORMAL. size/hole are the per-zone arrays from
 * arm_bootmem_free(), dma_size is in pages.
 */
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	/* nothing to split if lowmem already fits inside the DMA zone */
	if (size[0] <= dma_size)
		return;
	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
/*
 * Hand the zone layout over to the core VM: compute per-zone sizes
 * and hole sizes from the pfn limits found by find_limits(), then
 * call free_area_init_node().
 */
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;
	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));
	/*
	 * The memory size has already been determined. If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif
	/*
	 * Calculate the size of the holes.
	 * holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);
		/* subtract the lowmem part of this region from the lowmem hole */
		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		/* and the highmem part from the highmem hole */
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}
#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size) {
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif
	free_area_init_node(0, zone_size, min, zhole_size);
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
/* Without sparsemem there are no sections to register. */
static void __init arm_memory_present(void)
{
}
#else
/* Tell sparsemem which pfn ranges of node 0 actually exist. */
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;
	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
}
#endif
/* cleared at the end of arm_memblock_init(); stealing later is a bug */
static bool arm_memblock_steal_permitted = true;
/*
 * Permanently remove memory from the system for exclusive platform
 * use. Returns the physical base of the stolen range.
 */
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;
	BUG_ON(!arm_memblock_steal_permitted);
	phys = memblock_alloc(size, align);
	/* drop the range from both reserved and memory lists: gone for good */
	memblock_free(phys, size);
	memblock_remove(phys, size);
	return phys;
}
/*
 * Register all memory banks with memblock, then reserve every range
 * that must survive boot: kernel image, initrd, page tables, device
 * tree and platform-specific areas.
 */
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);
	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	/* XIP: text executes from flash, only data/bss occupy RAM */
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	/* sanity-check the initrd range before committing to it */
	if (phys_initrd_size &&
		!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
			(u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
		memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
			(u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);
		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif
	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();
	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();
	/* layout is final from here on: forbid arm_memblock_steal() */
	arm_memblock_steal_permitted = false;
	memblock_allow_resize();
	memblock_dump_all();
}
/*
 * Top-level boot-time memory initialisation: find pfn limits, bring
 * up the bootmem allocator, register sparsemem sections, and hand
 * the zones over to the core VM. Order of these steps matters.
 */
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;
	max_low = max_high = 0;
	find_limits(&min, &max_low, &max_high);
	arm_bootmem_init(min, max_low);
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();
	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();
	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);
	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block. If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
/*
 * Release the pfn range [pfn, end) to the page allocator. Returns
 * the number of pages freed; logs the amount under label 's' if
 * non-NULL and the range is non-empty.
 */
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int freed = 0;
	unsigned int kbytes = (end - pfn) << (PAGE_SHIFT - 10);
	unsigned long cur;

	for (cur = pfn; cur < end; cur++) {
		struct page *page = pfn_to_page(cur);

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		freed++;
	}

	if (kbytes && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, kbytes);

	return freed;
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb), so stale use of freed init code traps.
 * 'count' is in bytes and is expected to be a multiple of 4.
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *word = (u32 *)s;

	while (count != 0) {
		*word++ = 0xe7fddef0;
		count -= 4;
	}
}
/*
 * Free the part of the mem_map array that covers the unused pfn
 * range [start_pfn, end_pfn) back to the bootmem allocator.
 */
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;
	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;
	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;
	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;
	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		bank_start = bank_pfn_start(bank);
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
			ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);
		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
#ifdef CONFIG_SPARSEMEM
	/* free the tail of the last sparsemem section, if partially used */
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
/*
 * Release all highmem pages to the page allocator, carefully skipping
 * any memblock-reserved sub-ranges. Updates totalhigh_pages and
 * totalram_pages. No-op without CONFIG_HIGHMEM.
 */
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;
	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);
		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;
		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;
		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;
			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);
			if (res_end < start)
				continue;
			/* clamp the reserved range into [start, end) */
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			/* free the gap before this reserved range */
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
					NULL);
			start = res_end;
			if (start == end)
				break;
		}
		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif
	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);
	totalram_pages += free_all_bootmem();
#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
		__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
	free_highpages();
	/* count reserved and still-free pages across all banks */
	reserved_pages = free_pages = 0;
	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;
		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);
		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;
		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}
	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));
	/* helpers expanding to (base, top, delta-in-kB/MB) printk args */
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
		" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
		" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
		" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
		" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
		" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
		" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
		" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
		" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
		" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
		" .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
		MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
			(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
		MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
		MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
		MLK(FIXADDR_START, FIXADDR_TOP),
		MLM(VMALLOC_START, VMALLOC_END),
		MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
		MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
			(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
		MLM(MODULES_VADDR, MODULES_END),
#endif
		MLK_ROUNDUP(_text, _etext),
		MLK_ROUNDUP(__init_begin, __init_end),
		MLK_ROUNDUP(_sdata, _edata),
		MLK_ROUNDUP(__bss_start, __bss_stop));
#undef MLK
#undef MLM
#undef MLK_ROUNDUP
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
/*
 * Poison and free the kernel's init sections (and the TCM link area,
 * when configured) back to the page allocator.
 */
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
		__phys_to_pfn(__pa(&__tcm_end)),
		"TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);

	/* Integrator platforms keep their init memory mapped */
	if (machine_is_integrator() || machine_is_cintegrator())
		return;

	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
		__phys_to_pfn(__pa(__init_end)),
		"init");
}
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
__phys_to_pfn(__pa(end)),
"initrd");
}
}
static int __init keepinitrd_setup(char *__unused)
{
keep_initrd = 1;
return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif
| gpl-2.0 |
mangosthree/server | src/modules/SD3/include/sc_instance.cpp | 1 | 14947 | /**
* ScriptDev3 is an extension for mangos providing enhanced features for
* area triggers, creatures, game objects, instances, items, and spells beyond
* the default database scripting in mangos.
*
* Copyright (C) 2006-2013 ScriptDev2 <http://www.scriptdev2.com/>
* Copyright (C) 2014-2022 MaNGOS <https://getmangos.eu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* World of Warcraft, and all World of Warcraft or Warcraft art, images,
* and lore are copyrighted by Blizzard Entertainment, Inc.
*/
#include "precompiled.h"
/**
 Function that uses a door or a button
 @param guid The ObjectGuid of the Door/ Button that will be used
 @param uiWithRestoreTime (in seconds) if == 0 autoCloseTime will be used (if not 0 by default in *_template)
 @param bUseAlternativeState Use to alternative state
 */
void ScriptedInstance::DoUseDoorOrButton(ObjectGuid guid, uint32 uiWithRestoreTime, bool bUseAlternativeState)
{
    if (!guid)
    {
        return;
    }
    if (GameObject* pGo = instance->GetGameObject(guid))
    {
        // TRAPDOOR type only exists from WotLK onwards, hence the split condition
#if defined (CLASSIC) || defined (TBC)
        if (pGo->GetGoType() == GAMEOBJECT_TYPE_DOOR || pGo->GetGoType() == GAMEOBJECT_TYPE_BUTTON)
#endif
#if defined (WOTLK) || defined (CATA) || defined(MISTS)
        if (pGo->GetGoType() == GAMEOBJECT_TYPE_DOOR || pGo->GetGoType() == GAMEOBJECT_TYPE_BUTTON || pGo->GetGoType() == GAMEOBJECT_TYPE_TRAPDOOR)
#endif
        {
            // GO_READY -> activate it; GO_ACTIVATED -> reset back to ready
            if (pGo->getLootState() == GO_READY)
            {
                pGo->UseDoorOrButton(uiWithRestoreTime, bUseAlternativeState);
            }
            else if (pGo->getLootState() == GO_ACTIVATED)
            {
                pGo->ResetDoorOrButton();
            }
        }
        else
        {
            script_error_log("Script call DoUseDoorOrButton, but gameobject entry %u is type %u.", pGo->GetEntry(), pGo->GetGoType());
        }
    }
}
/// Function that uses a door or button that is stored in m_mGoEntryGuidStore
void ScriptedInstance::DoUseDoorOrButton(uint32 uiEntry, uint32 uiWithRestoreTime /*= 0*/, bool bUseAlternativeState /*= false*/)
{
    EntryGuidMap::iterator itr = m_mGoEntryGuidStore.find(uiEntry);
    if (itr == m_mGoEntryGuidStore.end())
    {
        // Possible reasons: GO not added to storage, or not yet loaded
        debug_log("SD3: Script call DoUseDoorOrButton(by Entry), but no gameobject of entry %u was created yet, or it was not stored by script for map %u.", uiEntry, instance->GetId());
        return;
    }

    DoUseDoorOrButton(itr->second, uiWithRestoreTime, bUseAlternativeState);
}
/**
 Function that respawns a despawned GameObject with given time
 @param guid The ObjectGuid of the GO that will be respawned
 @param uiTimeToDespawn (in seconds) Despawn the GO after this time, default is a minute
 */
void ScriptedInstance::DoRespawnGameObject(ObjectGuid guid, uint32 uiTimeToDespawn)
{
    if (!guid)
    {
        return;
    }
    if (GameObject* pGo = instance->GetGameObject(guid))
    {
        // not expect any of these should ever be handled
        // NOTE: the rejected type list differs per client version (traps
        // are only excluded on CLASSIC/TBC); the condition is one if-statement
        // assembled across the #if branches below
        if (pGo->GetGoType() == GAMEOBJECT_TYPE_FISHINGNODE || pGo->GetGoType() == GAMEOBJECT_TYPE_DOOR ||
#if defined (CLASSIC) || defined (TBC)
            pGo->GetGoType() == GAMEOBJECT_TYPE_BUTTON || pGo->GetGoType() == GAMEOBJECT_TYPE_TRAP)
#endif
#if defined (WOTLK) || defined (CATA) || defined(MISTS)
            pGo->GetGoType() == GAMEOBJECT_TYPE_BUTTON)
#endif
        {
            return;
        }
        // already visible: nothing to respawn
        if (pGo->isSpawned())
        {
            return;
        }
        pGo->SetRespawnTime(uiTimeToDespawn);
        pGo->Refresh();
    }
}
/// Function that toggles the GO-flags of a GameObject that is stored in m_mGoEntryGuidStore
void ScriptedInstance::DoToggleGameObjectFlags(uint32 uiEntry, uint32 uiGOflags, bool bApply)
{
    EntryGuidMap::iterator find = m_mGoEntryGuidStore.find(uiEntry);
    if (find != m_mGoEntryGuidStore.end())
    {
        DoToggleGameObjectFlags(find->second, uiGOflags, bApply);
    }
    else
    // Output log, possible reason is not added GO to storage, or not yet loaded
    {
        // fixed: log previously misnamed the function as "ToogleTameObjectFlags"
        debug_log("SD3: Script call DoToggleGameObjectFlags (by Entry), but no gameobject of entry %u was created yet, or it was not stored by script for map %u.", uiEntry, instance->GetId());
    }
}
/**
 Function that toggles the GO-flags of a GameObject
 @param guid The ObjectGuid of the GO whose flags will be changed
 @param uiGOflags Which GO-flags to toggle
 @param bApply should the GO-flags be applied or removed?
 */
void ScriptedInstance::DoToggleGameObjectFlags(ObjectGuid guid, uint32 uiGOflags, bool bApply)
{
    if (!guid)
    {
        return;
    }

    GameObject* pGo = instance->GetGameObject(guid);
    if (!pGo)
    {
        return;
    }

    if (bApply)
    {
        pGo->SetFlag(GAMEOBJECT_FLAGS, uiGOflags);
    }
    else
    {
        pGo->RemoveFlag(GAMEOBJECT_FLAGS, uiGOflags);
    }
}
/// Function that respawns a despawned GO that is stored in m_mGoEntryGuidStore
void ScriptedInstance::DoRespawnGameObject(uint32 uiEntry, uint32 uiTimeToDespawn)
{
    EntryGuidMap::iterator itr = m_mGoEntryGuidStore.find(uiEntry);
    if (itr == m_mGoEntryGuidStore.end())
    {
        // Possible reasons: GO not added to storage, or not yet loaded
        debug_log("SD3: Script call DoRespawnGameObject(by Entry), but no gameobject of entry %u was created yet, or it was not stored by script for map %u.", uiEntry, instance->GetId());
        return;
    }

    DoRespawnGameObject(itr->second, uiTimeToDespawn);
}
/**
 Helper function to update a world state for all players in the map
 @param uiStateId The WorldState that will be set for all players in the map
 @param uiStateData The Value to which the State will be set to
 */
void ScriptedInstance::DoUpdateWorldState(uint32 uiStateId, uint32 uiStateData)
{
    Map::PlayerList const& lPlayers = instance->GetPlayers();
    if (lPlayers.isEmpty())
    {
        debug_log("SD3: DoUpdateWorldState attempt send data but no players in map.");
        return;
    }

    for (Map::PlayerList::const_iterator itr = lPlayers.begin(); itr != lPlayers.end(); ++itr)
    {
        Player* pPlayer = itr->getSource();
        if (pPlayer)
        {
            pPlayer->SendUpdateWorldState(uiStateId, uiStateData);
        }
    }
}
/// Get the first found Player* (with requested properties) in the map. Can return nullptr.
Player* ScriptedInstance::GetPlayerInMap(bool bOnlyAlive /*=false*/, bool bCanBeGamemaster /*=true*/)
{
    Map::PlayerList const& lPlayers = instance->GetPlayers();
    for (Map::PlayerList::const_iterator itr = lPlayers.begin(); itr != lPlayers.end(); ++itr)
    {
        Player* pCandidate = itr->getSource();
        if (!pCandidate)
        {
            continue;
        }
        if (bOnlyAlive && !pCandidate->IsAlive())
        {
            continue;
        }
        if (!bCanBeGamemaster && pCandidate->isGameMaster())
        {
            continue;
        }
        return pCandidate;
    }
    return nullptr;
}
/// Returns a pointer to a loaded GameObject that was stored in m_mGoEntryGuidStore. Can return nullptr
GameObject* ScriptedInstance::GetSingleGameObjectFromStorage(uint32 uiEntry) const
{
    EntryGuidMap::const_iterator itr = m_mGoEntryGuidStore.find(uiEntry);
    if (itr == m_mGoEntryGuidStore.end())
    {
        // Possible reasons: GO not added to map, or not yet loaded
        script_error_log("Script requested gameobject with entry %u, but no gameobject of this entry was created yet, or it was not stored by script for map %u.", uiEntry, instance->GetId());
        return nullptr;
    }
    return instance->GetGameObject(itr->second);
}
/// Returns a pointer to a loaded Creature that was stored in m_mNpcEntryGuidStore. Can return nullptr
Creature* ScriptedInstance::GetSingleCreatureFromStorage(uint32 uiEntry, bool bSkipDebugLog /*=false*/) const
{
    EntryGuidMap::const_iterator itr = m_mNpcEntryGuidStore.find(uiEntry);
    if (itr != m_mNpcEntryGuidStore.end())
    {
        return instance->GetCreature(itr->second);
    }

    // Possible reasons: NPC not added to map, or not yet loaded
    if (!bSkipDebugLog)
    {
        script_error_log("Script requested creature with entry %u, but no npc of this entry was created yet, or it was not stored by script for map %u.", uiEntry, instance->GetId());
    }
    return nullptr;
}
#if defined (WOTLK) || defined (CATA) || defined(MISTS)
/**
 Helper function to start a timed achievement criteria for players in the map
 @param criteriaType The Type that is required to complete the criteria, see enum AchievementCriteriaTypes in MaNGOS
 @param uiTimedCriteriaMiscId The ID that identifies how the criteria is started
 */
void ScriptedInstance::DoStartTimedAchievement(AchievementCriteriaTypes criteriaType, uint32 uiTimedCriteriaMiscId)
{
    Map::PlayerList const& lPlayers = instance->GetPlayers();
    if (lPlayers.isEmpty())
    {
        debug_log("SD3: DoStartTimedAchievement attempt start achievements but no players in map.");
        return;
    }

    for (Map::PlayerList::const_iterator itr = lPlayers.begin(); itr != lPlayers.end(); ++itr)
    {
        Player* pPlayer = itr->getSource();
        if (pPlayer)
        {
            pPlayer->StartTimedAchievementCriteria(criteriaType, uiTimedCriteriaMiscId);
        }
    }
}
#endif
/**
 Constructor for DialogueHelper
 @param pDialogueArray The static const array of DialogueEntry holding the information about the dialogue. This array MUST be terminated by {0,0,0}
 */
DialogueHelper::DialogueHelper(DialogueEntry const* pDialogueArray) :
    m_pInstance(nullptr),
    m_pDialogueArray(pDialogueArray),
    m_pCurrentEntry(nullptr),
    // two-sided members stay null: their nullness selects one-sided mode
    m_pDialogueTwoSideArray(nullptr),
    m_pCurrentEntryTwoSide(nullptr),
    m_uiTimer(0),
    m_bIsFirstSide(true),
    m_bCanSimulate(false)
{}
/**
 Constructor for DialogueHelper (Two Sides)
 @param pDialogueTwoSideArray The static const array of DialogueEntryTwoSide holding the information about the dialogue. This array MUST be terminated by {0,0,0,0,0}
 */
DialogueHelper::DialogueHelper(DialogueEntryTwoSide const* pDialogueTwoSideArray) :
    m_pInstance(nullptr),
    // one-sided members stay null: their nullness selects two-sided mode
    m_pDialogueArray(nullptr),
    m_pCurrentEntry(nullptr),
    m_pDialogueTwoSideArray(pDialogueTwoSideArray),
    m_pCurrentEntryTwoSide(nullptr),
    m_uiTimer(0),
    m_bIsFirstSide(true),
    m_bCanSimulate(false)
{}
/**
 Function to start a (part of a) dialogue
 @param iTextEntry The TextEntry of the dialogue that will be started (must be always the entry of first side)
 */
void DialogueHelper::StartNextDialogueText(int32 iTextEntry)
{
    bool bEntryFound = false;

    if (m_pDialogueArray)                           // One-sided dialogue
    {
        for (DialogueEntry const* pWalk = m_pDialogueArray; pWalk->iTextEntry; ++pWalk)
        {
            if (pWalk->iTextEntry == iTextEntry)
            {
                m_pCurrentEntry = pWalk;
                bEntryFound = true;
                break;
            }
        }
    }
    else                                            // Two-sided dialogue
    {
        for (DialogueEntryTwoSide const* pWalk = m_pDialogueTwoSideArray; pWalk->iTextEntry; ++pWalk)
        {
            if (pWalk->iTextEntry == iTextEntry)
            {
                m_pCurrentEntryTwoSide = pWalk;
                bEntryFound = true;
                break;
            }
        }
    }

    if (!bEntryFound)
    {
        script_error_log("Script call DialogueHelper::StartNextDialogueText, but textEntry %i is not in provided dialogue (on map id %u)", iTextEntry, m_pInstance ? m_pInstance->instance->GetId() : 0);
        return;
    }

    DoNextDialogueStep();
}
/// Internal helper function to do the actual say of a DialogueEntry
void DialogueHelper::DoNextDialogueStep()
{
    // Last Dialogue Entry done? The terminator entry has iTextEntry == 0,
    // so reaching it stops the timer-driven progression.
    if ((m_pCurrentEntry && !m_pCurrentEntry->iTextEntry) || (m_pCurrentEntryTwoSide && !m_pCurrentEntryTwoSide->iTextEntry))
    {
        m_uiTimer = 0;
        return;
    }

    // Get Text, SpeakerEntry and Timer for the current step
    int32 iTextEntry = 0;
    uint32 uiSpeakerEntry = 0;

    if (m_pDialogueArray)                       // One Side
    {
        uiSpeakerEntry = m_pCurrentEntry->uiSayerEntry;
        iTextEntry = m_pCurrentEntry->iTextEntry;
        m_uiTimer = m_pCurrentEntry->uiTimer;
    }
    else                                        // Two Sides
    {
        // Second (Alt) entries can be 0; if so the first side's values are used
        uiSpeakerEntry = !m_bIsFirstSide && m_pCurrentEntryTwoSide->uiSayerEntryAlt ? m_pCurrentEntryTwoSide->uiSayerEntryAlt : m_pCurrentEntryTwoSide->uiSayerEntry;
        iTextEntry = !m_bIsFirstSide && m_pCurrentEntryTwoSide->iTextEntryAlt ? m_pCurrentEntryTwoSide->iTextEntryAlt : m_pCurrentEntryTwoSide->iTextEntry;
        m_uiTimer = m_pCurrentEntryTwoSide->uiTimer;
    }

    // Simulate Case -- only act when there is a speaker and a text entry
    // (negative entries are script texts; presumably DoScriptText's
    // convention -- confirm against the ScriptDev text tables).
    if (uiSpeakerEntry && iTextEntry < 0)
    {
        // Use Speaker if directly provided by the subclass override
        Creature* pSpeaker = GetSpeakerByEntry(uiSpeakerEntry);

        if (m_pInstance && !pSpeaker)           // Get Speaker from instance
        {
            if (m_bCanSimulate)                 // Simulate case: say even without a spawned creature
            {
                m_pInstance->DoOrSimulateScriptTextForThisInstance(iTextEntry, uiSpeakerEntry);
            }
            else
            {
                pSpeaker = m_pInstance->GetSingleCreatureFromStorage(uiSpeakerEntry);
            }
        }

        if (pSpeaker)
        {
            DoScriptText(iTextEntry, pSpeaker);
        }
    }

    // Notify subclass of the step just performed (first-side entry always reported)
    JustDidDialogueStep(m_pDialogueArray ? m_pCurrentEntry->iTextEntry : m_pCurrentEntryTwoSide->iTextEntry);

    // Increment position so the next timer expiry performs the next entry
    if (m_pDialogueArray)
    {
        ++m_pCurrentEntry;
    }
    else
    {
        ++m_pCurrentEntryTwoSide;
    }
}
/// Call this function within any DialogueUpdate method. This is required for saying next steps in a dialogue
void DialogueHelper::DialogueUpdate(uint32 uiDiff)
{
    // A timer of 0 means no dialogue step is pending.
    if (!m_uiTimer)
        return;

    if (m_uiTimer > uiDiff)
        m_uiTimer -= uiDiff;        // still counting down
    else
        DoNextDialogueStep();       // timer elapsed -> perform the next entry
}
| gpl-2.0 |
vrtadmin/clamav-devel | clambc/bcrun.c | 1 | 12298 | /*
* ClamAV bytecode handler tool.
*
* Copyright (C) 2015, 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2009-2012 Sourcefire, Inc.
*
* Authors: Török Edvin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif
#include "cltypes.h"
#ifndef _WIN32
#include <sys/time.h>
#endif
#include <stdlib.h>
#include "bytecode.h"
#include "bytecode_priv.h"
#include "clamav.h"
#include "shared/optparser.h"
#include "shared/misc.h"
#include "libclamav/dconf.h"
#include "libclamav/others.h"
#include <fcntl.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
/* Print the usage summary for the clambc tool.
 *
 * NOTE(review): both --printbcir and --input advertise the short option -c
 * below; one of the two is presumably wrong -- confirm against the
 * OPT_CLAMBC table in shared/optparser.c before changing the text. */
static void help(void)
{
    printf("\n");
    printf(" Clam AntiVirus: Bytecode Testing Tool %s\n", get_version());
    printf(" By The ClamAV Team: https://www.clamav.net/about.html#credits\n");
    printf(" (C) 2009-2018 Cisco Systems, Inc.\n");
    printf("\n");
    printf(" clambc <file> [function] [param1 ...]\n");
    printf("\n");
    printf(" --help -h Show this help\n");
    printf(" --version -V Show version\n");
    printf(" --debug Show debug\n");
    printf(" --force-interpreter -f Force using the interpreter instead of the JIT\n");
    printf(" --trust-bytecode -t Trust loaded bytecode (default yes)\n");
    printf(" --info -i Print information about bytecode\n");
    printf(" --printsrc -p Print bytecode source\n");
    printf(" --printbcir -c Print IR of bytecode signature\n");
    printf(" --input -c Input file to run the bytecode on\n");
    printf(" --trace <level> -T Set bytecode trace level 0..7 (default 7)\n");
    printf(" --no-trace-showsource -s Don't show source line during tracing\n");
    printf(" --statistics=bytecode Collect and print bytecode execution statistics\n");
    printf(" file File to test\n");
    printf("\n");
    return;
}
/* Last-seen source location; the trace hooks below use it to print
 * "from -> to" transitions between consecutive bytecode trace events. */
static struct dbg_state {
    const char *directory;  /* directory of the current source file */
    const char *file;       /* previously reported file name */
    const char *scope;      /* previously reported function/scope name */
    uint32_t scopeid;       /* scope id (currently never written by the hooks below) */
    unsigned line;          /* previously reported line */
    unsigned col;           /* previously reported column */
    unsigned showline;      /* nonzero: echo actual source lines while tracing */
} dbg_state;
/* Per-event trace callback installed via cli_bytecode_context_set_trace().
 * Prints the event, then records the current position in dbg_state so the
 * next event can show the previous location as its "from" side. */
static void tracehook(struct cli_bc_ctx *ctx, unsigned event)
{
    dbg_state.directory = ctx->directory;
    /* '?' marks a synthetic/unknown file name: nothing useful to report */
    if (*ctx->file == '?')
        return;
    switch (event) {
    case trace_func:
        fprintf(stderr, "[trace] %s:%u:%u -> %s:%u:%u Entered function %s\n",
                dbg_state.file, dbg_state.line, dbg_state.col,
                ctx->file, ctx->line, ctx->col, ctx->scope);
        dbg_state.scope = ctx->scope;
        break;
    case trace_param:
        fprintf(stderr, "[trace] function parameter:\n");
        /* intentional early return: parameters do not move the location */
        return;
    case trace_scope:
        fprintf(stderr, "[trace] %s:%u:%u -> %s:%u:%u\n",
                dbg_state.file, dbg_state.line, dbg_state.col,
                ctx->file, ctx->line, ctx->col);
        dbg_state.scope = ctx->scope;
        break;
    case trace_line:
    case trace_col:
        if (dbg_state.showline)
            cli_bytecode_debug_printsrc(ctx);   /* echo the actual source line */
        else
            fprintf(stderr, "[trace] %s:%u:%u\n",
                    dbg_state.file, dbg_state.line, dbg_state.col);
        break;
    default:
        break;
    }
    /* remember this location for the next event's "from" side */
    dbg_state.file = ctx->file;
    dbg_state.line = ctx->line;
    dbg_state.col = ctx->col;
}
/* Trace callback: an operation is being reported by the interpreter. */
static void tracehook_op(struct cli_bc_ctx *ctx, const char *op)
{
    UNUSEDPARAM(ctx);
    fprintf(stderr, "[trace] %s\n", op);
}

/* Trace callback: a named 32-bit value is being reported. */
static void tracehook_val(struct cli_bc_ctx *ctx, const char *name, uint32_t value)
{
    UNUSEDPARAM(ctx);
    fprintf(stderr, "[trace] %s = %u\n", name, value);
}

/* Trace callback: a pointer value is being reported. */
static void tracehook_ptr(struct cli_bc_ctx *ctx, const void *ptr)
{
    UNUSEDPARAM(ctx);
    fprintf(stderr, "[trace] %p\n", ptr);
}
static uint8_t debug_flag = 0;
/* Print the embedded source code of a bytecode signature file.
 *
 * The file layout (as this decoder assumes it): a header region, then --
 * after at least two newlines -- a line starting with 'S' that begins the
 * hex-nibble-encoded source. Each source byte is stored as two ASCII
 * characters (low nibble first); 'S' and '\n' act as line separators.
 * NOTE(review): assumptions inferred from this decoder; confirm against
 * the bytecode signature format documentation. */
static void print_src(const char *file)
{
    char buf[4096];
    int nread, i, found = 0, lcnt = 0;
    FILE *f = fopen(file, "r");
    if (!f) {
        fprintf(stderr,"Unable to reopen %s\n", file);
        return;
    }
    /* Phase 1: scan forward until the source section ('\n' followed by 'S'
     * after at least two newlines) or EOF. */
    do {
        nread = fread(buf, 1, sizeof(buf), f);
        for (i=0;i<nread-1;i++) {
            if (buf[i] == '\n') {
                lcnt++;
            }
            /* skip over the logical trigger */
            if (lcnt >= 2 && buf[i] == '\n' && buf[i+1] == 'S') {
                found = 1;
                i+=2;
                break;
            }
        }
    } while (!found && (nread == sizeof(buf)));
    if (debug_flag)
        printf("[clambc] Source code:");
    /* Phase 2: decode nibble pairs starting at position i of the current
     * buffer; a trailing unpaired byte is pushed back via fseek so it is
     * re-read together with the next chunk. */
    do {
        for (;i+1<nread;i++) {
            if (buf[i] == 'S' || buf[i] == '\n') {
                putc('\n', stdout);
                continue;
            }
            putc(((buf[i]&0xf) | ((buf[i+1]&0xf)<<4)), stdout);
            i++;
        }
        if (i == nread-1 && nread != 1)
            fseek(f, -1, SEEK_CUR);
        i=0;
        nread = fread(buf, 1, sizeof(buf), f);
    } while (nread > 0);
    fclose(f);
}
/* Sentinel data wired into ctx->hooks.match_counts / match_offsets before a
 * bytecode run ("for testing" in main): the recognizable 0xdeadbeef /
 * 0xbeefdead values make it obvious when a bytecode reads these arrays.
 * Entries 1 and 3 are intentionally zero. */
static uint32_t deadbeefcounts[64] = {
    0xdeadbeef, 0,          0xbeefdead, 0,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
    0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
};
/* Entry point: parse options, load a bytecode file and either describe it
 * or execute one of its functions (JIT or interpreter).
 *
 * Fixes over the previous revision:
 *  - report opts->filename[0] (the file we actually tried to open) instead
 *    of argv[1], which may be an option flag when options precede the file;
 *  - close the input FILE* on the malloc-failure path. */
int main(int argc, char *argv[])
{
    FILE *f;
    struct cli_bc *bc;
    struct cli_bc_ctx *ctx;
    int rc, dbgargc, bc_stats=0;
    struct optstruct *opts;
    const struct optstruct *opt;
    unsigned funcid=0, i;
    struct cli_all_bc bcs;
    int fd = -1;
    unsigned tracelevel;

    if(check_flevel())
        exit(1);

    opts = optparse(NULL, argc, argv, 1, OPT_CLAMBC, 0, NULL);
    if (!opts) {
        fprintf(stderr, "ERROR: Can't parse command line options\n");
        exit(1);
    }
    if(optget(opts, "version")->enabled) {
        printf("Clam AntiVirus Bytecode Testing Tool %s\n", get_version());
        cl_init(CL_INIT_DEFAULT);
        cli_bytecode_printversion();
        optfree(opts);
        exit(0);
    }
    if(optget(opts, "help")->enabled || !opts->filename) {
        optfree(opts);
        help();
        exit(0);
    }

    f = fopen(opts->filename[0], "r");
    if (!f) {
        /* BUGFIX: argv[1] may be an option flag (e.g. --debug); report the
         * file name we actually tried to open. */
        fprintf(stderr, "Unable to load %s\n", opts->filename[0]);
        optfree(opts);
        exit(2);
    }

    bc = malloc(sizeof(*bc));
    if (!bc) {
        fprintf(stderr, "Out of memory\n");
        fclose(f);      /* BUGFIX: don't leak the input stream */
        optfree(opts);
        exit(3);
    }

    if (optget(opts,"debug")->enabled) {
        cl_debug();
        debug_flag=1;
    }
    rc = cl_init(CL_INIT_DEFAULT);
    if (rc != CL_SUCCESS) {
        fprintf(stderr,"Unable to init libclamav: %s\n", cl_strerror(rc));
        optfree(opts);
        exit(4);
    }

    /* Any extra filenames after the first are debug args for the bytecode engine */
    dbgargc=1;
    while (opts->filename[dbgargc]) dbgargc++;
    if (dbgargc > 1)
        cli_bytecode_debug(dbgargc, opts->filename);

    if (optget(opts, "force-interpreter")->enabled) {
        bcs.engine = NULL;      /* NULL engine => interpreter instead of JIT */
    } else {
        rc = cli_bytecode_init(&bcs);
        if (rc != CL_SUCCESS) {
            fprintf(stderr,"Unable to init bytecode engine: %s\n", cl_strerror(rc));
            optfree(opts);
            exit(4);
        }
    }

    bcs.all_bcs = bc;
    bcs.count = 1;
    if((opt = optget(opts, "statistics"))->enabled) {
        while(opt) {
            if (!strcasecmp(opt->strarg, "bytecode"))
                bc_stats=1;
            opt = opt->nextarg;
        }
    }

    rc = cli_bytecode_load(bc, f, NULL, optget(opts, "trust-bytecode")->enabled, bc_stats);
    if (rc != CL_SUCCESS) {
        fprintf(stderr,"Unable to load bytecode: %s\n", cl_strerror(rc));
        optfree(opts);
        exit(4);
    }
    fclose(f);

    if (bc->state == bc_skip) {
        fprintf(stderr,"bytecode load skipped\n");
        exit(0);
    }
    if (debug_flag)
        printf("[clambc] Bytecode loaded\n");

    if (optget(opts, "info")->enabled) {
        cli_bytecode_describe(bc);
    } else if (optget(opts, "printsrc")->enabled) {
        print_src(opts->filename[0]);
    } else if (optget(opts, "printbcir")->enabled) {
        cli_bytetype_describe(bc);
        cli_bytevalue_describe(bc, 0);
        cli_bytefunc_describe(bc, 0);
    } else {
        /* Execute a bytecode function */
        cli_ctx cctx;
        struct cl_engine *engine = cl_engine_new();
        fmap_t *map = NULL;
        memset(&cctx, 0, sizeof(cctx));
        if (!engine) {
            fprintf(stderr,"Unable to create engine\n");
            optfree(opts);
            exit(3);
        }
        rc = cl_engine_compile(engine);
        if (rc) {
            fprintf(stderr,"Unable to compile engine: %s\n", cl_strerror(rc));
            optfree(opts);
            exit(4);
        }
        rc = cli_bytecode_prepare2(engine, &bcs, BYTECODE_ENGINE_MASK);
        if (rc != CL_SUCCESS) {
            fprintf(stderr,"Unable to prepare bytecode: %s\n", cl_strerror(rc));
            optfree(opts);
            exit(4);
        }
        if (debug_flag)
            printf("[clambc] Bytecode prepared\n");

        ctx = cli_bytecode_context_alloc();
        if (!ctx) {
            fprintf(stderr,"Out of memory\n");
            exit(3);
        }
        ctx->ctx = &cctx;
        cctx.engine = engine;
        cctx.fmap = cli_calloc(sizeof(fmap_t*), engine->maxreclevel+2);
        if (!cctx.fmap) {
            fprintf(stderr,"Out of memory\n");
            exit(3);
        }

        /* Install the trace hooks (see dbg_state / tracehook above) */
        memset(&dbg_state, 0, sizeof(dbg_state));
        dbg_state.file = "<libclamav>";
        dbg_state.line = 0;
        dbg_state.col = 0;
        dbg_state.showline = !optget(opts, "no-trace-showsource")->enabled;
        tracelevel = optget(opts, "trace")->numarg;
        cli_bytecode_context_set_trace(ctx, tracelevel,
                                       tracehook,
                                       tracehook_op,
                                       tracehook_val,
                                       tracehook_ptr);

        /* Optional: function id and integer parameters from the command line */
        if (opts->filename[1]) {
            funcid = atoi(opts->filename[1]);
        }
        cli_bytecode_context_setfuncid(ctx, bc, funcid);
        if (debug_flag)
            printf("[clambc] Running bytecode function :%u\n", funcid);

        if (opts->filename[1]) {
            i=2;
            while (opts->filename[i]) {
                rc = cli_bytecode_context_setparam_int(ctx, i-2, atoi(opts->filename[i]));
                if (rc != CL_SUCCESS) {
                    fprintf(stderr,"Unable to set param %u: %s\n", i-2, cl_strerror(rc));
                }
                i++;
            }
        }

        /* Optional: map an input file for the bytecode to scan */
        if ((opt = optget(opts,"input"))->enabled) {
            fd = open(opt->strarg, O_RDONLY);
            if (fd == -1) {
                fprintf(stderr, "Unable to open input file %s: %s\n", opt->strarg, strerror(errno));
                optfree(opts);
                exit(5);
            }
            map = fmap(fd, 0, 0);
            if (!map) {
                fprintf(stderr, "Unable to map input file %s\n", opt->strarg);
                exit(5);
            }
            rc = cli_bytecode_context_setfile(ctx, map);
            if (rc != CL_SUCCESS) {
                fprintf(stderr, "Unable to set file %s: %s\n", opt->strarg, cl_strerror(rc));
                optfree(opts);
                exit(5);
            }
        }

        /* for testing: recognizable sentinel match data (see deadbeefcounts) */
        ctx->hooks.match_counts = deadbeefcounts;
        ctx->hooks.match_offsets = deadbeefcounts;

        rc = cli_bytecode_run(&bcs, bc, ctx);
        if (rc != CL_SUCCESS) {
            fprintf(stderr,"Unable to run bytecode: %s\n", cl_strerror(rc));
        } else {
            uint64_t v;
            if (debug_flag)
                printf("[clambc] Bytecode run finished\n");
            v = cli_bytecode_context_getresult_int(ctx);
            if (debug_flag)
                printf("[clambc] Bytecode returned: 0x%llx\n", (long long)v);
        }
        cli_bytecode_context_destroy(ctx);
        if (map)
            funmap(map);
        cl_engine_free(engine);
        free(cctx.fmap);
    }

    cli_bytecode_destroy(bc);
    cli_bytecode_done(&bcs);
    free(bc);
    optfree(opts);
    if (fd != -1)
        close(fd);
    if (debug_flag)
        printf("[clambc] Exiting\n");
    cl_cleanup_crypto();
    return 0;
}
| gpl-2.0 |
pavel-pimenov/flylinkdc-r5xx | zlib-ng/deflate_quick.c | 1 | 4268 | /*
* The deflate_quick deflate strategy, designed to be used when cycles are
* at a premium.
*
* Copyright (C) 2013 Intel Corporation. All rights reserved.
* Authors:
* Wajdi Feghali <wajdi.k.feghali@intel.com>
* Jim Guilford <james.guilford@intel.com>
* Vinodh Gopal <vinodh.gopal@intel.com>
* Erdinc Ozturk <erdinc.ozturk@intel.com>
* Jim Kukunas <james.t.kukunas@linux.intel.com>
*
* Portions are Copyright (C) 2016 12Sided Technology, LLC.
* Author:
* Phil Vachon <pvachon@12sidedtech.com>
*
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zbuild.h"
#include "deflate.h"
#include "deflate_p.h"
#include "functable.h"
#include "trees_emit.h"
extern const ct_data static_ltree[L_CODES+2];
extern const ct_data static_dtree[D_CODES];
/* Open a deflate block using the static Huffman trees: emit the block
 * header, record whether it is the final block (block_open: 1 = open,
 * 2 = open and final), and reset block_start to the current strstart. */
#define QUICK_START_BLOCK(s, last) { \
    zng_tr_emit_tree(s, STATIC_TREES, last); \
    s->block_open = 1 + (int)last; \
    s->block_start = (int)s->strstart; \
}

/* Close the currently open block (no-op if none): emit the end-of-block
 * symbol, flush pending output and -- NOTE -- expand a `return` out of the
 * *caller* when the output buffer fills, so this macro is only usable
 * inside deflate_quick(). */
#define QUICK_END_BLOCK(s, last) { \
    if (s->block_open) { \
        zng_tr_emit_end_block(s, static_ltree, last); \
        s->block_open = 0; \
        s->block_start = (int)s->strstart; \
        flush_pending(s->strm); \
        if (s->strm->avail_out == 0) \
            return (last) ? finish_started : need_more; \
    } \
}
/* Minimal-cycle deflate strategy: static Huffman trees only, a single hash
 * probe per position, and no lazy matching. Returns a block_state telling
 * deflate() whether more input/output is needed or the stream is finished. */
Z_INTERNAL block_state deflate_quick(deflate_state *s, int flush) {
    Pos hash_head;
    int64_t dist;
    unsigned match_len, last;

    last = (flush == Z_FINISH) ? 1 : 0;
    if (UNLIKELY(last && s->block_open != 2)) {
        /* Emit end of previous block */
        QUICK_END_BLOCK(s, 0);
        /* Emit start of last block */
        QUICK_START_BLOCK(s, last);
    } else if (UNLIKELY(s->block_open == 0 && s->lookahead > 0)) {
        /* Start new block only when we have lookahead data, so that if no
           input data is given an empty block will not be written */
        QUICK_START_BLOCK(s, last);
    }

    for (;;) {
        /* Ensure room in the pending buffer for a worst-case emit before
         * doing anything else; flush (and possibly bail) if it is full. */
        if (UNLIKELY(s->pending + ((BIT_BUF_SIZE + 7) >> 3) >= s->pending_buf_size)) {
            flush_pending(s->strm);
            if (s->strm->avail_out == 0) {
                return (last && s->strm->avail_in == 0 && s->bi_valid == 0 && s->block_open == 0) ? finish_started : need_more;
            }
        }

        /* Refill the window when the lookahead runs low */
        if (UNLIKELY(s->lookahead < MIN_LOOKAHEAD)) {
            fill_window(s);
            if (UNLIKELY(s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH)) {
                return need_more;
            }
            if (UNLIKELY(s->lookahead == 0))
                break;          /* all input consumed */

            if (UNLIKELY(s->block_open == 0)) {
                /* Start new block when we have lookahead data, so that if no
                   input data is given an empty block will not be written */
                QUICK_START_BLOCK(s, last);
            }
        }

        /* Single hash probe: emit a length/distance pair if the candidate
         * matches, otherwise fall through and emit a literal. */
        if (LIKELY(s->lookahead >= WANT_MIN_MATCH)) {
            hash_head = functable.quick_insert_string(s, s->strstart);
            dist = (int64_t)s->strstart - hash_head;

            if (dist <= MAX_DIST(s) && dist > 0) {
                const uint8_t *str_start = s->window + s->strstart;
                const uint8_t *match_start = s->window + hash_head;

                if (zmemcmp_2(str_start, match_start) == 0) {
                    /* first two bytes match; extend with a fast 256-byte compare */
                    match_len = functable.compare256(str_start+2, match_start+2) + 2;

                    if (match_len >= WANT_MIN_MATCH) {
                        if (UNLIKELY(match_len > s->lookahead))
                            match_len = s->lookahead;

                        check_match(s, s->strstart, hash_head, match_len);

                        zng_tr_emit_dist(s, static_ltree, static_dtree, match_len - STD_MIN_MATCH, (uint32_t)dist);
                        s->lookahead -= match_len;
                        s->strstart += match_len;
                        continue;
                    }
                }
            }
        }

        /* No usable match: emit one literal and advance */
        zng_tr_emit_lit(s, static_ltree, s->window[s->strstart]);
        s->strstart++;
        s->lookahead--;
    }

    /* Remember how many bytes still need hashing on the next call */
    s->insert = s->strstart < (STD_MIN_MATCH - 1) ? s->strstart : (STD_MIN_MATCH - 1);
    if (UNLIKELY(last)) {
        QUICK_END_BLOCK(s, 1);
        return finish_done;
    }

    QUICK_END_BLOCK(s, 0);
    return block_done;
}
| gpl-2.0 |
gittup/tup | src/compat/win32/mmap.c | 1 | 1640 | /* vim: set ts=8 sw=8 sts=8 noet tw=78:
*
* tup - A file-based build system
*
* Copyright (C) 2013-2022 Mike Shal <marfey@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <windows.h>
#include <sys/mman.h>
#include <stdio.h>
/* Minimal read-only mmap() emulation on top of Win32 file mappings.
 * Only (PROT_READ, MAP_PRIVATE/SHARED, offset 0) semantics are provided;
 * addr/prot/flags/offset are accepted but ignored.
 *
 * Fixes over the previous revision:
 *  - CreateFileMapping() reports failure by returning NULL, not
 *    INVALID_HANDLE_VALUE (per MSDN), so the old error check never fired;
 *  - the mapping handle is now closed after MapViewOfFile() (the view keeps
 *    the mapping object alive), avoiding one leaked kernel handle per call;
 *  - a NULL result from MapViewOfFile() now returns MAP_FAILED instead of
 *    being handed to the caller as a "valid" NULL mapping.
 */
void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
	union {
		HANDLE h;
		intptr_t v;
	} handle;
	HANDLE mapping;
	void *view;

	if(addr) {/*unused */}
	if(prot) {/*unused */}
	if(flags) {/*unused */}
	if(offset) {/*unused */}
	handle.v = _get_osfhandle(fd);
	if(handle.h == INVALID_HANDLE_VALUE) {
		fprintf(stderr, "tup mmap windows error: Failed to call _get_osfhandle. Error code=0x%08lx\n", GetLastError());
		return MAP_FAILED;
	}
	mapping = CreateFileMapping(handle.h, NULL, PAGE_READONLY, 0, length, NULL);
	if(mapping == NULL) {
		fprintf(stderr, "tup mmap windows error: Failed to call CreateFileMapping. Error code=0x%08lx\n", GetLastError());
		return MAP_FAILED;
	}
	view = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, length);
	/* The view holds its own reference to the mapping object, so the handle
	 * can (and should) be closed here regardless of success. */
	CloseHandle(mapping);
	if(view == NULL) {
		fprintf(stderr, "tup mmap windows error: Failed to call MapViewOfFile. Error code=0x%08lx\n", GetLastError());
		return MAP_FAILED;
	}
	return view;
}
| gpl-2.0 |
greedyliz/SpecialK-APPatch | src/D3D9/texmgr.cpp | 1 | 105961 | /**
* This file is part of Special K.
*
* Special K is free software : you can redistribute it
* and/or modify it under the terms of the GNU General Public License
* as published by The Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* Special K is distributed in the hope that it will be useful,
*
* But WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Special K.
*
* If not, see <http://www.gnu.org/licenses/>.
*
**/
#include <d3d9.h>
#include <SpecialK/d3d9_backend.h>
#include <SpecialK/d3d9/texmgr.h>
#include <SpecialK/config.h>
#include <SpecialK/hooks.h>
#include <SpecialK/log.h>
#include <SpecialK/utility.h>
#include <SpecialK/framerate.h>
#include <SpecialK/core.h>
#include <process.h>
#include <cstdint>
#include <algorithm>
#include "command.h"
#include <lzma/7z.h>
#include <lzma/7zAlloc.h>
#include <lzma/7zBuf.h>
#include <lzma/7zCrc.h>
#include <lzma/7zFile.h>
#include <lzma/7zVersion.h>
static ISzAlloc g_Alloc = { SzAlloc, SzFree };
#include <atlbase.h>
#include <memory>
#include <ctime>
#include <map>
#include <set>
#include <queue>
#include <vector>
#include <unordered_set>
#include <unordered_map>
#include <comdef.h>
using namespace SK::D3D9;
iSK_Logger tex_log;
extern std::wstring SK_D3D11_res_root;
static int d3d9_max_cache_in_mib = 3072;
static D3DXCreateTextureFromFileInMemoryEx_pfn D3DXCreateTextureFromFileInMemoryEx_Original = nullptr;
static BeginScene_pfn D3D9BeginScene_Original = nullptr;
static EndScene_pfn D3D9EndScene_Original = nullptr;
static SetRenderState_pfn D3D9SetRenderState_Original = nullptr;
static StretchRect_pfn D3D9StretchRect_Original = nullptr;
static CreateTexture_pfn D3D9CreateTexture_Original = nullptr;
static CreateRenderTarget_pfn D3D9CreateRenderTarget_Original = nullptr;
static CreateDepthStencilSurface_pfn D3D9CreateDepthStencilSurface_Original = nullptr;
static SetTexture_pfn D3D9SetTexture_Original = nullptr;
static SetRenderTarget_pfn D3D9SetRenderTarget_Original = nullptr;
static SetDepthStencilSurface_pfn D3D9SetDepthStencilSurface_Original = nullptr;
extern SetSamplerState_pfn D3D9SetSamplerState_Original;
using QueryPerformanceCounter_pfn = BOOL (WINAPI *)( _Out_ LARGE_INTEGER *lpPerformanceCount );
extern QueryPerformanceCounter_pfn QueryPerformanceCounter_Original;
// D3DXSaveSurfaceToFile issues a StretchRect, but we don't want to log that...
bool dumping = false;
bool __remap_textures = true;
bool __need_purge = false;
bool __log_used = false;
bool __show_cache = true;//false;
// Cleanup
std::queue <std::wstring> screenshots_to_delete;
// Textures that are missing mipmaps
std::set <IDirect3DBaseTexture9 *> incomplete_textures;
SK::D3D9::TextureThreadPool *SK::D3D9::resample_pool = nullptr;
SK::D3D9::StreamSplitter SK::D3D9::stream_pool;
TextureManager SK::D3D9::tex_mgr;
std::unordered_map <uint32_t, IDirect3DTexture9*> injected_textures;
std::unordered_map <uint32_t, float> injected_load_times;
std::unordered_map <uint32_t, size_t> injected_sizes;
SK_ThreadSafe_HashSet <IDirect3DSurface9 *> outstanding_screenshots;
// Not excellent screenshots, but screenhots
// that aren't finished yet and we can't reset
// the D3D9 device because of.
size_t
SK::D3D9::TextureManager::getTextureArchives (std::vector <std::wstring>& arcs)
{
  // Hand the caller a copy of the known texture-archive paths.
  arcs.assign (archives.cbegin (), archives.cend ());

  return arcs.size ();
}
size_t
SK::D3D9::TextureManager::getInjectableTextures (SK::D3D9::TexList& texture_list) const
{
  // Append one (checksum, record) pair per known injectable texture.
  for (const auto& record : injectable_textures)
  {
    texture_list.push_back (std::make_pair (record.first, record.second));
  }

  return texture_list.size ();
}
// Returns a reference to the injectable-texture record for `checksum`,
// creating a blank record if none exists yet.
//
// BUGFIX: the previous implementation bound a reference to a function-local
// static TexRecord and then *assigned* the map entry into it (C++ references
// cannot be re-seated), so every caller shared -- and mutated -- one static
// record, and writes through the returned reference never reached
// injectable_textures. Binding directly to the map entry restores the
// intended behavior.
SK::D3D9::TexRecord&
SK::D3D9::TextureManager::getInjectableTexture (uint32_t checksum)
{
  injector.lockInjection ();

  // operator[] default-constructs a blank record on first lookup, which is
  // exactly what the old "new_tex => zero it" dance was trying to achieve.
  TexRecord& injectable =
    injectable_textures [checksum];

  injector.unlockInjection ();

  return injectable;
}
#if 0
COM_DECLSPEC_NOTHROW
__declspec (noinline)
HRESULT
STDMETHODCALLTYPE
D3D9StretchRect_Detour ( IDirect3DDevice9 *This,
IDirect3DSurface9 *pSourceSurface,
const RECT *pSourceRect,
IDirect3DSurface9 *pDestSurface,
const RECT *pDestRect,
D3DTEXTUREFILTERTYPE Filter )
{
#if 0
if (tzf::RenderFix::tracer.log && (! dumping))
{
RECT source, dest;
if (pSourceRect == nullptr) {
D3DSURFACE_DESC desc;
pSourceSurface->GetDesc (&desc);
source.left = 0;
source.top = 0;
source.bottom = desc.Height;
source.right = desc.Width;
} else
source = *pSourceRect;
if (pDestRect == nullptr) {
D3DSURFACE_DESC desc;
pDestSurface->GetDesc (&desc);
dest.left = 0;
dest.top = 0;
dest.bottom = desc.Height;
dest.right = desc.Width;
} else
dest = *pDestRect;
dll_log->Log ( L"[FrameTrace] StretchRect - "
L"%s[%lu,%lu/%lu,%lu] ==> %s[%lu,%lu/%lu,%lu]",
pSourceRect != nullptr ?
L" " : L" *",
source.left, source.top, source.right, source.bottom,
pDestRect != nullptr ?
L" " : L" *",
dest.left, dest.top, dest.right, dest.bottom );
}
#endif
dumping = false;
return D3D9StretchRect (This, pSourceSurface, pSourceRect,
pDestSurface, pDestRect,
Filter);
}
#endif
// Hooked IDirect3DDevice9::CreateRenderTarget -- logs the (unexpected) call
// with all of its parameters, then forwards to the original unchanged.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9CreateRenderTarget_Detour (IDirect3DDevice9     *This,
                               UINT                  Width,
                               UINT                  Height,
                               D3DFORMAT             Format,
                               D3DMULTISAMPLE_TYPE   MultiSample,
                               DWORD                 MultisampleQuality,
                               BOOL                  Lockable,
                               IDirect3DSurface9   **ppSurface,
                               HANDLE               *pSharedHandle)
{
  tex_log.Log (L"[Unexpected][!] IDirect3DDevice9::CreateRenderTarget (%lu, %lu, "
               L"%lu, %lu, %lu, %lu, %08Xh, %08Xh)",
               Width, Height, Format, MultiSample, MultisampleQuality,
               Lockable, ppSurface, pSharedHandle);

  return D3D9CreateRenderTarget_Original (This, Width, Height, Format,
                                          MultiSample, MultisampleQuality,
                                          Lockable, ppSurface, pSharedHandle);
}
// Hooked IDirect3DDevice9::CreateDepthStencilSurface -- logs the
// (unexpected) call with all of its parameters, then forwards to the
// original unchanged.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9CreateDepthStencilSurface_Detour (IDirect3DDevice9     *This,
                                      UINT                  Width,
                                      UINT                  Height,
                                      D3DFORMAT             Format,
                                      D3DMULTISAMPLE_TYPE   MultiSample,
                                      DWORD                 MultisampleQuality,
                                      BOOL                  Discard,
                                      IDirect3DSurface9   **ppSurface,
                                      HANDLE               *pSharedHandle)
{
  tex_log.Log (L"[Unexpected][!] IDirect3DDevice9::CreateDepthStencilSurface (%lu, %lu, "
               L"%lu, %lu, %lu, %lu, %08Xh, %08Xh)",
               Width, Height, Format, MultiSample, MultisampleQuality,
               Discard, ppSurface, pSharedHandle);

  return D3D9CreateDepthStencilSurface_Original (This, Width, Height, Format,
                                                 MultiSample, MultisampleQuality,
                                                 Discard, ppSurface, pSharedHandle);
}
// Number of textures currently replaced by injected overrides
// (lock-free read of the shared counter).
int
SK::D3D9::TextureManager::numInjectedTextures (void) const
{
  return ReadNoFence (&injected_count);
}

// Bytes of VRAM consumed by injected (override) textures.
int64_t
SK::D3D9::TextureManager::cacheSizeInjected (void) const
{
  return ReadNoFence64 (&injected_size);
}

// Bytes of VRAM consumed by cached game (non-override) textures.
int64_t
SK::D3D9::TextureManager::cacheSizeBasic (void) const
{
  return ReadNoFence64 (&basic_size);
}

// Combined cache footprint; each component is clamped at zero so a
// transiently negative counter cannot drive the total negative.
int64_t
SK::D3D9::TextureManager::cacheSizeTotal (void) const
{
  return std::max (0LL, cacheSizeBasic ()) + std::max (0LL, cacheSizeInjected ());
}
// True if this texture was created with render-target / depth-stencil usage
// (registered via trackRenderTarget from the CreateTexture hook).
bool
SK::D3D9::TextureManager::isRenderTarget (IDirect3DBaseTexture9* pTex) const
{
  return known.render_targets.count (pTex) != 0;
}

// Register a render-target texture; the stored value is a monotonically
// increasing ordinal used as its "creation time".
void
SK::D3D9::TextureManager::trackRenderTarget (IDirect3DBaseTexture9* pTex)
{
  if (! known.render_targets.count (pTex))
  {
    known.render_targets.try_emplace ( pTex,
                     (uint32_t)known.render_targets.size () );
  }
}

// Mark a known render target as used this frame (called from SetTexture).
void
SK::D3D9::TextureManager::applyTexture (IDirect3DBaseTexture9* pTex)
{
  if (known.render_targets.count (pTex) != 0)
  {
    used.render_targets.emplace (pTex);
  }
}

// True if the render target has been bound since the last resetUsedTextures.
bool
SK::D3D9::TextureManager::isUsedRenderTarget (IDirect3DBaseTexture9* pTex) const
{
  return used.render_targets.count (pTex) != 0;
}

// Clear the per-frame "used" set.
void
SK::D3D9::TextureManager::resetUsedTextures (void)
{
  used.render_targets.clear ();
}

// Copy the per-frame "used" set into `targets`; returns its size.
size_t
SK::D3D9::TextureManager::getUsedRenderTargets (std::vector <IDirect3DBaseTexture9 *>& targets) const
{
  targets = {
    used.render_targets.cbegin (), used.render_targets.cend ()
  };

  return targets.size ();
}

// Creation ordinal recorded by trackRenderTarget, or 0xFFFFFFFF if the
// texture is not a known render target.
uint32_t
SK::D3D9::TextureManager::getRenderTargetCreationTime (IDirect3DBaseTexture9* pTex)
{
  if (known.render_targets.count (pTex))
    return known.render_targets [pTex];

  return 0xFFFFFFFFUL;
}
// Hooked IDirect3DDevice9::StretchRect. Clears the `dumping` flag
// (D3DXSaveSurfaceToFile issues its own StretchRect -- see the comment at
// the flag's definition) and forwards to the original function.
COM_DECLSPEC_NOTHROW
__declspec (noinline)
HRESULT
STDMETHODCALLTYPE
D3D9StretchRect_Detour (      IDirect3DDevice9    *This,
                              IDirect3DSurface9   *pSourceSurface,
                        const RECT                *pSourceRect,
                              IDirect3DSurface9   *pDestSurface,
                        const RECT                *pDestRect,
                              D3DTEXTUREFILTERTYPE Filter )
{
  dumping = false;

  return D3D9StretchRect_Original (This, pSourceSurface, pSourceRect,
                                         pDestSurface,   pDestRect,
                                         Filter);
}
std::set <UINT> SK::D3D9::active_samplers;
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9SetDepthStencilSurface_Detour (
                  _In_ IDirect3DDevice9  *This,
                  _In_ IDirect3DSurface9 *pNewZStencil
)
{
  // The primary-device test is currently informational only: both branches
  // take the exact same action and forward to the original function.
  if (This == SK_GetCurrentRenderBackend ().device)
    return D3D9SetDepthStencilSurface_Original (This, pNewZStencil);

  // Not the primary render device -- pass straight through as well.
  return D3D9SetDepthStencilSurface_Original (This, pNewZStencil);
}
uint32_t debug_tex_id = 0UL;
uint32_t current_tex [256] = { 0ui32 };
// Hooked IDirect3DDevice9::SetTexture. Tracks shader/texture associations,
// records texture usage, substitutes injected replacement textures, and
// optionally forces clamped sampler addressing for tracked shaders.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9SetTexture_Detour (
                  _In_ IDirect3DDevice9      *This,
                  _In_ DWORD                  Sampler,
                  _In_ IDirect3DBaseTexture9 *pTexture
)
{
  if (pTexture == nullptr)
    return D3D9SetTexture_Original (This, Sampler, pTexture);

  // Per-frame usage tracking (render-target "used" set)
  tex_mgr.applyTexture (pTexture);

  // Shader-association tracking for the currently tracked render target
  tracked_rt.active = (pTexture == tracked_rt.tracking_tex);

  if (tracked_rt.active)
  {
    tracked_rt.vertex_shaders.emplace (Shaders.vertex.current.crc32c);
    tracked_rt.pixel_shaders.emplace  (Shaders.pixel.current.crc32c);
  }

  // Remember which textures the tracked shaders bind (samplers clamped to 0..15)
  if (Shaders.vertex.current.crc32c == tracked_vs.crc32c)
    tracked_vs.current_textures [std::min (15UL, Sampler)] = pTexture;

  if (Shaders.pixel.current.crc32c == tracked_ps.crc32c)
    tracked_ps.current_textures [std::min (15UL, Sampler)] = pTexture;

  uint32_t tex_crc32c = 0x0;  // checksum of the bound texture (set below, not read after)

  void* dontcare;
  // Wrapped textures answer IID_SKTextureD3D9 (COM-abuse used as a marker)
  if ( pTexture != nullptr &&
       pTexture->QueryInterface (IID_SKTextureD3D9, &dontcare) == S_OK )
  {
    auto* pSKTex =
      dynamic_cast <ISKTextureD3D9 *> (pTexture);

    current_tex [std::min (255UL, Sampler)] = pSKTex->tex_crc32c;

    if (pSKTex->tex_crc32c != 0x00)
    {
      tex_mgr.textures_used.emplace (pSKTex->tex_crc32c);
    }

    tex_crc32c =
      pSKTex->tex_crc32c;

    pTexture =
      pSKTex->use ();

    //
    // This is how blocking is implemented -- only do it when a texture that needs
    //   this feature is being applied. Spin until the override finishes loading,
    //   servicing pending load requests ourselves while we wait.
    //
    while ( __remap_textures && pSKTex->must_block &&
                                pSKTex->pTexOverride == nullptr )
    {
      if (tex_mgr.injector.hasPendingLoads ())
      {
        tex_mgr.loadQueuedTextures ();
      }

      else
      {
        SwitchToThread ();
      }
    }

    // Draw with the override (if present) instead of the original texture
    pTexture = pSKTex->getDrawTexture ();
  }

  // Force clamped texture addressing when either tracked shader asks for it
  bool clamp = false;

  if (Shaders.pixel.current.crc32c == tracked_ps.crc32c && tracked_ps.clamp_coords)
    clamp = true;

  if (Shaders.vertex.current.crc32c == tracked_vs.crc32c && tracked_vs.clamp_coords)
    clamp = true;

  if ( clamp )
  {
    // Negative LOD bias sharpens the clamped samplers (bit-cast into DWORD
    // as the fixed-function API requires)
    float fMin = -3.0f;

    D3D9SetSamplerState_Original (This, Sampler, D3DSAMP_ADDRESSU,      D3DTADDRESS_CLAMP );
    D3D9SetSamplerState_Original (This, Sampler, D3DSAMP_ADDRESSV,      D3DTADDRESS_CLAMP );
    D3D9SetSamplerState_Original (This, Sampler, D3DSAMP_ADDRESSW,      D3DTADDRESS_CLAMP );
    D3D9SetSamplerState_Original (This, Sampler, D3DSAMP_MIPMAPLODBIAS, *reinterpret_cast <DWORD *>(&fMin) );
  }

  return D3D9SetTexture_Original (This, Sampler, pTexture);
}
IDirect3DSurface9* pOld = nullptr;
// Hooked IDirect3DDevice9::CreateTexture. Optionally rewrites formats for
// specific games, registers render targets with the texture manager and
// wraps ordinary textures in ISKTextureD3D9 for cache/injection tracking.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9CreateTexture_Detour (IDirect3DDevice9   *This,
                          UINT                Width,
                          UINT                Height,
                          UINT                Levels,
                          DWORD               Usage,
                          D3DFORMAT           Format,
                          D3DPOOL             Pool,
                          IDirect3DTexture9 **ppTexture,
                          HANDLE             *pSharedHandle)
{
  int levels = Levels;

  // Game-specific workaround: promote 16-bit render targets to 32-bit
  // (except the 2048-wide ones) in Ys Seven.
  if (SK_GetCurrentGameID () == SK_GAME_ID::YS_Seven)
  {
    if ( ( Usage & D3DUSAGE_RENDERTARGET ) )
    {
      if (Format == D3DFMT_R5G6B5 && ( Width != 2048 ) )
      {
        Format = D3DFMT_X8R8G8B8;
      }
    }
  }

  HRESULT result =
    D3D9CreateTexture_Original (This, Width, Height, levels, Usage,
                                Format, Pool, ppTexture, pSharedHandle);

  if ( SUCCEEDED (result) &&
       ( ( Usage & D3DUSAGE_RENDERTARGET ) ||
         ( Usage & D3DUSAGE_DEPTHSTENCIL ) ) )
  {
    // Render targets / depth-stencils are tracked, never wrapped
    tex_mgr.trackRenderTarget (*ppTexture);
  }

  else if (SUCCEEDED (result))
  {
    // Don't wrap textures the injector itself is creating
    if (! SK::D3D9::tex_mgr.injector.isInjectionThread ())
    {
      static HMODULE hModSteamOverlay =
#ifndef _WIN64
        GetModuleHandle (L"gameoverlayrenderer.dll");
#else
        GetModuleHandle (L"gameoverlayrenderer64.dll");
#endif

      // Recent changes to the Steam overlay cause a million
      //   textures to be pre-loaded at start, we need to ignore
      //     them or the overlay will kill performance.
      HMODULE hModCaller = SK_GetCallingDLL ();

      if ( hModCaller != hModSteamOverlay &&
           hModCaller != SK_GetDLL () )
        // Wraps *ppTexture in place; the wrapper takes over the pointer
        // (intentionally not stored here -- presumably owned via the
        // texture manager; confirm against ISKTextureD3D9's ctor).
        new ISKTextureD3D9 (ppTexture, 0, 0x00);
    }
  }

  return result;
}
// Hook for IDirect3DDevice9::BeginScene.
// Resets the per-scene draw counter, then forwards to the original function.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9BeginScene_Detour (IDirect3DDevice9* This)
{
  // Anything that is not the primary render device passes straight through.
  if (This != SK_GetCurrentRenderBackend ().device)
  {
    dll_log.Log (L"[D3D9 BkEnd] >> WARNING: D3D9 BeginScene came from unknown IDirect3DDevice9! << ");

    return D3D9BeginScene_Original (This);
  }

  // New scene: start counting draws from zero.
  draw_state.draws = 0;

  return D3D9BeginScene_Original (This);
}
// Disabled legacy EndScene hook (TZFix-era code). Both branches forward to
// the original function unchanged, so the hook was compiled out entirely.
#if 0
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9EndScene_Detour (IDirect3DDevice9* This)
{
// Ignore anything that's not the primary render device.
if (This != tzf::RenderFix::pDevice) {
return D3D9EndScene_Original (This);
}
return D3D9EndScene_Original (This);
}
#endif
// Size of one vtable slot. NOTE: `sizeof LPCVOID` (without parentheses) is
// ill-formed -- sizeof applied to a type name requires parentheses.
#define __PTR_SIZE   sizeof (LPCVOID)
#define __PAGE_PRIVS PAGE_EXECUTE_READWRITE

// Patches slot _Index of the vtable reached through *_Base to point at
// _Override, saving the previous entry in _Original (cast to _Type) the
// first time. The page is made writable for the patch and its protection
// restored afterwards.
//
// Fixed: `(##_Type)` token-pasted `(` onto `_Type`, which is an invalid
// preprocessor paste (only tolerated by MSVC); a plain cast is correct.
#define D3D9_VIRTUAL_OVERRIDE(_Base,_Index,_Name,_Override,_Original,_Type) {  \
  void** vftable = *(void***)*_Base;                                           \
                                                                               \
  if (vftable [_Index] != _Override) {                                         \
    DWORD dwProtect;                                                           \
                                                                               \
    VirtualProtect (&vftable [_Index], __PTR_SIZE, __PAGE_PRIVS, &dwProtect);  \
                                                                               \
    /*dll_log->Log (L" Old VFTable entry for %s: %08Xh (Memory Policy: %s)",*/ \
    /*L##_Name, vftable [_Index], */                                           \
    /*SK_DescribeVirtualProtectFlags (dwProtect)); */                          \
                                                                               \
    if (_Original == NULL)                                                     \
      _Original = (_Type)vftable [_Index];                                     \
                                                                               \
    /*dll_log->Log (L" + %s: %08Xh", L#_Original, _Original);*/                \
                                                                               \
    vftable [_Index] = _Override;                                              \
                                                                               \
    VirtualProtect (&vftable [_Index], __PTR_SIZE, dwProtect, &dwProtect);     \
                                                                               \
    /*dll_log->Log (L" New VFTable entry for %s: %08Xh (Memory Policy: %s)\n",*/\
    /*L##_Name, vftable [_Index], */                                           \
    /*SK_DescribeVirtualProtectFlags (dwProtect)); */                          \
  }                                                                            \
}
// Creates one texture worker: three auto-reset control events
// (start / trim / shutdown) plus the worker thread itself, which begins
// running ThreadProc immediately with `this` as its argument.
SK::D3D9::TextureWorkerThread::TextureWorkerThread (SK::D3D9::TextureThreadPool* pool)
{
pool_ = pool;
job_ = nullptr;
// Auto-reset (bManualReset = FALSE), initially unsignaled.
control_.start =
CreateEvent (nullptr, FALSE, FALSE, nullptr);
control_.trim =
CreateEvent (nullptr, FALSE, FALSE, nullptr);
control_.shutdown =
CreateEvent (nullptr, FALSE, FALSE, nullptr);
// _beginthreadex (not CreateThread) so the CRT is initialized per-thread.
thread_ =
(HANDLE)_beginthreadex ( nullptr,
0,
ThreadProc,
this,
0,
&thread_id_ );
}
// Signals the worker to shut down, joins it, then releases all kernel
// handles (events in reverse creation order, then the thread itself).
SK::D3D9::TextureWorkerThread::~TextureWorkerThread (void)
{
shutdown ();
WaitForSingleObject (thread_, INFINITE);
CloseHandle (control_.shutdown);
CloseHandle (control_.trim);
CloseHandle (control_.start);
CloseHandle (thread_);
}
// Returns true while either worker pool (streaming, or resampling when one
// exists) still has unfinished jobs.
//
// Cleaned up: the original declared a `ret` local and kept an unreachable
// `return ret;` (plus commented-out critical-section code) after the real
// return statement.
bool
SK::D3D9::TextureManager::Injector::hasPendingLoads (void) const
{
  return
    ( stream_pool.working () ||
        ( resample_pool != nullptr && resample_pool->working () ) );
}
void
SK::D3D9::TextureManager::Injector::beginLoad (void)
{
SK_TLS_Bottom ()->texture_management.injection_thread = TRUE;
}
void
SK::D3D9::TextureManager::Injector::endLoad (void)
{
SK_TLS_Bottom ()->texture_management.injection_thread = FALSE;
}
// Returns true if any texture stream is in flight or either worker queue
// still holds jobs.
bool
SK::D3D9::TextureManager::Injector::hasPendingStreams (void) const
{
  const bool active =
    ( ReadAcquire (&streaming)       != 0 ) ||
    ( stream_pool.queueLength ()     >  0 ) ||
    ( resample_pool != nullptr &&
      resample_pool->queueLength ()  >  0 );

  return active;
}
// Returns true while a load for `checksum` is in flight; the in-flight map
// is consulted under the streaming lock.
bool
SK::D3D9::TextureManager::Injector::isStreaming (uint32_t checksum) const
{
  lockStreaming ();

  const bool in_flight =
    ( textures_in_flight.count (checksum) != 0 );

  unlockStreaming ();

  return in_flight;
}
// Removes the in-flight bookkeeping entry for `checksum`, if present.
void
SK::D3D9::TextureManager::Injector::finishedStreaming (uint32_t checksum)
{
  lockStreaming ();

  auto flight =
    textures_in_flight.find (checksum);

  if (flight != textures_in_flight.end ())
    textures_in_flight.erase (flight);

  unlockStreaming ();
}
// Registers `load_op` as in flight, keyed by its checksum. An existing
// entry for the same checksum is left untouched (emplace semantics).
void
SK::D3D9::TextureManager::Injector::addTextureInFlight ( SK::D3D9::TexLoadRequest* load_op )
{
  lockStreaming   ();

  textures_in_flight.emplace (load_op->checksum, load_op);

  unlockStreaming ();
}
// Looks up the in-flight load request for `checksum`; nullptr when none.
//
// NOTE: the caller holds no reference -- the load may finish (and the entry
// disappear) as soon as the lock is released.
SK::D3D9::TexLoadRequest*
SK::D3D9::TextureManager::Injector::getTextureInFlight (uint32_t checksum)
{
  TexLoadRequest* pReq = nullptr;

  lockStreaming ();

  auto flight =
    textures_in_flight.find (checksum);

  if (flight != textures_in_flight.end ())
    pReq = flight->second;

  unlockStreaming ();

  return pReq;
}
// Semaphore throttling concurrent archive decompression of large streamed
// textures (waited on / released in TextureManager::injectTexture).
HANDLE decomp_semaphore;
#include <SpecialK/tls.h>
// Keep a pool of memory around so that we are not allocating and freeing
// memory constantly...
namespace streaming_memory {
bool alloc (size_t len)
{
SK_TLS::tex_mgmt_s::stream_pool_s* mpool =
&SK_TLS_Bottom ()->texture_management.streaming_memory;
if (mpool->data_len < len)
{
if (mpool->data != nullptr)
free (mpool->data);
if (len < 8192 * 1024)
mpool->data_len = 8192 * 1024;
else
mpool->data_len = len;
mpool->data =
malloc (mpool->data_len);
mpool->data_age = timeGetTime ();
if (mpool->data != nullptr)
{
return true;
}
else
{
mpool->data_len = 0;
return false;
}
}
else
{
return true;
}
}
void*& data (void)
{
SK_TLS::tex_mgmt_s::stream_pool_s* mpool =
&SK_TLS_Bottom ()->texture_management.streaming_memory;
return mpool->data;
}
size_t& data_len (void)
{
SK_TLS::tex_mgmt_s::stream_pool_s* mpool =
&SK_TLS_Bottom ()->texture_management.streaming_memory;
return mpool->data_len;
}
uint32_t& data_age (void)
{
SK_TLS::tex_mgmt_s::stream_pool_s* mpool =
&SK_TLS_Bottom ()->texture_management.streaming_memory;
return mpool->data_age;
}
void trim (size_t max_size, uint32_t min_age)
{
SK_TLS::tex_mgmt_s::stream_pool_s* mpool =
&SK_TLS_Bottom ()->texture_management.streaming_memory;
if ( mpool->data_age < min_age )
{
if (mpool->data_len > max_size)
{
free (mpool->data);
mpool->data = nullptr;
if (max_size > 0)
mpool->data = malloc (max_size);
if (mpool->data != nullptr)
{
mpool->data_len = max_size;
mpool->data_age = timeGetTime ();
}
else
{
mpool->data_len = 0;
mpool->data_age = 0;
}
}
}
}
}
// Blacklisting is currently disabled -- every texture is reported as not
// blacklisted. The original locked lookup is kept below for reference:
//
//   injector.lockBlacklist   ();
//   bool bRet = ( inject_blacklist.count (checksum) != 0 );
//   injector.unlockBlacklist ();
//   return bRet;
bool
SK::D3D9::TextureManager::isTextureBlacklisted (uint32_t/* checksum*/) const
{
  return false;
}
// Returns true if an injection record exists for `checksum`; the record map
// is consulted under the injection lock.
bool
SK::D3D9::TextureManager::isTextureInjectable (uint32_t checksum) const
{
  injector.lockInjection ();

  const bool injectable =
    ( injectable_textures.count (checksum) != 0 );

  injector.unlockInjection ();

  return injectable;
}
// Removes the injection record for `checksum`; returns true if a record
// existed. map::erase(key) reports how many elements it removed, so the
// original's separate count()-then-erase() double lookup is unnecessary.
bool
SK::D3D9::TextureManager::removeInjectableTexture (uint32_t checksum)
{
  injector.lockInjection ();

  const bool removed =
    ( injectable_textures.erase (checksum) != 0 );

  injector.unlockInjection ();

  return removed;
}
// Loads the replacement image for `load` (either from a loose file on disk
// or from a .7z/.zip archive) into load->pSrc via D3DX. Returns the D3DX
// HRESULT, E_NOT_VALID_STATE when no injection record exists, or an archive
// error code.
//
// Fixes vs. the original:
//  * `inj_tex->archive <= archives.size ()` was an off-by-one; an index equal
//    to size() would read past the end of `archives`.
//  * On SzArEx_Extract failure the loop neither cleared `wait` nor released
//    the decompression semaphore, spinning forever and eventually
//    deadlocking every streaming thread.
HRESULT
SK::D3D9::TextureManager::injectTexture (TexLoadRequest* load)
{
D3DXIMAGE_INFO img_info = { };
bool streamed = false;
size_t size = 0;
HRESULT hr = E_FAIL;
auto inject =
injectable_textures.find (load->checksum);
if (inject == injectable_textures.end ())
{
tex_log.Log ( L"[Inject Tex] >> Load Request for Checksum: %X "
L"has no Injection Record !!",
load->checksum );
return E_NOT_VALID_STATE;
}
const TexRecord* inj_tex =
&(*inject).second;
streamed =
(inj_tex->method == Streaming);
//
// Load: From Regular Filesystem
//
// (archive == UINT_MAX is the sentinel for "not packed in an archive")
if ( inj_tex->archive == std::numeric_limits <unsigned int>::max () )
{
HANDLE hTexFile =
CreateFile ( load->wszFilename,
GENERIC_READ,
FILE_SHARE_READ,
nullptr,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL |
FILE_FLAG_SEQUENTIAL_SCAN,
nullptr );
DWORD read = 0UL;
if (hTexFile != INVALID_HANDLE_VALUE)
{
size = GetFileSize (hTexFile, nullptr);
// Read the whole file into this thread's TLS scratch buffer.
if (streaming_memory::alloc (size))
{
load->pSrcData = streaming_memory::data ();
ReadFile (hTexFile, load->pSrcData, (DWORD)size, &read, nullptr);
load->SrcDataSize = read;
// Large streamed loads run at background priority to stay off the
// render thread's back.
if (streamed && size > (256 * 1024))
{
SetThreadPriority ( GetCurrentThread (),
THREAD_PRIORITY_BELOW_NORMAL |
THREAD_MODE_BACKGROUND_BEGIN );
}
D3DXGetImageInfoFromFileInMemory (
load->pSrcData,
load->SrcDataSize,
&img_info );
hr = D3DXCreateTextureFromFileInMemoryEx_Original (
load->pDevice,
load->pSrcData, load->SrcDataSize,
D3DX_DEFAULT, D3DX_DEFAULT, img_info.MipLevels,
0, D3DFMT_FROM_FILE,
D3DPOOL_DEFAULT,
D3DX_DEFAULT, D3DX_DEFAULT,
0,
&img_info, nullptr,
&load->pSrc );
// The scratch buffer is TLS-owned; never free it through the request.
load->pSrcData = nullptr;
}
else {
// OUT OF MEMORY ?!
}
CloseHandle (hTexFile);
}
}
//
// Load: From (Compressed) Archive (.7z or .zip)
//
else
{
wchar_t arc_name [MAX_PATH] = { };
CFileInStream arc_stream = { };
CLookToRead look_stream = { };
ISzAlloc thread_alloc = { };
ISzAlloc thread_tmp_alloc = { };
FileInStream_CreateVTable (&arc_stream);
LookToRead_CreateVTable (&look_stream, False);
look_stream.realStream = &arc_stream.s;
LookToRead_Init (&look_stream);
thread_alloc.Alloc = SzAlloc;
thread_alloc.Free = SzFree;
thread_tmp_alloc.Alloc = SzAllocTemp;
thread_tmp_alloc.Free = SzFreeTemp;
CSzArEx arc = { };
size = inj_tex->size;
int fileno = inj_tex->fileno;
// FIXED: was `<=`, which indexed one past the end when archive == size().
if (inj_tex->archive < archives.size ())
wcscpy (arc_name, archives [inj_tex->archive].c_str ());
else
wcscpy (arc_name, L"INVALID");
if (streamed && size > (256 * 1024))
{
SetThreadPriority ( GetCurrentThread (),
THREAD_PRIORITY_LOWEST |
THREAD_MODE_BACKGROUND_BEGIN );
}
// NOTE(review): these early error returns do not undo the background
// thread priority set above -- confirm whether callers reset it.
if (InFile_OpenW (&arc_stream.file, arc_name))
{
tex_log.Log ( L"[Inject Tex] ** Cannot open archive file: %s",
arc_name );
return E_FAIL;
}
SzArEx_Init (&arc);
if (SzArEx_Open (&arc, &look_stream.s, &thread_alloc, &thread_tmp_alloc) != SZ_OK)
{
tex_log.Log ( L"[Inject Tex] ** Cannot open archive file: %s",
arc_name );
File_Close (&arc_stream.file);
SzArEx_Free (&arc, &thread_alloc);
return E_INVALIDARG;
}
if (streaming_memory::alloc (size))
{
load->pSrcData = streaming_memory::data ();
bool wait = true;
while (wait)
{
DWORD dwResult = WAIT_OBJECT_0;
// Only large streamed loads are throttled by the semaphore.
if (streamed && size > (256 * 1024))
{
dwResult =
WaitForSingleObject ( decomp_semaphore, INFINITE );
}
switch (dwResult)
{
case WAIT_OBJECT_0:
{
UInt32 block_idx = 0xFFFFFFFF;
auto* out = static_cast <Byte *> (streaming_memory::data ());
size_t out_len = streaming_memory::data_len ();
size_t offset = 0;
size_t decomp_size = 0;
if (SZ_OK == SzArEx_Extract ( &arc, &look_stream.s, fileno,
&block_idx, &out, &out_len,
&offset, &decomp_size,
&thread_alloc, &thread_tmp_alloc ) )
{
// NOTE(review): release threshold (96 KiB) does not match the wait
// threshold (256 KiB) above, so 96-256 KiB loads release a slot they
// never acquired -- confirm whether this inflation is intentional.
if (streamed && size > (96 * 1024))
ReleaseSemaphore (decomp_semaphore, 1, nullptr);
wait = false;
load->pSrcData = static_cast <Byte *> (streaming_memory::data ()) + offset;
load->SrcDataSize = static_cast <UINT> (decomp_size);
if (SUCCEEDED ( D3DXGetImageInfoFromFileInMemory (
load->pSrcData,
load->SrcDataSize,
&img_info )
)
)
{
load->pSrc = nullptr;
hr =
D3DXCreateTextureFromFileInMemoryEx_Original (
load->pDevice,
load->pSrcData, load->SrcDataSize,
img_info.Width, img_info.Height, img_info.MipLevels,
0, img_info.Format,
D3DPOOL_DEFAULT,
D3DX_DEFAULT, D3DX_DEFAULT,
0,
&img_info, nullptr,
&load->pSrc );
}
}
else
{
tex_log.Log ( L"[ Tex. Mgr ] Unable to read from 7-Zip File... for texture %x",
load->checksum );
// FIXED: give the throttle slot back and stop retrying; the original
// looped forever here, re-waiting on the semaphore each iteration.
if (streamed && size > (256 * 1024))
ReleaseSemaphore (decomp_semaphore, 1, nullptr);
wait = false;
}
} break;
default:
tex_log.Log ( L"[ Tex. Mgr ] Unexpected Wait Status: %X (crc32=%x)",
dwResult,
load->checksum );
wait = false;
break;
}
}
// Scratch buffer is TLS-owned; clear the borrowed pointer.
load->pSrcData = nullptr;
}
File_Close (&arc_stream.file);
SzArEx_Free (&arc, &thread_alloc);
}
// Restore normal priority for large streamed loads.
if (streamed && size > (256 * 1024))
{
SetThreadPriority ( GetCurrentThread (),
THREAD_MODE_BACKGROUND_END );
}
return hr;
}
// OSD state for the texture-load status line. osd_cs is initialized in
// TextureManager::Init; the TryEnter/Leave calls in updateQueueOSD are
// currently commented out.
CRITICAL_SECTION osd_cs = { };
// Tick (timeGetTime) of the last time mod_text was non-empty.
DWORD last_queue_update = 0;
// Text shown on the OSD while textures are resampling / streaming.
std::string mod_text;
// Builds the "Resampling / Streaming: N textures" OSD status text.
// NOTE: the whole body is currently disabled via `if (false)` (the intended
// toggle, config.textures.show_loading_text, is commented out).
void
SK::D3D9::TextureManager::updateQueueOSD (void)
{
if (false)//true)//config.textures.show_loading_text)
{
DWORD dwTime = timeGetTime ();
//if (TryEnterCriticalSection (&osd_cs))
{
// Snapshot the live counters for both worker pools.
LONG resample_count = ReadAcquire (&injector.resampling); size_t queue_len = resample_pool->queueLength ();
LONG stream_count = ReadAcquire (&injector.streaming); size_t to_stream = injector.textures_to_stream.size ();
bool is_resampling = (resample_pool->working () || resample_count || queue_len);
bool is_streaming = (stream_pool.working () || stream_count || to_stream);
// Each message lingers 150 ms after its counters go idle (see below).
static std::string resampling_text; static DWORD dwLastResample = 0;
static std::string streaming_text; static DWORD dwLastStream = 0;
if (is_resampling)
{
size_t count = queue_len + resample_count;
char szFormatted [64];
sprintf (szFormatted, " Resampling: %zu texture", count);
resampling_text = szFormatted;
// Pluralize: "textures" vs "texture ".
resampling_text += (count != 1) ? 's' : ' ';
if (queue_len)
{
sprintf (szFormatted, " (%zu queued)", queue_len);
resampling_text += szFormatted;
}
resampling_text += "\n";
if (count)
dwLastResample = dwTime;
}
if (is_streaming)
{
size_t count = stream_count + to_stream;
char szFormatted [64];
sprintf (szFormatted, " Streaming: %zu texture", count);
streaming_text = szFormatted;
streaming_text += (count != 1) ? 's' : ' ';
sprintf (szFormatted, " [%7.2f MiB]", (double)ReadAcquire ((volatile LONG *)&injector.streaming_bytes) / (1024.0f * 1024.0f));
streaming_text += szFormatted;
if (to_stream)
{
sprintf (szFormatted, " (%zu queued)", to_stream);
streaming_text += szFormatted;
}
if (count)
dwLastStream = dwTime;
}
// Clear each line 150 ms after its last activity.
if (dwLastResample < dwTime - 150)
resampling_text = "";
if (dwLastStream < dwTime - 150)
streaming_text = "";
mod_text = resampling_text + streaming_text;
if (mod_text != "")
last_queue_update = dwTime;
//LeaveCriticalSection (&osd_cs);
}
}
}
// Drains finished jobs from both worker pools, attaches each completed
// override texture to its ISKTextureD3D9 wrapper, and schedules a cache
// purge when the cache grows past its budget. Returns the number of loads
// finalized this call. Intended to run once per frame on the render thread.
int
SK::D3D9::TextureManager::loadQueuedTextures (void)
{
updateQueueOSD ();
int loads = 0;
std::vector <TexLoadRequest *> finished_resamples;
std::vector <TexLoadRequest *> finished_streams;
stream_pool.getFinished (finished_streams);
if (resample_pool != nullptr)
resample_pool->getFinished (finished_resamples);
// ---- Finalize resample jobs ----
for ( auto it : finished_resamples )
{
TexLoadRequest* load =
it;
QueryPerformanceCounter_Original (&load->end);
if (true)
{
tex_log.Log ( L"[%s] Finished %s texture %08x (%5.2f MiB in %9.4f ms)",
(load->type == TexLoadRequest::Stream) ? L"Inject Tex" :
(load->type == TexLoadRequest::Immediate) ? L"Inject Tex" :
L" Resample ",
(load->type == TexLoadRequest::Stream) ? L"streaming" :
(load->type == TexLoadRequest::Immediate) ? L"loading" :
L"filtering",
load->checksum,
(double)load->SrcDataSize / (1024.0f * 1024.0f),
1000.0f * (double)(load->end.QuadPart - load->start.QuadPart) /
(double)SK_GetPerfFreq ().QuadPart );
}
Texture* pTex =
getTexture (load->checksum);
if (pTex != nullptr)
{
pTex->load_time = (float)(1000.0 * (double)(load->end.QuadPart - load->start.QuadPart) /
(double)SK_GetPerfFreq ().QuadPart);
}
auto* pSKTex =
static_cast <ISKTextureD3D9 *> (load->pDest);
if (pSKTex != nullptr)
{
// If the game already let go of the original, the override is useless.
if (pSKTex->refs == 0 && load->pSrc != nullptr)
{
tex_log.Log (L"[ Tex. Mgr ] >> Original texture no longer referenced, discarding new one!");
load->pSrc->Release ();
}
else
{
QueryPerformanceCounter_Original (&pSKTex->last_used);
pSKTex->pTexOverride = load->pSrc;
pSKTex->override_size = load->SrcDataSize;
addInjected (load->SrcDataSize);
}
injector.finishedStreaming (load->checksum);
updateOSD ();
++loads;
// Remove the temporary reference
load->pDest->Release ();
}
delete load;
}
// ---- Finalize stream jobs (throttled) ----
int max_per_frame = 128;
int loaded = 0;
static int frame = 0;
frame++;
for ( auto it : finished_streams )
{
// Defer to a later frame when throttled (every other frame, or past the
// per-frame budget). NOTE(review): `loaded` is never incremented (the
// ++loaded below is commented out), so only the frame-parity check is
// effective -- confirm whether that is intentional.
if ((! __need_purge) && (loaded > max_per_frame || (frame % 2) == 0))
{
it->pDest->AddRef ();
stream_pool.lrg_tex->postFinished (it);
continue;
}
TexLoadRequest* load =
it;
QueryPerformanceCounter_Original (&load->end);
if (true)
{
tex_log.Log ( L"[%s] Finished %s texture %08x (%5.2f MiB in %9.4f ms)",
(load->type == TexLoadRequest::Stream) ? L"Inject Tex" :
(load->type == TexLoadRequest::Immediate) ? L"Inject Tex" :
L" Resample ",
(load->type == TexLoadRequest::Stream) ? L"streaming" :
(load->type == TexLoadRequest::Immediate) ? L"loading" :
L"filtering",
load->checksum,
(double)load->SrcDataSize / (1024.0f * 1024.0f),
1000.0f * (double)(load->end.QuadPart - load->start.QuadPart) /
(double)SK_GetPerfFreq ().QuadPart );
}
Texture* pTex =
getTexture (load->checksum);
if (pTex != nullptr)
{
pTex->load_time = (float)(1000.0 * (double)(load->end.QuadPart - load->start.QuadPart) /
(double)SK_GetPerfFreq ().QuadPart);
}
auto* pSKTex =
static_cast <ISKTextureD3D9 *> (load->pDest);
if (pSKTex != nullptr)
{
if (pSKTex->refs == 0 && load->pSrc != nullptr)
{
tex_log.Log (L"[ Tex. Mgr ] >> Original texture no longer referenced, discarding new one!");
load->pSrc->Release ();
}
else
{
QueryPerformanceCounter_Original (&pSKTex->last_used);
pSKTex->pTexOverride = load->pSrc;
pSKTex->override_size = load->SrcDataSize;
addInjected (load->SrcDataSize);
injected_textures [pSKTex->tex_crc32c] = pSKTex->pTexOverride;
injected_sizes [pSKTex->tex_crc32c] = pSKTex->override_size;
// NOTE(review): pTex may be nullptr here (getTexture above can miss),
// which would dereference null -- confirm the checksum is always cached
// by the time a stream finishes.
injected_load_times [pSKTex->tex_crc32c] = pTex->load_time;
}
injector.finishedStreaming (load->checksum);
updateOSD ();
++loads;
// Remove the temporary reference
load->pDest->Release ();
//++loaded;
}
delete load;
}
//
// If the size changes, check to see if we need a purge - if so, schedule one.
//
static int64_t last_size = 0LL;
if (last_size != cacheSizeTotal ())
{
last_size = cacheSizeTotal ();
if ( last_size >
(1024LL * 1024ULL) * (int64_t)d3d9_max_cache_in_mib/*config.textures.max_cache_in_mib*/ )
__need_purge = true;
}
// Only purge once all pools are quiescent, to avoid freeing in-use data.
if ( (! ReadAcquire (&injector.streaming)) &&
(! ReadAcquire (&injector.resampling)) &&
(! injector.hasPendingLoads ()) )
{
if (__need_purge)
{
purge ();
__need_purge = false;
}
}
osdStats ();
return loads;
}
#include <set>
// True when running inside TOS.exe (Tales of Symphonia); safe_crc32c then
// falls back to plain crc32 instead of crc32c.
static bool TOS = ( GetModuleHandle (L"TOS.exe") != nullptr );
// Checksums `size` bytes at pData, guarded by SEH so a bad pointer yields 0
// instead of crashing. Tales of Symphonia data is hashed with classic crc32;
// everything else uses crc32c.
uint32_t
safe_crc32c (uint32_t seed, const void* pData, size_t size)
{
  __try
  {
    return TOS ? crc32  (seed, pData, size)
               : crc32c (seed, pData, size);
  }

  __except (EXCEPTION_EXECUTE_HANDLER)
  {
    return 0x00;
  }
}
// Checksums excluded from resampling (consulted in the disabled remaster
// path of D3DXCreateTextureFromFileInMemoryEx_Detour).
std::set <uint32_t> resample_blacklist;
// One-shot init flag for the blacklist (set on first detour invocation).
bool resample_blacklist_init = false;
// Central D3DX texture-creation hook: checksums the incoming image, serves
// cache hits, schedules injection (streamed or blocking) of replacement
// textures, wraps the result in ISKTextureD3D9, records it in the cache,
// and optionally dumps the original to disk.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3DXCreateTextureFromFileInMemoryEx_Detour (
_In_ LPDIRECT3DDEVICE9 pDevice,
_In_ LPCVOID pSrcData,
_In_ UINT SrcDataSize,
_In_ UINT Width,
_In_ UINT Height,
_In_ UINT MipLevels,
_In_ DWORD Usage,
_In_ D3DFORMAT Format,
_In_ D3DPOOL Pool,
_In_ DWORD Filter,
_In_ DWORD MipFilter,
_In_ D3DCOLOR ColorKey,
_Inout_ D3DXIMAGE_INFO *pSrcInfo,
_Out_ PALETTEENTRY *pPalette,
_Out_ LPDIRECT3DTEXTURE9 *ppTexture
)
{
// Injection would recurse slightly and cause impossible to diagnose reference counting problems
// with texture caching if we did not check for this!
if (SK::D3D9::tex_mgr.injector.isInjectionThread ())
{
return
D3DXCreateTextureFromFileInMemoryEx_Original (
pDevice,
pSrcData, SrcDataSize,
Width, Height, MipLevels,
Usage,
Format,
Pool,
Filter, MipFilter, ColorKey,
pSrcInfo, pPalette,
ppTexture
);
}
// One-shot blacklist setup (currently a no-op placeholder).
if (resample_blacklist_init == false)
{
resample_blacklist_init = true;
}
// Performance statistics for caching system
LARGE_INTEGER start, end;
QueryPerformanceCounter_Original (&start);
uint32_t checksum = 0xdeadbeef;
// Don't dump or cache these
if ( (Usage & D3DUSAGE_DYNAMIC) || (Usage & D3DUSAGE_RENDERTARGET) || pSrcData == nullptr || SrcDataSize == 0 )
checksum = 0x00;
else
checksum = safe_crc32c (0, pSrcData, SrcDataSize);
// Cache hit: hand back the existing wrapper with an extra reference.
if (true/*config.textures.d3d11.cache*/ && checksum != 0x00)
{
Texture* pTex =
tex_mgr.getTexture (checksum);
if (pTex != nullptr)
{
tex_mgr.refTexture (pTex);
*ppTexture = pTex->d3d9_tex;
return S_OK;
}
tex_mgr.missTexture ();
}
bool resample = false;
// Necessary to make D3DX texture write functions work
// NOTE(review): with C++ precedence this parses as
//   (Pool == D3DPOOL_DEFAULT && dump-conditions) || on_demand_dump
// so on_demand_dump forces D3DUSAGE_DYNAMIC regardless of Pool --
// confirm that is the intended grouping.
if ( Pool == D3DPOOL_DEFAULT && ( config.textures.dump_on_load &&
(! tex_mgr.isTextureDumped (checksum)) &&
(! tex_mgr.isTextureInjectable (checksum)) ) || (
config.textures.on_demand_dump ) )
Usage = D3DUSAGE_DYNAMIC;
// Probe image metadata with the injection flag set so the nested D3DX
// call passes straight through this hook.
tex_mgr.injector.beginLoad ();
D3DXIMAGE_INFO info = { };
D3DXGetImageInfoFromFileInMemory (pSrcData, SrcDataSize, &info);
tex_mgr.injector.endLoad ();
#if 0
D3DFORMAT fmt_real = info.Format;
bool power_of_two_in_one_way =
(! (info.Width & (info.Width - 1))) != (! (info.Height & (info.Height - 1)));
// Textures that would be incorrectly filtered if resampled
if (power_of_two_in_one_way)
tex_mgr.addNonPowerOfTwoTexture (checksum);
// Generate complete mipmap chains for best image quality
// (will increase load-time on uncached textures)
if ((Pool == D3DPOOL_DEFAULT) && false)//config.textures.remaster)
{
{
bool power_of_two_in =
(! (info.Width & (info.Width - 1))) && (! (info.Height & (info.Height - 1)));
bool power_of_two_out =
(! (Width & (Width - 1))) && (! (Height & (Height - 1)));
if (power_of_two_in && power_of_two_out)
{
if (true)//info.MipLevels > 1/* || config.textures.uncompressed*/)
{
if ( resample_blacklist.count (checksum) == 0 )
resample = true;
}
}
}
}
#endif
HRESULT hr = E_FAIL;
TexLoadRequest* load_op = nullptr;
wchar_t wszInjectFileName [MAX_PATH] = { L'\0' };
// True when another thread is already streaming this checksum; we will
// remap its output below instead of queueing a second load.
bool remap_stream =
tex_mgr.injector.isStreaming (checksum);
//
// Generic injectable textures
//
if ( tex_mgr.isTextureInjectable (checksum) )
{
tex_log.LogEx ( true, L"[Inject Tex] Injectable texture for checksum (%08x)... ",
checksum );
TexRecord& record =
tex_mgr.getInjectableTexture (checksum);
if (record.method == TexLoadMethod::DontCare)
record.method = TexLoadMethod::Streaming;
// If -1, load from disk...
if (record.archive == -1)
{
if (record.method == TexLoadMethod::Streaming)
{
_swprintf ( wszInjectFileName, L"%s\\inject\\textures\\streaming\\%08x%s",
SK_D3D11_res_root.c_str (),
checksum,
L".dds" );
}
else if (record.method == TexLoadMethod::Blocking)
{
_swprintf ( wszInjectFileName, L"%s\\inject\\textures\\blocking\\%08x%s",
SK_D3D11_res_root.c_str (),
checksum,
L".dds" );
}
}
load_op = new TexLoadRequest ();
load_op->pDevice = pDevice;
load_op->checksum = checksum;
if (record.method == TexLoadMethod::Streaming)
load_op->type = TexLoadRequest::Stream;
else
load_op->type = TexLoadRequest::Immediate;
wcscpy (load_op->wszFilename, wszInjectFileName);
if (load_op->type == TexLoadRequest::Stream)
{
if ((! remap_stream))
tex_log.LogEx ( false, L"streaming\n" );
else
tex_log.LogEx ( false, L"in-flight already\n" );
}
else
{
tex_log.LogEx ( false, L"blocking (deferred)\n" );
}
}
bool will_replace = (load_op != nullptr || resample);
if (checksum != 0x00)
tex_mgr.injector.beginLoad ();
//tex_log->Log (L"D3DXCreateTextureFromFileInMemoryEx (... MipLevels=%lu ...)", MipLevels);
// When a replacement is coming, only 1 mip level is created here -- the
// override will supply the real chain.
hr =
D3DXCreateTextureFromFileInMemoryEx_Original ( pDevice,
pSrcData, SrcDataSize,
Width, Height, will_replace ? 1 : MipLevels,
Usage, Format, Pool,
Filter, MipFilter, ColorKey,
pSrcInfo, pPalette,
ppTexture );
if (checksum != 0x00)
tex_mgr.injector.endLoad ();
if (SUCCEEDED (hr))
{
new ISKTextureD3D9 (ppTexture, SrcDataSize, checksum);
if ( load_op != nullptr && ( load_op->type == TexLoadRequest::Stream ||
load_op->type == TexLoadRequest::Immediate ) )
{
load_op->SrcDataSize =
static_cast <UINT> (
tex_mgr.isTextureInjectable (checksum) ?
tex_mgr.getInjectableTexture (checksum).size : 0
);
load_op->pDest =
*ppTexture;
if (load_op->type == TexLoadRequest::Immediate)
((ISKTextureD3D9 *)*ppTexture)->must_block = true;
if (tex_mgr.injector.isStreaming (load_op->checksum))
{
// NOTE(review): the lock is dropped and re-taken between reading and
// remapping the in-flight request, and getTextureInFlight's result is
// dereferenced unchecked -- the load could complete in the gap.
tex_mgr.injector.lockStreaming ();
auto* pTexOrig =
static_cast <ISKTextureD3D9 *> (
tex_mgr.injector.getTextureInFlight (load_op->checksum)->pDest
);
tex_mgr.injector.unlockStreaming ();
tex_mgr.injector.lockStreaming ();
// Remap the output of the in-flight texture
tex_mgr.injector.getTextureInFlight (load_op->checksum)->pDest =
*ppTexture;
tex_mgr.injector.unlockStreaming ();
Texture* pTex =
tex_mgr.getTexture (load_op->checksum);
if (pTex != nullptr)
{
// Carry the cached entry's outstanding references over to the new
// wrapper before retiring the old one.
for ( int i = 0;
i < pTex->refs;
++i )
{
(*ppTexture)->AddRef ();
}
}
tex_mgr.removeTexture (pTexOrig);
}
else
{
tex_mgr.injector.addTextureInFlight (load_op);
stream_pool.postJob (load_op);
//resample_pool->postJob (load_op);
}
}
#if 0
//
// TODO: Actually stream these, but block if the game tries to call SetTexture (...)
// while the texture is in-flight.
//
else if (load_op != nullptr && load_op->type == tsf_tex_load_s::Immediate) {
QueryPerformanceFrequency (&load_op->freq);
QueryPerformanceCounter_Original (&load_op->start);
EnterCriticalSection (&cs_tex_inject);
inject_tids.insert (GetCurrentThreadId ());
LeaveCriticalSection (&cs_tex_inject);
load_op->pDest = *ppTexture;
hr = InjectTexture (load_op);
EnterCriticalSection (&cs_tex_inject);
inject_tids.erase (GetCurrentThreadId ());
LeaveCriticalSection (&cs_tex_inject);
QueryPerformanceCounter_Original (&load_op->end);
if (SUCCEEDED (hr)) {
tex_log->Log ( L"[Inject Tex] Finished synchronous texture %08x (%5.2f MiB in %9.4f ms)",
load_op->checksum,
(double)load_op->SrcDataSize / (1024.0f * 1024.0f),
1000.0f * (double)(load_op->end.QuadPart - load_op->start.QuadPart) /
(double) load_op->freq.QuadPart );
ISKTextureD3D9* pSKTex =
(ISKTextureD3D9 *)*ppTexture;
pSKTex->pTexOverride = load_op->pSrc;
pSKTex->override_size = load_op->SrcDataSize;
pSKTex->last_used = load_op->end;
tsf::RenderFix::tex_mgr.addInjected (load_op->SrcDataSize);
} else {
tex_log->Log ( L"[Inject Tex] *** FAILED synchronous texture %08x",
load_op->checksum );
}
delete load_op;
load_op = nullptr;
}
#endif
else if (resample)
{
// Resample path: snapshot the source bytes and queue a filtering job.
load_op = new TexLoadRequest ();
load_op->pDevice = pDevice;
load_op->checksum = checksum;
load_op->type = TexLoadRequest::Resample;
load_op->pSrcData = new uint8_t [SrcDataSize];
load_op->SrcDataSize = SrcDataSize;
swprintf (load_op->wszFilename, L"Resample_%x.dds", checksum);
memcpy (load_op->pSrcData, pSrcData, SrcDataSize);
(*ppTexture)->AddRef ();
load_op->pDest = *ppTexture;
resample_pool->postJob (load_op);
}
}
else if (load_op != nullptr)
{
// Creation failed: the queued request is moot.
delete load_op;
load_op = nullptr;
}
QueryPerformanceCounter_Original (&end);
if (SUCCEEDED (hr))
{
// Publish the new wrapper in the cache.
if (/*config.textures.cache &&*/ checksum != 0x00)
{
auto* pTex =
new Texture ();
pTex->crc32c = checksum;
pTex->d3d9_tex = *(ISKTextureD3D9 **)ppTexture;
pTex->d3d9_tex->AddRef ();
pTex->refs++;
pTex->load_time = (float)( 1000.0 *
(double)(end.QuadPart - start.QuadPart) /
(double)SK_GetPerfFreq ().QuadPart );
tex_mgr.addTexture (checksum, pTex, SrcDataSize);
}
if (true)
{//config.textures.log) {
tex_log.Log ( L"[Load Trace] Texture: (%lu x %lu) * <LODs: %lu> - FAST_CRC32: %X",
info.Width, info.Height, (*ppTexture)->GetLevelCount (), checksum );
tex_log.Log ( L"[Load Trace] Usage: %-20s - Format: %-20s",
SK_D3D9_UsageToStr (Usage).c_str (),
SK_D3D9_FormatToStr (Format).c_str () );
tex_log.Log ( L"[Load Trace] Pool: %s",
SK_D3D9_PoolToStr (Pool) );
tex_log.Log ( L"[Load Trace] Load Time: %6.4f ms",
1000.0f * (double)(end.QuadPart - start.QuadPart) / (double)SK_GetPerfFreq ().QuadPart );
}
}
// Optionally dump the original (pre-injection) image to disk.
bool dump = config.textures.dump_on_load;
if ( dump && (! tex_mgr.isTextureInjectable (checksum)) &&
(! tex_mgr.isTextureDumped (checksum)) )
{
D3DXIMAGE_INFO info_ = { };
D3DFORMAT fmt_real_ = D3DFMT_UNKNOWN;
tex_mgr.injector.beginLoad ();
D3DXGetImageInfoFromFileInMemory (pSrcData, SrcDataSize, &info_);
fmt_real_ = info_.Format;
tex_mgr.dumpTexture (fmt_real_, checksum, *ppTexture);
tex_mgr.injector.endLoad ();
}
return hr;
}
// Deletes the on-disk dump for (fmt, checksum) and removes it from the
// dumped_textures set; returns true only when the file existed and was
// deleted. Also (re)creates the dump directory tree.
//
// Fixes vs. the original:
//  * GetFileAttributesW returns a bitmask (or INVALID_FILE_ATTRIBUTES); the
//    old `!= FILE_ATTRIBUTE_DIRECTORY` equality test mis-classified any
//    directory carrying extra attribute bits.
//  * The third _swprintf passed wszPath as both destination and source
//    argument, which is undefined behavior for the printf family.
bool
SK::D3D9::TextureManager::deleteDumpedTexture (D3DFORMAT fmt, uint32_t checksum)
{
  // True only when the path exists AND is a directory.
  auto dir_exists = [](const wchar_t* wszDir) -> bool
  {
    DWORD dwAttrs =
      GetFileAttributesW (wszDir);

    return ( dwAttrs != INVALID_FILE_ATTRIBUTES   ) &&
           ( dwAttrs &  FILE_ATTRIBUTE_DIRECTORY  );
  };

  wchar_t wszPath [MAX_PATH] = { };

  _swprintf ( wszPath, L"%s\\dump",
                SK_D3D11_res_root.c_str () );

  if (! dir_exists (wszPath))
    CreateDirectoryW (wszPath, nullptr);

  _swprintf ( wszPath, L"%s\\dump\\textures",
                SK_D3D11_res_root.c_str () );

  if (! dir_exists (wszPath))
    CreateDirectoryW (wszPath, nullptr);

  // Rebuild from the root rather than formatting wszPath into itself.
  _swprintf ( wszPath, L"%s\\dump\\textures\\%s",
                SK_D3D11_res_root.c_str (),
                  SK_D3D9_FormatToStr (fmt, false).c_str () );

  if (! dir_exists (wszPath))
    CreateDirectoryW (wszPath, nullptr);

  wchar_t wszFileName [MAX_PATH] = { L'\0' };

  _swprintf ( wszFileName, L"%s\\dump\\textures\\%s\\%08x%s",
                SK_D3D11_res_root.c_str (),
                  SK_D3D9_FormatToStr (fmt, false).c_str (),
                    checksum,
                      L".dds" );

  if (GetFileAttributesW (wszFileName) != INVALID_FILE_ATTRIBUTES)
  {
    if (DeleteFileW (wszFileName))
    {
      // TODO: Add critical section to guard this
      dumped_textures.erase (checksum);

      return true;
    }
  }

  return false;
}
// Returns true if `checksum` has already been written to the dump directory
// during this session.
bool
SK::D3D9::TextureManager::isTextureDumped (uint32_t checksum)
{
  return ( dumped_textures.count (checksum) != 0 );
}
// Saves pTex to <res_root>\dump\textures\<fmt>\<checksum>.dds (creating the
// directory tree as needed) unless the checksum is injectable or already
// dumped. Returns D3DXSaveTextureToFile's HRESULT, or E_FAIL when skipped.
//
// Fixes vs. the original (mirroring deleteDumpedTexture):
//  * GetFileAttributesW results are a bitmask -- test the directory bit
//    instead of comparing for equality with FILE_ATTRIBUTE_DIRECTORY.
//  * The format-subdirectory _swprintf formatted wszPath into itself
//    (undefined behavior); the path is now rebuilt from the root.
HRESULT
SK::D3D9::TextureManager::dumpTexture (D3DFORMAT fmt, uint32_t checksum, IDirect3DTexture9* pTex)
{
  if ( (! isTextureInjectable (checksum)) &&
       (! isTextureDumped     (checksum)) )
  {
    injector.lockDumping ();

    D3DFORMAT fmt_real = fmt;

    // bool compressed =
    //  (fmt_real >= D3DFMT_DXT1 && fmt_real <= D3DFMT_DXT5);

    // True only when the path exists AND is a directory.
    auto dir_exists = [](const wchar_t* wszDir) -> bool
    {
      DWORD dwAttrs =
        GetFileAttributesW (wszDir);

      return ( dwAttrs != INVALID_FILE_ATTRIBUTES   ) &&
             ( dwAttrs &  FILE_ATTRIBUTE_DIRECTORY  );
    };

    wchar_t wszPath [MAX_PATH] = { };

    _swprintf ( wszPath, L"%s\\dump",
                  SK_D3D11_res_root.c_str () );

    if (! dir_exists (wszPath))
      CreateDirectoryW (wszPath, nullptr);

    _swprintf ( wszPath, L"%s\\dump\\textures",
                  SK_D3D11_res_root.c_str () );

    if (! dir_exists (wszPath))
      CreateDirectoryW (wszPath, nullptr);

    _swprintf ( wszPath, L"%s\\dump\\textures\\%s",
                  SK_D3D11_res_root.c_str (),
                    SK_D3D9_FormatToStr (fmt_real, false).c_str () );

    if (! dir_exists (wszPath))
      CreateDirectoryW (wszPath, nullptr);

    wchar_t wszFileName [MAX_PATH] = { L'\0' };

    _swprintf ( wszFileName, L"%s\\dump\\textures\\%s\\%08x%s",
                  SK_D3D11_res_root.c_str (),
                    SK_D3D9_FormatToStr (fmt_real, false).c_str (),
                      checksum,
                        L".dds" );

    // Flag this thread as injecting so the nested D3DX call passes through
    // our creation hook untouched.
    injector.beginLoad ();

    HRESULT hr =
      D3DXSaveTextureToFile (wszFileName, D3DXIFF_DDS, pTex, nullptr);

    injector.endLoad ();

    if (SUCCEEDED (hr))
      dumped_textures.emplace (checksum);

    injector.unlockDumping ();

    return hr;
  }

  return E_FAIL;
}
// Deferred free list: wrappers queued by removeTexture and drained (under
// cs_free_list) by getTexture when called with checksum == 0x00.
std::vector <ISKTextureD3D9 *> remove_textures;
// Looks up the cached Texture record for `checksum` (nullptr on miss).
//
// Special case: checksum == 0x00 first drains the deferred free list --
// wrappers whose `can_free` flag is still clear are carried over to the
// next drain; the rest have their accounting reversed, their underlying
// texture released, and their cache entry erased.
SK::D3D9::Texture*
SK::D3D9::TextureManager::getTexture (uint32_t checksum)
{
if (checksum == 0x00)
{
std::vector <ISKTextureD3D9 *> unremove_textures;
EnterCriticalSection (&cs_free_list);
auto rem = remove_textures.begin ();
while (rem != remove_textures.end ())
{
// Still pinned -- keep it on the list for a later pass.
if (! (*rem)->can_free)
{
unremove_textures.emplace_back (*rem);
++rem;
continue;
}
// Reverse the injected-texture accounting if an override was attached.
if ((*rem)->pTexOverride != nullptr)
{
InterlockedDecrement (&injected_count);
InterlockedAdd64 (&injected_size, -(*rem)->override_size);
}
if ((*rem)->pTex) (*rem)->pTex->Release ();
//if ((*rem)->pTexOverride) (*rem)->pTexOverride->Release ();
(*rem)->pTex = nullptr;
(*rem)->pTexOverride = nullptr;
InterlockedAdd64 (&basic_size, -(*rem)->tex_size);
{
// NOTE(review): `textures` is guarded by cs_cache elsewhere but is
// erased here under cs_free_list only -- confirm this cannot race
// with the lookup below / addTexture.
textures.erase ((*rem)->tex_crc32c);
}
//delete *rem;
++rem;
}
remove_textures = unremove_textures;
LeaveCriticalSection (&cs_free_list);
}
// Plain cache lookup under the cache lock.
EnterCriticalSection (&cs_cache);
auto tex = textures.find (checksum);
if (tex != textures.end ())
{
LeaveCriticalSection (&cs_cache);
return tex->second;
}
LeaveCriticalSection (&cs_cache);
return nullptr;
}
// Queues a wrapper for deferred release; the actual teardown happens in
// getTexture's free-list drain (checksum == 0x00).
void
SK::D3D9::TextureManager::removeTexture (ISKTextureD3D9* pTexD3D9)
{
  EnterCriticalSection (&cs_free_list);
  {
    remove_textures.emplace_back (pTexD3D9);
  }
  LeaveCriticalSection (&cs_free_list);

  updateOSD ();
}
// Publishes `pTex` in the cache under `checksum`, updating the cache-size
// accounting. If a different record already occupied the slot, its wrapper
// is queued for deferred release first.
void
SK::D3D9::TextureManager::addTexture (uint32_t checksum, Texture* pTex, size_t size)
{
  pTex->size = size;

  InterlockedAdd64 (&basic_size, pTex->size);

  EnterCriticalSection (&cs_cache);

  auto existing =
    textures.find (checksum);

  if ( existing != textures.end () &&
       existing->second != pTex )
  {
    removeTexture (existing->second->d3d9_tex);
  }

  textures [checksum] = pTex;

  LeaveCriticalSection (&cs_cache);

  updateOSD ();
}
// Cache-hit helper: records the hit AND takes a COM reference on the
// wrapped texture (refTextureEx with add_to_ref_count = true).
void
SK::D3D9::TextureManager::refTexture (Texture* pTex)
{
  refTextureEx (pTex, true);
}
// Records a cache hit on `pTex`: optionally takes a COM reference, pins the
// wrapper against the free-list drain, and updates the hit / bytes-saved /
// time-saved statistics.
void
SK::D3D9::TextureManager::refTextureEx (Texture* pTex, bool add_to_ref_count)
{
if (add_to_ref_count)
{
pTex->d3d9_tex->AddRef ();
pTex->refs++;
}
// Pin: getTexture's free-list drain skips wrappers with can_free == false.
pTex->d3d9_tex->can_free = false;
InterlockedIncrement (&hits);
#if 0
if (true)
{//config.textures.log) {
tex_log.Log ( L"[CacheTrace] Cache hit (%X), saved %2.1f ms",
pTex->crc32c,
pTex->load_time );
}
#endif
// Credit the larger of the original and override sizes as bytes saved.
InterlockedAdd64 ( &bytes_saved,
std::max ( pTex->size, static_cast <size_t> (
pTex->d3d9_tex->override_size )
)
);
time_saved += pTex->load_time;
updateOSD ();
}
// Hook for IDirect3DDevice9::SetRenderTarget. In normal builds this is a
// pure pass-through; with DUMP_RT defined it logs (and can dump) the
// incoming/outgoing render targets for frame tracing.
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9SetRenderTarget_Detour (
_In_ IDirect3DDevice9 *This,
_In_ DWORD RenderTargetIndex,
_In_ IDirect3DSurface9 *pRenderTarget
)
{
static int draw_counter = 0;
// Ignore anything that's not the primary render device.
if (This != SK_GetCurrentRenderBackend ().device)
{
return D3D9SetRenderTarget_Original (This, RenderTargetIndex, pRenderTarget);
}
//if (tsf::RenderFix::tracer.log) {
#ifdef DUMP_RT
if (D3DXSaveSurfaceToFileW == nullptr) {
D3DXSaveSurfaceToFileW =
(D3DXSaveSurfaceToFile_pfn)
GetProcAddress ( tsf::RenderFix::d3dx9_43_dll,
"D3DXSaveSurfaceToFileW" );
}
wchar_t wszDumpName [MAX_PATH];
// A target switch: the previous target (pOld) is the scene output.
if (pRenderTarget != pOld) {
if (pOld != nullptr) {
wsprintf (wszDumpName, L"dump\\%03d_out_%p.png", draw_counter, pOld);
dll_log->Log ( L"[FrameTrace] >>> Dumped: Output RT to %s >>>", wszDumpName );
dumping = true;
//D3DXSaveSurfaceToFile (wszDumpName, D3DXIFF_PNG, pOld, nullptr, nullptr);
}
}
#endif
//dll_log->Log ( L"[FrameTrace] SetRenderTarget - RenderTargetIndex: %lu, pRenderTarget: %ph",
//RenderTargetIndex, pRenderTarget );
#ifdef DUMP_RT
// Remember the new target and log it as the next pass's input.
if (pRenderTarget != pOld) {
pOld = pRenderTarget;
wsprintf (wszDumpName, L"dump\\%03d_in_%p.png", ++draw_counter, pRenderTarget);
dll_log->Log ( L"[FrameTrace] <<< Dumped: Input RT to %s <<<", wszDumpName );
dumping = true;
//D3DXSaveSurfaceToFile (wszDumpName, D3DXIFF_PNG, pRenderTarget, nullptr, nullptr);
}
#endif
//}
return D3D9SetRenderTarget_Original (This, RenderTargetIndex, pRenderTarget);
}
HMODULE d3dx9_43_dll;
void
SK::D3D9::TextureManager::Init (void)
{
  // One-time startup for the D3D9 texture manager: reserves container
  // capacity, creates every lock, loads D3DX9, enumerates dumped textures
  // already on disk, and registers the cache's console variables.

  // Pre-size hash tables so steady-state gameplay avoids rehashing.
  textures.reserve                    (4096);
  textures_used.reserve               (2048);
  textures_last_frame.reserve         (1024);
  //non_power_of_two_textures.reserve (512);
  tracked_ps.used_textures.reserve    (512);
  tracked_vs.used_textures.reserve    (512);
  known.render_targets.reserve        (128);
  used.render_targets.reserve         (64);
  injector.textures_in_flight.reserve (32);
  tracked_rt.pixel_shaders.reserve    (32);
  tracked_rt.vertex_shaders.reserve   (32);

  // Spin counts are tuned per lock; the hottest injector locks spin longest
  // before falling back to a kernel wait.
  InitializeCriticalSectionAndSpinCount (&cs_cache,                  10240UL);
  InitializeCriticalSectionAndSpinCount (&cs_free_list,               6144UL);
  InitializeCriticalSectionAndSpinCount (&cs_unreferenced,           16384UL);
  InitializeCriticalSectionAndSpinCount (&osd_cs,                       32UL);
  InitializeCriticalSectionAndSpinCount (&injector.cs_tex_inject,    1000000);
  InitializeCriticalSectionAndSpinCount (&injector.cs_tex_blacklist, 1000000);
  InitializeCriticalSectionAndSpinCount (&injector.cs_tex_resample,    10000);
  InitializeCriticalSectionAndSpinCount (&injector.cs_tex_stream,      10000);
  InitializeCriticalSectionAndSpinCount (&injector.cs_tex_dump,         1000);

  // The resource root is shared with the D3D11 code path (local prototype).
  void WINAPI SK_D3D11_SetResourceRoot (const wchar_t* root);
  SK_D3D11_SetResourceRoot (config.textures.d3d11.res_root.c_str ());

  // Create the directory to store dumped textures
  if (config.textures.d3d11.dump)
    CreateDirectoryW (SK_D3D11_res_root.c_str (), nullptr);

  tex_log.init (L"logs/textures.log", L"w+");

  d3dx9_43_dll =
    LoadLibraryW (L"D3DX9_43.DLL");

  init = true;

  refreshDataSources ();

  // Scan <res_root>\dump\textures\<subdir>\*.dds so later loads know which
  // checksums were already dumped (filenames encode the CRC32C checksum).
  if ( GetFileAttributesW ((SK_D3D11_res_root + L"\\dump\\textures").c_str ()) !=
         INVALID_FILE_ATTRIBUTES )
  {
    WIN32_FIND_DATA fd;
    WIN32_FIND_DATA fd_sub;
    HANDLE          hSubFind = INVALID_HANDLE_VALUE;
    HANDLE          hFind    = INVALID_HANDLE_VALUE;
    int             files    = 0;
    LARGE_INTEGER   liSize   = { 0 };

    tex_log.LogEx ( true, L"[ Dump Tex ] Enumerating dumped textures..." );

    hFind =
      FindFirstFileW ((SK_D3D11_res_root + L"\\dump\\textures\\*").c_str (), &fd);

    if (hFind != INVALID_HANDLE_VALUE)
    {
      do
      {
        if (fd.dwFileAttributes != INVALID_FILE_ATTRIBUTES)
        {
          // Each first-level entry is treated as a subdirectory of dumps.
          wchar_t wszSubDir [MAX_PATH] = { };

          _swprintf (wszSubDir, L"%s\\dump\\textures\\%s\\*", SK_D3D11_res_root.c_str (), fd.cFileName);

          hSubFind =
            FindFirstFileW (wszSubDir, &fd_sub);

          if (hSubFind != INVALID_HANDLE_VALUE)
          {
            do
            {
              if (wcsstr (_wcslwr (fd_sub.cFileName), L".dds"))
              {
                uint32_t checksum = 0x00;

                swscanf (fd_sub.cFileName, L"%08x.dds", &checksum);

                ++files;

                LARGE_INTEGER fsize;

                fsize.HighPart = fd_sub.nFileSizeHigh;
                fsize.LowPart  = fd_sub.nFileSizeLow;

                liSize.QuadPart += fsize.QuadPart;

                dumped_textures.emplace (checksum);
              }
            } while (FindNextFileW (hSubFind, &fd_sub) != 0);

            FindClose (hSubFind);
          }
        }
      } while (FindNextFileW (hFind, &fd) != 0);

      FindClose (hFind);
    }

    // NOTE(review): 'files' is an int logged with %lu — benign on Win32.
    tex_log.LogEx ( false, L" %lu files (%3.1f MiB)\n",
                      files, (double)liSize.QuadPart / (1024.0 * 1024.0) );
  }

  InterlockedExchange64 (&bytes_saved, 0LL);

  time_saved = 0.0f;

  // Cap concurrent texture decompression at two worker threads.
  decomp_semaphore =
    CreateSemaphore ( nullptr,
                        2,//config.textures.worker_threads,
                        2,//config.textures.worker_threads,
                          nullptr );

  resample_pool       = new TextureThreadPool ();
  stream_pool.lrg_tex = new TextureThreadPool ();
  stream_pool.sm_tex  = new TextureThreadPool ();

  // Console variables for runtime control of the texture cache.
  SK_ICommandProcessor& command =
    *SK_GetCommandProcessor ();

  command.AddVariable (
    "Textures.Remap",
      SK_CreateVar (SK_IVariable::Boolean, &__remap_textures) );

  command.AddVariable (
    "Textures.Purge",
      SK_CreateVar (SK_IVariable::Boolean, &__need_purge) );

  command.AddVariable (
    "Textures.Trace",
      SK_CreateVar (SK_IVariable::Boolean, &__log_used) );

  command.AddVariable (
    "Textures.ShowCache",
      SK_CreateVar (SK_IVariable::Boolean, &__show_cache) );

  command.AddVariable (
    "Textures.MaxCacheSize",
      SK_CreateVar (SK_IVariable::Int, &d3d9_max_cache_in_mib) );//&config.textures.max_cache_in_mib) );
}
#include <utility.h>
extern
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9SetTexture_Override (
_In_ IDirect3DDevice9 *This,
_In_ DWORD Sampler,
_In_ IDirect3DBaseTexture9 *pTexture );
extern
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9SetRenderTarget_Override (
_In_ IDirect3DDevice9 *This,
_In_ DWORD RenderTargetIndex,
_In_ IDirect3DSurface9 *pRenderTarget );
extern
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9CreateTexture_Override (IDirect3DDevice9 *This,
UINT Width,
UINT Height,
UINT Levels,
DWORD Usage,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DTexture9 **ppTexture,
HANDLE *pSharedHandle);
extern
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9CreateDepthStencilSurface_Override (IDirect3DDevice9 *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DMULTISAMPLE_TYPE MultiSample,
DWORD MultisampleQuality,
BOOL Discard,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle);
extern
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9CreateRenderTarget_Override (IDirect3DDevice9 *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DMULTISAMPLE_TYPE MultiSample,
DWORD MultisampleQuality,
BOOL Lockable,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle);
extern
COM_DECLSPEC_NOTHROW
HRESULT
STDMETHODCALLTYPE
D3D9SetDepthStencilSurface_Override (
_In_ IDirect3DDevice9 *This,
_In_ IDirect3DSurface9 *pNewZStencil
);
void
SK::D3D9::TextureManager::Hook (void)
{
  // Install function hooks over Special K's own D3D9 override exports so the
  // texture manager observes resource creation and binding, then enable all
  // queued hooks in one batch at the end.

  //SK_CreateDLLHook2 ( SK_GetModuleFullName (SK_GetDLL ()).c_str (),
  //                      "D3D9BeginScene_Override",
  //                       D3D9BeginScene_Detour,
  //    static_cast_p2p <void> (&D3D9BeginScene_Original) );
  //
  //SK_CreateDLLHook2 ( SK_GetModuleFullName (SK_GetDLL ()).c_str (),
  //                      "D3D9StretchRect_Override",
  //                       D3D9StretchRect_Detour,
  //    static_cast_p2p <void> (&D3D9StretchRect_Original) );
  //
  SK_CreateFuncHook ( L"D3D9CreateRenderTarget_Override",
                        &D3D9CreateRenderTarget_Override,
                        &D3D9CreateRenderTarget_Detour,
    static_cast_p2p <void> (&D3D9CreateRenderTarget_Original) );
  MH_QueueEnableHook (  &D3D9CreateRenderTarget_Override );

  SK_CreateFuncHook ( L"D3D9CreateDepthStencilSurface_Override",
                        &D3D9CreateDepthStencilSurface_Override,
                        &D3D9CreateDepthStencilSurface_Detour,
    static_cast_p2p <void> (&D3D9CreateDepthStencilSurface_Original) );
  MH_QueueEnableHook (  &D3D9CreateDepthStencilSurface_Override );

  SK_CreateFuncHook ( L"D3D9CreateTexture_Override",
                        &D3D9CreateTexture_Override,
                        &D3D9CreateTexture_Detour,
    static_cast_p2p <void> (&D3D9CreateTexture_Original) );
  MH_QueueEnableHook (  &D3D9CreateTexture_Override );

  SK_CreateFuncHook ( L"D3D9SetTexture_Override",
                        &D3D9SetTexture_Override,
                        &D3D9SetTexture_Detour,
    static_cast_p2p <void> (&D3D9SetTexture_Original) );
  MH_QueueEnableHook (  &D3D9SetTexture_Override );

  SK_CreateFuncHook ( L"D3D9SetRenderTarget_Override",
                        &D3D9SetRenderTarget_Override,
                        &D3D9SetRenderTarget_Detour,
    static_cast_p2p <void> (&D3D9SetRenderTarget_Original) );
  MH_QueueEnableHook (  &D3D9SetRenderTarget_Override );

  //SK_CreateFuncHook ( L"D3D9SetDepthStencilSurface_Override",
  //                      &D3D9SetDepthStencilSurface_Override,
  //                      &D3D9SetDepthStencilSurface_Detour,
  //                      &D3D9SetDepthStencilSurface);
  //MH_QueueEnableHook ( D3D9SetDepthStencilSurface_Detour);

  SK_CreateDLLHook2 ( L"D3DX9_43.DLL",
                       "D3DXCreateTextureFromFileInMemoryEx",
                        D3DXCreateTextureFromFileInMemoryEx_Detour,
    static_cast_p2p <void> (&D3DXCreateTextureFromFileInMemoryEx_Original) );

  // Enable everything queued above atomically.
  SK_ApplyQueuedHooks ();
}
// Skip the purge step on shutdown
bool shutting_down = false;
void
SK::D3D9::TextureManager::Shutdown (void)
{
// 16.6 ms per-frame (60 FPS)
const float frame_time = 16.6f;
while (! injector.textures_to_stream.empty ())
injector.textures_to_stream.pop ();
shutting_down = true;
tex_mgr.reset ();
DeleteCriticalSection (&injector.cs_tex_stream);
DeleteCriticalSection (&injector.cs_tex_resample);
DeleteCriticalSection (&injector.cs_tex_inject);
DeleteCriticalSection (&injector.cs_tex_blacklist);
DeleteCriticalSection (&cs_cache);
DeleteCriticalSection (&osd_cs);
CloseHandle (decomp_semaphore);
tex_log.Log ( L"[Perf Stats] At shutdown: %7.2f seconds (%7.2f frames)"
L" saved by cache",
time_saved / 1000.0f,
time_saved / frame_time );
tex_log.close ();
while (! screenshots_to_delete.empty ())
{
std::wstring file_to_delete = screenshots_to_delete.front ();
screenshots_to_delete.pop ();
DeleteFileW (file_to_delete.c_str ());
}
FreeLibrary (d3dx9_43_dll);
}
void
SK::D3D9::TextureManager::purge (void)
{
  // Evict least-recently-used, currently-unreferenced textures until the
  // cache drops back under the configured size limit.  No-op once shutdown
  // has begun.
  if (shutting_down)
    return;

  int     released           = 0;
  int     released_injected  = 0;

  int64_t reclaimed          = 0;
  int64_t reclaimed_injected = 0;

  tex_log.Log (L"[ Tex. Mgr ] -- TextureManager::purge (...) -- ");

  // Purge any pending removes
  getTexture (0);

  tex_log.Log ( L"[ Tex. Mgr ] *** Current Cache Size: %6.2f MiB "
                L"(User Limit: %6.2f MiB)",
                  (double)cacheSizeTotal () / (1024.0 * 1024.0),
                  (double)d3d9_max_cache_in_mib /*config.textures.max_cache_in_mib*/ );

  tex_log.Log (L"[ Tex. Mgr ] Releasing textures...");

  // Lock order: unreferenced list first, then cache.
  EnterCriticalSection (&cs_unreferenced);
  EnterCriticalSection (&cs_cache);

  // Snapshot every texture the game no longer references (can_free == true).
  std::vector <Texture *> unreferenced_textures;

  for ( auto& it : textures )
  {
    if (it.second->d3d9_tex->can_free)
    {
      unreferenced_textures.emplace_back (it.second);
    }
  }

  LeaveCriticalSection (&cs_cache);

  // Oldest (least-recently-used) candidates first.
  std::sort ( unreferenced_textures.begin (),
              unreferenced_textures.end   (),
      []( Texture *a,
          Texture *b )
      {
        return a->d3d9_tex->last_used.QuadPart <
               b->d3d9_tex->last_used.QuadPart;
      }
  );

  auto free_it =
    unreferenced_textures.begin ();

  // We need to over-free, or we will likely be purging every other texture load
  int64_t target_size =
    std::max (128, d3d9_max_cache_in_mib - 64) * 1024LL * 1024LL;
  int64_t start_size =
    cacheSizeTotal ();

  while ( start_size - reclaimed > target_size &&
            free_it != unreferenced_textures.end () )
  {
    int             tex_refs = -1;
    ISKTextureD3D9* pSKTex   = (*free_it)->d3d9_tex;

    //
    // Skip loads that are in-flight so that we do not hitch
    //
    if (injector.isStreaming ((*free_it)->crc32c))
    {
      ++free_it;
      continue;
    }

    //
    // Do not evict blocking loads, they are generally small and
    //   will cause performance problems if we have to reload them
    //     again later.
    //
    if (pSKTex->must_block)
    {
      ++free_it;
      continue;
    }

    int64_t ovr_size  = 0;
    int64_t base_size = 0;

    ++free_it;

    base_size = pSKTex->tex_size;
    ovr_size  = pSKTex->override_size;
    tex_refs  = pSKTex->Release ();

    // A zero refcount means the wrapper was actually freed; also credit any
    // injected (override) data that went with it.
    if (tex_refs == 0)
    {
      if (ovr_size != 0)
      {
        reclaimed += ovr_size;

        released_injected++;
        reclaimed_injected += ovr_size;
      }
    }

    else
    {
      tex_log.Log (L"[ Tex. Mgr ] Invalid reference count (%lu)!", tex_refs);
    }

    ++released;
    reclaimed += base_size;
  }

  LeaveCriticalSection (&cs_unreferenced);

  tex_log.Log ( L"[ Tex. Mgr ] %4d textures (%4zu remain)",
                  released,
                  textures.size () );

  tex_log.Log ( L"[ Tex. Mgr ] >> Reclaimed %6.2f MiB of memory (%6.2f MiB from %lu inject)",
                  (double)reclaimed          / (1024.0 * 1024.0),
                  (double)reclaimed_injected / (1024.0 * 1024.0),
                  released_injected );

  updateOSD ();

  tex_log.Log (L"[ Tex. Mgr ] ----------- Finished ------------ ");
}
void
SK::D3D9::TextureManager::reset (void)
{
  // Release all cached and injected textures ahead of a device reset, and
  // report what could not be released because the game still holds refs.
  if (! init)
    return;

  // Block the reset until any queued screenshot finishes writing.
  if (! outstanding_screenshots.empty ())
  {
    tex_log.LogEx (true, L"[Screenshot] A queued screenshot has not finished, delaying device reset...");

    // Busy-wait until the screenshot queue drains.
    while (! outstanding_screenshots.empty ())
      ;

    tex_log.LogEx (false, L"done!\n");
  }

  // Give in-flight streaming loads a bounded chance (~10 iterations) to land.
  int iters = 0;

  while ((! injector.textures_in_flight.empty ()) || injector.hasPendingLoads ())
  {
    loadQueuedTextures ();
    SleepEx (16, TRUE);

    getTexture (0);

    if (++iters > 10)
      break;
  }

  known.render_targets.clear ();
  used.render_targets.clear  ();

  //int underflows = 0;

  int ext_refs         = 0;   // references the game still holds
  int ext_textures     = 0;   // textures the game still holds
  int release_count    = 0;
  int unreleased_count = 0;
  int ref_count        = 0;

  int released_injected = 0;

  int64_t reclaimed          = 0;
  int64_t reclaimed_injected = 0;

  tex_log.Log (L"[ Tex. Mgr ] -- TextureManager::reset (...) -- ");

  int64_t original_cache_size = cacheSizeTotal ();

  tex_log.Log (L"[ Tex. Mgr ] Releasing textures...");

  // Injected (override) textures are owned purely by us — drop them all and
  // undo their accounting.
  for (auto& it : injected_textures)
  {
    it.second->Release ();

    reclaimed          += injected_sizes [it.first];
    released_injected++;
    reclaimed_injected += injected_sizes [it.first];

    injected_size      -= injected_sizes [it.first];
  }

  injected_textures.clear   ();
  injected_load_times.clear ();
  injected_sizes.clear      ();

  InterlockedExchange64 (&injected_size,  0);
  InterlockedExchange   (&injected_count, 0);

  // Release one reference per cached texture; anything the game is still
  // holding shows up in the ext_refs / ext_textures tallies.
  for ( auto& it : textures )
  {
    ISKTextureD3D9* pSKTex =
      it.second->d3d9_tex;

    bool    can_free  = false;
    int64_t base_size = 0;
    int64_t ovr_size  = 0;

    int tex_refs =
      pSKTex->Release ();

    if (pSKTex->can_free)
    {
      can_free  = true;
      base_size = pSKTex->tex_size;
      ovr_size  = pSKTex->override_size;
    }

    else
    {
      ext_refs     += pSKTex->refs;
      ext_textures ++;

      ++unreleased_count;
      continue;
    }

    if (tex_refs == 0 || pSKTex->can_free)
    {
      ++release_count;
      reclaimed += base_size;
      ref_count += 1;
    }

    else
    {
      ++unreleased_count;

      ext_refs     += tex_refs;
      ext_textures ++;
    }
  }

  // Flush the deferred-remove list.
  getTexture (0);

  // Commit this immediately, such that D3D9 Reset will not fail in
  //   fullscreen mode...
  loadQueuedTextures ();

  tex_log.Log ( L"[ Tex. Mgr ] %4d textures (%4d references)",
                  release_count + unreleased_count,
                  ref_count + ext_refs );

  if (ext_refs > 0)
  {
    tex_log.Log ( L"[ Tex. Mgr ] >> WARNING: The game is still holding references (%d) to %d textures !!!",
                    ext_refs, ext_textures );
  }

  // Guard against accounting underflow in the basic-size counter.
  if ((int32_t)cacheSizeBasic () < 0)
    InterlockedExchange64 (&basic_size, 0);

  tex_log.Log ( L"[ Mem. Mgr ] === Memory Management Summary ===");
  tex_log.Log ( L"[ Mem. Mgr ] %12.2f MiB Freed",
                  (double)std::max (0LL, reclaimed) / (1048576.0) );
  tex_log.Log ( L"[ Mem. Mgr ] %12.2f MiB Leaked",
                  (double)(original_cache_size - std::max (0LL, reclaimed))
                                                            / (1048576.0) );

  updateOSD ();

  textures_used.clear ();
  textures_last_frame.clear ();

  tex_log.Log (L"[ Tex. Mgr ] ----------- Finished ------------ ");
}
void
SK::D3D9::TextureManager::updateOSD (void)
{
  // NOTE(review): OSD stat generation is currently disabled by this early
  // return; everything below is intentionally dead code kept for when the
  // on-screen display is re-enabled.
  return;

  if (! init)
    return;

  // Cache sizes in MiB, split into base (game) and injected (mod) textures.
  double cache_basic    = (double)cacheSizeBasic    () / (1048576.0f);
  double cache_injected = (double)cacheSizeInjected () / (1048576.0f);
  double cache_total    = cache_basic + cache_injected;

  osd_stats = "";

  char szFormatted [64];
  sprintf ( szFormatted, "%6zu Total Textures : %8.2f MiB",
              numTextures () + numInjectedTextures (),
              cache_total );

  osd_stats += szFormatted;

  CComPtr <IDirect3DDevice9> pDevice = nullptr;

  SK_RenderBackend& rb =
    SK_GetCurrentRenderBackend ();

  // 4095 MiB is the sentinel D3D9 reports when VRAM cannot be queried.
  if ( rb.device != nullptr &&
       SUCCEEDED (rb.device->QueryInterface <IDirect3DDevice9> (&pDevice)) &&
       pDevice->GetAvailableTextureMem () / 1048576UL != 4095 )
  {
    sprintf ( szFormatted, " (%4lu MiB Available)\n",
                pDevice->GetAvailableTextureMem () / 1048576UL );
  }

  else
    sprintf (szFormatted, "\n");

  osd_stats += szFormatted;

  // The "<----" arrow marks whichever texture set is currently displayed.
  sprintf ( szFormatted, "%6zu Base Textures : %8.2f MiB %s\n",
              numTextures (),
              cache_basic,
              __remap_textures ? "" : "<----" );

  osd_stats += szFormatted;

  sprintf ( szFormatted, "%6lu New Textures : %8.2f MiB %s\n",
              numInjectedTextures (),
              cache_injected,
              __remap_textures ? "<----" : "" );

  osd_stats += szFormatted;

  sprintf ( szFormatted, "%6lu Cache Hits : %8.2f Seconds Saved",
              hits,
              time_saved / 1000.0f );

  osd_stats += szFormatted;

  if (debug_tex_id != 0x00)
  {
    osd_stats += "\n\n";

    sprintf ( szFormatted, " Debug Texture : %08x",
                debug_tex_id );

    osd_stats += szFormatted;
  }
}
std::vector <uint32_t> textures_used_last_dump;
int32_t tex_dbg_idx = 0L;
void
SK::D3D9::TextureManager::logUsedTextures (void)
{
  // When __log_used is set (the Textures.Trace console variable), write a
  // one-frame trace of every texture touched last frame, then clear the flag.
  // Always clears the per-frame usage set on exit.
  if (! init)
    return;

  if (__log_used)
  {
    textures_used_last_dump.clear ();
    tex_dbg_idx = 0;

    tex_log.Log (L"[ Tex. Log ] ---------- FrameTrace ----------- ");

    for (const uint32_t it : textures_used)
    {
      auto tex_record =
        getTexture (it);

      // Handle the RARE case where a purge happens immediately following
      //   the last frame
      if ( tex_record           != nullptr &&
           tex_record->d3d9_tex != nullptr )
      {
        auto* pSKTex =
          static_cast <ISKTextureD3D9 *> (tex_record->d3d9_tex);

        textures_used_last_dump.emplace_back (it);

        // PERF: reuse tex_record here — the original performed a second,
        // redundant getTexture (it) lookup just to read load_time.
        tex_log.Log ( L"[ Tex. Log ] %08x.dds { Base: %6.2f MiB, "
                      L"Inject: %6.2f MiB, Load Time: %8.3f ms }",
                        it,
                          (double)pSKTex->tex_size /
                                    (1024.0 * 1024.0),
                            pSKTex->override_size != 0 ?
                              (double)pSKTex->override_size /
                                        (1024.0 * 1024.0) : 0.0,
                              tex_record->load_time );
      }
    }

    tex_log.Log (L"[ Tex. Log ] ---------- FrameTrace ----------- ");

    __log_used = false;
  }

  textures_used.clear ();
}
volatile LONG TextureWorkerThread::num_threads_init = 0UL;
HRESULT
WINAPI
ResampleTexture (TexLoadRequest* load)
{
  // Stamp the start time for load-time statistics.
  QueryPerformanceCounter (&load->start);

  // Probe the in-memory image so the native dimensions / format are known.
  D3DXIMAGE_INFO img_info = { };

  D3DXGetImageInfoFromFileInMemory ( load->pSrcData,
                                     load->SrcDataSize,
                                       &img_info );

  HRESULT hr = E_FAIL;

  // Only flat 2D images are resampled; anything with depth (volume /
  // cubemap data) is left untouched.
  if (img_info.Depth != 1)
  {
    tex_log.Log (L"[ Tex. Mgr ] Will not resample cubemap...");
  }

  else
  {
    hr =
      D3DXCreateTextureFromFileInMemoryEx_Original (
        load->pDevice,
          load->pSrcData, load->SrcDataSize,
            img_info.Width, img_info.Height, 0,
              0, false/*config.textures.uncompressed*/ ? D3DFMT_A8R8G8B8 : img_info.Format,
                D3DPOOL_DEFAULT,
                  D3DX_FILTER_TRIANGLE | D3DX_FILTER_DITHER,
                  D3DX_FILTER_BOX      | D3DX_FILTER_DITHER,
                    0,
                      nullptr, nullptr,
                        &load->pSrc );
  }

  // The request owns the source buffer; release it on every path.
  delete [] load->pSrcData;

  return hr;
}
unsigned int
__stdcall
SK::D3D9::TextureWorkerThread::ThreadProc (LPVOID user)
{
  // Worker thread main loop: waits on three events (new job, memory trim,
  // shutdown) and services resample / streaming texture loads.

  // Thread-local scratch memory bookkeeping starts out zeroed.
  {
    if (! streaming_memory::data_len ())
    {
      streaming_memory::data_len () = 0;
      streaming_memory::data     () = nullptr;
      streaming_memory::data_age () = 0;
    }
  }

  SYSTEM_INFO sysinfo = { };
  GetSystemInfo (&sysinfo);

  ULONG thread_num =
    InterlockedIncrement (&num_threads_init);

  // If a system has more than 4 CPUs (logical or otherwise), let the last one
  //   be dedicated to rendering.
  ULONG processor_num = thread_num % ( sysinfo.dwNumberOfProcessors > 4 ?
                                         sysinfo.dwNumberOfProcessors - 1 :
                                         sysinfo.dwNumberOfProcessors );

  // Tales of Symphonia and Zestiria both pin the render thread to the last
  //   CPU... let's try to keep our worker threads OFF that CPU.
  SetThreadIdealProcessor (GetCurrentThread (),         processor_num);
  SetThreadAffinityMask   (GetCurrentThread (), (1UL << processor_num) & 0xFFFFFFFF);

  auto* pThread =
    static_cast <TextureWorkerThread *> (user);

  DWORD dwWaitStatus = 0;

  // Named wait results for the three events in pThread->control_.ops.
  struct {
    const DWORD job_start  = WAIT_OBJECT_0;
    const DWORD mem_trim   = WAIT_OBJECT_0 + 1;
    const DWORD thread_end = WAIT_OBJECT_0 + 2;
  } wait;

  do
  {
    dwWaitStatus =
      WaitForMultipleObjects ( 3,
                                 pThread->control_.ops,
                                   FALSE,
                                     INFINITE );

    // New Work Ready
    if (dwWaitStatus == wait.job_start)
    {
      TexLoadRequest* pStream = pThread->job_;

      tex_mgr.injector.beginLoad ();
      {
        if (pStream->type == TexLoadRequest::Resample)
        {
          // Resample job: decode + rescale on this worker thread.
          InterlockedIncrement    (&tex_mgr.injector.resampling);

          QueryPerformanceCounter (&pStream->start);

          HRESULT hr =
            ResampleTexture (pStream);

          QueryPerformanceCounter (&pStream->end);

          InterlockedDecrement    (&tex_mgr.injector.resampling);

          if (SUCCEEDED (hr))
            pThread->pool_->postFinished (pStream);

          else
          {
            // On failure, blacklist the checksum and fall back to the
            // original texture so the game keeps running.
            tex_log.Log ( L"[ Tex. Mgr ] Texture Resample Failure (hr=%x) for texture %x, blacklisting from future resamples...",
                            hr, pStream->checksum );

            resample_blacklist.emplace (pStream->checksum);

            pStream->pDest->Release ();
            pStream->pSrc = pStream->pDest;

            ((ISKTextureD3D9 *)pStream->pSrc)->must_block = false;
            ((ISKTextureD3D9 *)pStream->pSrc)->refs--;

            tex_mgr.injector.finishedStreaming (pStream->checksum);
          }

          pThread->finishJob ();
        }

        else
        {
          // Streaming job: inject the replacement texture from disk / archive.
          InterlockedIncrement        (&tex_mgr.injector.streaming);
          InterlockedExchangeAdd      (&tex_mgr.injector.streaming_bytes, pStream->SrcDataSize);

          QueryPerformanceCounter     (&pStream->start);

          HRESULT hr = S_OK;

          hr =
            tex_mgr.injectTexture (pStream);

          QueryPerformanceCounter     (&pStream->end);

          InterlockedExchangeSubtract (&tex_mgr.injector.streaming_bytes, pStream->SrcDataSize);
          InterlockedDecrement        (&tex_mgr.injector.streaming);

          if (SUCCEEDED (hr))
            pThread->pool_->postFinished (pStream);

          else
          {
            // BUGFIX: the original passed only pStream->checksum for the two
            // format specifiers below — 'hr' was missing, so the logged
            // values were garbage.
            tex_log.Log ( L"[ Tex. Mgr ] Texture Injection Failure (hr=%x) for texture %x, removing from injectable list...",
                            hr, pStream->checksum);

            if (tex_mgr.isTextureInjectable   (pStream->checksum))
              tex_mgr.removeInjectableTexture (pStream->checksum);

            pStream->pSrc->Release ();
            pStream->pSrc = pStream->pDest;

            ((ISKTextureD3D9 *)pStream->pSrc)->must_block = false;
            ((ISKTextureD3D9 *)pStream->pSrc)->refs--;

            tex_mgr.injector.finishedStreaming (pStream->checksum);
          }

          pThread->finishJob ();
        }
      }
      tex_mgr.injector.endLoad ();
    }

    else if (dwWaitStatus == (wait.mem_trim))
    {
      // Yay for magic numbers :P ==> (8 MiB Min Size, 5 Seconds Between Trims)
      //
      const size_t   MIN_SIZE = 8192 * 1024;
      const uint32_t MIN_AGE  = 5000UL;

      size_t before = streaming_memory::data_len ();

      streaming_memory::trim ( MIN_SIZE,
                                 timeGetTime () - MIN_AGE );

      size_t now    = streaming_memory::data_len ();

      if (before != now)
      {
#ifdef _WIN64
        tex_log.Log ( L"[ Mem. Mgr ] Trimmed %9lzu bytes of temporary memory for tid=%x",
                        before - now,
                        GetCurrentThreadId () );
#else
        tex_log.Log ( L"[ Mem. Mgr ] Trimmed %9zu bytes of temporary memory for tid=%x",
                        before - now,
                        GetCurrentThreadId () );
#endif
      }
    }

    else if (dwWaitStatus != (wait.thread_end))
    {
      dll_log.Log ( L"[ Tex. Mgr ] Unexpected Worker Thread Wait Status: %X",
                      dwWaitStatus );
    }
  } while (dwWaitStatus != (wait.thread_end));

  // Release all scratch memory before the thread exits.
  streaming_memory::trim (0, timeGetTime ());

  //CloseHandle (GetCurrentThread ());

  return 0;
}
unsigned int
__stdcall
SK::D3D9::TextureThreadPool::Spooler (LPVOID user)
{
  // Pool dispatcher thread: hands queued jobs to idle workers, and asks
  // idle workers to trim their scratch memory while there is nothing to do.
  auto* pPool =
    static_cast <TextureThreadPool *> (user);

  // Sleep until the very first job arrives.
  WaitForSingleObject (pPool->events_.jobs_added, INFINITE);

  while (WaitForSingleObject (pPool->events_.shutdown, 0) == WAIT_TIMEOUT)
  {
    TexLoadRequest* pJob =
      pPool->getNextJob ();

    while (pJob != nullptr)
    {
      bool started = false;

      // Give the job to the first idle worker; any other idle workers get a
      // memory-trim request instead.
      for ( auto it : pPool->workers_ )
      {
        if (! it->isBusy ())
        {
          if (! started)
          {
            it->startJob (pJob);
            started = true;
          }

          else
          {
            it->trim ();
          }
        }
      }

      // All worker threads are busy, so wait...
      if (! started)
      {
        WaitForSingleObject (pPool->events_.results_waiting, INFINITE);
      }

      else
      {
        pJob =
          pPool->getNextJob ();
      }
    }

    // Queue drained: periodically trim idle workers until new jobs arrive.
    const int MAX_TIME_BETWEEN_TRIMS = 1500UL;

    while ( WaitForSingleObject (
              pPool->events_.jobs_added,
                MAX_TIME_BETWEEN_TRIMS ) ==
                  WAIT_TIMEOUT )
    {
      for ( auto it : pPool->workers_ )
      {
        if (! it->isBusy ())
        {
          it->trim ();
        }
      }
    }
  }

  //CloseHandle (GetCurrentThread ());

  return 0;
}
void
SK::D3D9::TextureWorkerThread::finishJob (void)
{
  // Retire the current job: credit its payload size to this worker's byte
  // counter, bump the retired-job count, then clear the job slot so the
  // spooler sees this worker as idle again.
  //
  // The CompareExchange with identical exchange/comparand values is an
  // interlocked *read* of job_ (it never changes the stored pointer).
  InterlockedExchangeAdd64 (&bytes_loaded_,
    ((TexLoadRequest *)InterlockedCompareExchangePointer ((PVOID *)&job_, nullptr, nullptr))
                   ->SrcDataSize);
  InterlockedIncrement (&jobs_retired_);
  InterlockedExchangePointer ((PVOID *)&job_, nullptr);
}
void
SK::D3D9::TextureManager::refreshDataSources (void)
{
if (! init)
return;
static bool crc_init = false;
if (! crc_init)
{
CrcGenerateTable ();
crc_init = true;
}
CFileInStream arc_stream = { };
CLookToRead look_stream = { };
FileInStream_CreateVTable (&arc_stream);
LookToRead_CreateVTable (&look_stream, false);
look_stream.realStream = &arc_stream.s;
LookToRead_Init (&look_stream);
injectable_textures.clear ();
archives.clear ();
injector.lockInjection ();
//
// Walk injectable textures so we don't have to query the filesystem on every
// texture load to check if a injectable one exists.
//
if ( GetFileAttributesW ((SK_D3D11_res_root + L"\\inject").c_str ()) !=
INVALID_FILE_ATTRIBUTES )
{
WIN32_FIND_DATA fd;
HANDLE hFind = INVALID_HANDLE_VALUE;
int files = 0;
LARGE_INTEGER liSize = { 0 };
tex_log.LogEx ( true, L"[Inject Tex] Enumerating injectable textures..." );
hFind =
FindFirstFileW ((SK_D3D11_res_root + L"\\inject\\textures\\blocking\\*").c_str (), &fd);
if (hFind != INVALID_HANDLE_VALUE)
{
do
{
if (fd.dwFileAttributes != INVALID_FILE_ATTRIBUTES)
{
if (wcsstr (_wcslwr (fd.cFileName), L".dds"))
{
uint32_t checksum;
swscanf (fd.cFileName, L"%x.dds", &checksum);
// Already got this texture...
if (injectable_textures.count (checksum))
continue;
++files;
LARGE_INTEGER fsize;
fsize.HighPart = fd.nFileSizeHigh;
fsize.LowPart = fd.nFileSizeLow;
liSize.QuadPart += fsize.QuadPart;
TexRecord rec = { };
rec.size = liSize.LowPart;
rec.archive = std::numeric_limits <unsigned int>::max ();
rec.method = Blocking;
injectable_textures.emplace (std::make_pair (checksum, rec));
}
}
} while (FindNextFileW (hFind, &fd) != 0);
FindClose (hFind);
}
hFind =
FindFirstFileW ((SK_D3D11_res_root + L"\\inject\\textures\\streaming\\*").c_str (), &fd);
if (hFind != INVALID_HANDLE_VALUE)
{
do
{
if (fd.dwFileAttributes != INVALID_FILE_ATTRIBUTES)
{
if (wcsstr (_wcslwr (fd.cFileName), L".dds"))
{
uint32_t checksum;
swscanf (fd.cFileName, L"%x.dds", &checksum);
// Already got this texture...
if (injectable_textures.count (checksum))
continue;
++files;
LARGE_INTEGER fsize;
fsize.HighPart = fd.nFileSizeHigh;
fsize.LowPart = fd.nFileSizeLow;
liSize.QuadPart += fsize.QuadPart;
TexRecord rec = { };
rec.size = fsize.LowPart;
rec.archive = std::numeric_limits <unsigned int>::max ();
rec.method = Streaming;
injectable_textures.emplace (std::make_pair (checksum, rec));
}
}
} while (FindNextFileW (hFind, &fd) != 0);
FindClose (hFind);
}
hFind =
FindFirstFileW ((SK_D3D11_res_root + L"\\inject\\textures\\*").c_str (), &fd);
if (hFind != INVALID_HANDLE_VALUE)
{
do
{
if (fd.dwFileAttributes != INVALID_FILE_ATTRIBUTES)
{
if (wcsstr (_wcslwr (fd.cFileName), L".dds"))
{
uint32_t checksum;
swscanf (fd.cFileName, L"%x.dds", &checksum);
// Already got this texture...
if (injectable_textures.count (checksum))
continue;
++files;
LARGE_INTEGER fsize;
fsize.HighPart = fd.nFileSizeHigh;
fsize.LowPart = fd.nFileSizeLow;
liSize.QuadPart += fsize.QuadPart;
TexRecord rec = { };
rec.size = fsize.LowPart;
rec.archive = std::numeric_limits <unsigned int>::max ();
rec.method = DontCare;
if (! injectable_textures.count (checksum))
injectable_textures.emplace (std::make_pair (checksum, rec));
}
}
} while (FindNextFileW (hFind, &fd) != 0);
FindClose (hFind);
}
hFind =
FindFirstFileW ((SK_D3D11_res_root + L"\\inject\\*.*").c_str (), &fd);
if (hFind != INVALID_HANDLE_VALUE)
{
int archive = 0;
do
{
if (fd.dwFileAttributes != INVALID_FILE_ATTRIBUTES)
{
wchar_t* wszArchiveNameLwr =
_wcslwr (_wcsdup (fd.cFileName));
if ( wcsstr (wszArchiveNameLwr, L".7z") )
{
int tex_count = 0;
CSzArEx arc = { };
ISzAlloc thread_alloc;
ISzAlloc thread_tmp_alloc;
thread_alloc.Alloc = SzAlloc;
thread_alloc.Free = SzFree;
thread_tmp_alloc.Alloc = SzAllocTemp;
thread_tmp_alloc.Free = SzFreeTemp;
wchar_t wszQualifiedArchiveName [MAX_PATH] = { };
_swprintf ( wszQualifiedArchiveName,
L"%s\\inject\\%s",
SK_D3D11_res_root.c_str (),
fd.cFileName );
if (InFile_OpenW (&arc_stream.file, wszQualifiedArchiveName))
{
tex_log.Log ( L"[Inject Tex] ** Cannot open archive file: %s",
wszQualifiedArchiveName );
continue;
}
SzArEx_Init (&arc);
if ( SzArEx_Open ( &arc,
&look_stream.s,
&thread_alloc,
&thread_tmp_alloc ) == SZ_OK )
{
uint32_t i;
wchar_t wszEntry [MAX_PATH];
for (i = 0; i < arc.NumFiles; i++)
{
if (SzArEx_IsDir (&arc, i))
continue;
SzArEx_GetFileNameUtf16 (&arc, i, (UInt16 *)wszEntry);
// Truncate to 32-bits --> there's no way in hell a texture will ever be >= 2 GiB
UInt64 fileSize = SzArEx_GetFileSize (&arc, i);
wchar_t* wszFullName =
_wcslwr (_wcsdup (wszEntry));
if ( wcsstr ( wszFullName, L".dds") )
{
TexLoadMethod method = DontCare;
uint32_t checksum = 0x00;
wchar_t* wszUnqualifiedEntry =
wszFullName + wcslen (wszFullName);
// Strip the path
while ( wszUnqualifiedEntry >= wszFullName &&
*wszUnqualifiedEntry != L'/')
wszUnqualifiedEntry = CharPrevW (wszFullName, wszUnqualifiedEntry);
if (*wszUnqualifiedEntry == L'/')
wszUnqualifiedEntry = CharNextW (wszUnqualifiedEntry);
swscanf (wszUnqualifiedEntry, L"%x.dds", &checksum);
// Already got this texture...
if ( isTextureInjectable (checksum) ||
isTextureBlacklisted (checksum) )
{
free (wszFullName);
continue;
}
if (wcsstr (wszFullName, L"streaming"))
method = Streaming;
else if (wcsstr (wszFullName, L"blocking"))
method = Blocking;
TexRecord rec = { };
rec.size = (uint32_t)fileSize;
rec.archive = archive;
rec.fileno = i;
rec.method = method;
injectable_textures.emplace (std::make_pair (checksum, rec));
++tex_count;
++files;
liSize.QuadPart += rec.size;
}
free (wszFullName);
}
if (tex_count > 0)
{
++archive;
archives.emplace_back (wszQualifiedArchiveName);
}
}
SzArEx_Free (&arc, &thread_alloc);
File_Close (&arc_stream.file);
}
free (wszArchiveNameLwr);
}
} while (FindNextFileW (hFind, &fd) != 0);
FindClose (hFind);
}
tex_log.LogEx ( false, L" %lu files (%3.1f MiB)\n",
files, (double)liSize.QuadPart / (1024.0 * 1024.0) );
}
injector.unlockInjection ();
File_Close (&arc_stream.file);
}
bool
SK::D3D9::TextureManager::TextureManager::reloadTexture (uint32_t checksum)
{
  // Discard a cached texture's injected override and stream it back in from
  // its on-disk / archived source.  Returns true if a reload was queued (or
  // remapped onto an already in-flight load), false otherwise.
  if (! init)
    return false;

  if ( ! isTextureInjectable (checksum) )
    return false;

  injector.lockStreaming ();

  Texture* pCacheTex =
    getTexture (checksum);

  ISKTextureD3D9* pTex =
    pCacheTex ? pCacheTex->d3d9_tex :
                nullptr;

  // Only textures that currently carry an override can be reloaded.
  if (pTex != nullptr && pTex->pTexOverride != nullptr)
  {
    tex_log.LogEx ( true, L"[Inject Tex] Reloading texture for checksum (%08x)... ",
                      checksum );

    // Undo the injected texture's bookkeeping, then drop it.
    InterlockedDecrement (&injected_count);
    InterlockedAdd64     (&injected_size, -pTex->override_size);

    pTex->pTexOverride->Release ();
    pTex->pTexOverride = nullptr;
  }

  else
  {
    injector.unlockStreaming ();
    return false;
  }

  TexRecord record =
    getInjectableTexture (checksum);

  if (record.method == DontCare)
      record.method = Streaming;

  TexLoadRequest* load_op = nullptr;

  wchar_t wszInjectFileName [MAX_PATH] = { L'\0' };

  bool remap_stream =
    injector.isStreaming (checksum);

  // If -1, load from disk...
  if (record.archive == std::numeric_limits <unsigned int>::max ())
  {
    if (record.method == Streaming)
    {
      _swprintf ( wszInjectFileName, L"%s\\inject\\textures\\streaming\\%08x%s",
                    SK_D3D11_res_root.c_str (),
                      checksum,
                        L".dds" );
    }

    else if (record.method == Blocking)
    {
      _swprintf ( wszInjectFileName, L"%s\\inject\\textures\\blocking\\%08x%s",
                    SK_D3D11_res_root.c_str (),
                      checksum,
                        L".dds" );
    }
  }

  load_op = new TexLoadRequest ();

  SK_GetCurrentRenderBackend ().device->QueryInterface <IDirect3DDevice9> (&load_op->pDevice);

  load_op->checksum = checksum;
  load_op->type     = TexLoadRequest::Stream;

  wcscpy (load_op->wszFilename, wszInjectFileName);

  if (load_op->type == TexLoadRequest::Stream)
  {
    if ((! remap_stream))
      tex_log.LogEx ( false, L"streaming\n" );
    else
      tex_log.LogEx ( false, L"in-flight already\n" );
  }

  load_op->SrcDataSize =
    isTextureInjectable (checksum) ?
      (UINT)injectable_textures [checksum].size : 0;

  load_op->pDest = pTex;

  pTex->must_block = false;

  if (injector.isStreaming (load_op->checksum))
  {
    //ISKTextureD3D9* pTexOrig =
    //  (ISKTextureD3D9 *)injector.getTextureInFlight (load_op->checksum)->pDest;

    // Remap the output of the in-flight texture
    injector.getTextureInFlight (load_op->checksum)->pDest =
      pTex;

    // Carry the cache's outstanding references over to the new wrapper.
    Texture* pTexCache = getTexture (load_op->checksum);

    if (pTexCache != nullptr)
    {
      for ( int i = 0;
                i < pTexCache->refs;
              ++i )
      {
        pTex->AddRef ();
      }
    }
  }

  else
  {
    injector.addTextureInFlight (load_op);

    // Hold an extra reference for the duration of the streaming job.
    load_op->pDest->AddRef ( );

    stream_pool.postJob (load_op);
  }

  injector.unlockStreaming ();

  if (injector.hasPendingLoads ())
    loadQueuedTextures ();

  return true;
}
void
SK::D3D9::TextureManager::getThreadStats (std::vector <TexThreadStats>& stats)
{
  // Only the resample pool reports per-worker statistics right now.
  stats =
    resample_pool->getWorkerStats ();

  // For Inject (Small, Large) -> Push Back
}
void
SK::D3D9::TextureThreadPool::postJob (TexLoadRequest* job)
{
  EnterCriticalSection (&cs_jobs);

  // Spin the spooler thread up lazily, on the first job ever posted.
  if (spool_thread_ == nullptr)
  {
    spool_thread_ =
      (HANDLE)_beginthreadex ( nullptr,
                                 0,
                                   Spooler,
                                     this,
                                       0x00,
                                         nullptr );
  }

  // Don't let the game free this while we are working on it...
  job->pDest->AddRef ();

  jobs_.push (job);

  LeaveCriticalSection (&cs_jobs);

  // Wake the spooler so it can hand the job to a worker.
  SetEvent (events_.jobs_added);
}
// Pick which texture should actually be drawn for this surface, honoring
// the per-texture content preference and the global remap toggle.  A
// tracked debug texture "blinks" by periodically returning nullptr.
IDirect3DTexture9*
ISKTextureD3D9::getDrawTexture (void) const
{
  IDirect3DTexture9* selected = nullptr;

  if (img_to_use == ContentPreference::Original)
  {
    selected = pTex;
  }

  else if (img_to_use == ContentPreference::Override)
  {
    // Fall back to the original if no override has been loaded yet.
    selected = (pTexOverride != nullptr) ? pTexOverride
                                         : pTex;
  }

  else if (img_to_use == ContentPreference::DontCare)
  {
    // Global remap switch decides, provided an override exists.
    selected = (__remap_textures && pTexOverride != nullptr) ? pTexOverride
                                                             : pTex;
  }

  // Blink the texture being debugged so it is easy to spot on-screen.
  if (debug_tex_id > 0 && tex_crc32c == (uint32_t)debug_tex_id && config.textures.highlight_debug_tex)
  {
    extern DWORD tracked_tex_blink_duration;

    if (timeGetTime () % tracked_tex_blink_duration > tracked_tex_blink_duration / 2)
      selected = nullptr;
  }

  return selected;
}
// Record one use of this texture and return the texture to draw.
// On the *second* use only (uses == 1), optionally dump the original
// texture to disk if dump-on-load is enabled and the texture is neither
// already dumped nor injectable.
IDirect3DTexture9*
ISKTextureD3D9::use (void)
{
  // uses > 0 && uses < 2  ==>  exactly the second call; dump once.
  if (config.textures.dump_on_load && pTex != nullptr && uses > 0 && uses < 2)
  {
    D3DSURFACE_DESC desc = { };

    // Skip managed-pool textures and anything we already have on disk.
    if ( SUCCEEDED (pTex->GetLevelDesc (0, &desc)) &&
         ( ! ( SK::D3D9::tex_mgr.isTextureDumped     (tex_crc32c) ||
               SK::D3D9::tex_mgr.isTextureInjectable (tex_crc32c) ) ) &&
         (desc.Pool != D3DPOOL_MANAGED) )
    {
      tex_log.Log ( L"[Dump Trace] Texture: (%lu x %lu) * <LODs: %lu> - CRC32C: %08X",
                    desc.Width, desc.Height, pTex->GetLevelCount (), tex_crc32c);
      tex_log.Log ( L"[Dump Trace]  Usage: %-20s - Format: %-20s",
                    SK_D3D9_UsageToStr  (desc.Usage).c_str  (),
                    SK_D3D9_FormatToStr (desc.Format).c_str ());
      tex_log.Log ( L"[Dump Trace]   Pool: %s",
                    SK_D3D9_PoolToStr (desc.Pool));

      // Bracket the dump so the injector ignores our own resource traffic.
      SK::D3D9::tex_mgr.injector.beginLoad ();
      SK::D3D9::tex_mgr.dumpTexture (desc.Format, tex_crc32c, pTex);
      SK::D3D9::tex_mgr.injector.endLoad   ();
    }
  }

  ++uses;

  QueryPerformanceCounter_Original (&last_used);

  return getDrawTexture ();
}
// Toggle between forcing the override image and the default heuristic.
void
ISKTextureD3D9::toggleOverride (void)
{
  img_to_use = ( img_to_use == ContentPreference::Override ) ?
                   ContentPreference::DontCare :
                   ContentPreference::Override;
}
// Toggle between forcing the original image and the default heuristic.
void
ISKTextureD3D9::toggleOriginal (void)
{
  img_to_use = ( img_to_use == ContentPreference::Original ) ?
                   ContentPreference::DontCare :
                   ContentPreference::Original;
}
// Static state shared by all TextureManager::Injector instances:
// in-flight / queued / finished load bookkeeping, the critical sections
// that guard each stage, and lock-free progress counters.
std::unordered_map <uint32_t, TexLoadRequest *> SK::D3D9::TextureManager::Injector::textures_in_flight;
std::queue        <TexLoadRef>                  SK::D3D9::TextureManager::Injector::textures_to_stream;
std::queue        <TexLoadRef>                  SK::D3D9::TextureManager::Injector::finished_loads;

CRITICAL_SECTION SK::D3D9::TextureManager::Injector::cs_tex_stream    = { };
CRITICAL_SECTION SK::D3D9::TextureManager::Injector::cs_tex_resample  = { };
CRITICAL_SECTION SK::D3D9::TextureManager::Injector::cs_tex_inject    = { };
CRITICAL_SECTION SK::D3D9::TextureManager::Injector::cs_tex_dump      = { };
CRITICAL_SECTION SK::D3D9::TextureManager::Injector::cs_tex_blacklist = { };

volatile LONG  SK::D3D9::TextureManager::Injector::streaming       = 0L;
volatile ULONG SK::D3D9::TextureManager::Injector::streaming_bytes = 0UL;
volatile LONG SK::D3D9::TextureManager::Injector::resampling = 0L; | gpl-2.0 |
yulin724/rt-thread-comment | components/rtgui/widgets/combobox.c | 1 | 7323 | #include <rtgui/dc.h>
#include <rtgui/rtgui_theme.h>
#include <rtgui/widgets/combobox.h>
/* Forward declaration: ondeactivate handler that hides the pull-down
 * window and resets the button's pressed state (defined below). */
static rt_bool_t rtgui_combobox_pulldown_hide(struct rtgui_widget* widget, struct rtgui_event* event);

/* 8x4 bitmap of the down-arrow glyph drawn on the pull-down button. */
const static rt_uint8_t down_arrow[] = {0xff, 0x7e, 0x3c, 0x18};
/* Class constructor: set default geometry, install the event handler and
 * reset all combobox state (no pull-down window exists yet). */
static void _rtgui_combobox_constructor(rtgui_combobox_t *box)
{
	rtgui_rect_t rect = {0, 0, RTGUI_COMBOBOX_WIDTH, RTGUI_COMBOBOX_HEIGHT};

	/* init widget and set event handler */
	rtgui_widget_set_event_handler(RTGUI_WIDGET(box), rtgui_combobox_event_handler);
	rtgui_widget_set_rect(RTGUI_WIDGET(box), &rect);
	RTGUI_WIDGET_TEXTALIGN(RTGUI_WIDGET(box)) = RTGUI_ALIGN_CENTER_VERTICAL;

	box->pd_pressed = RT_FALSE;
	box->current_item = 0;
	box->on_selected = RT_NULL;
	/* pull-down window is created lazily on first click */
	box->pd_win = RT_NULL;
}
/* Class destructor: release the lazily-created pull-down window.
 *
 * BUG FIX: pd_win is RT_NULL until the combobox has been opened at least
 * once (see the constructor and rtgui_combobox_onmouse_button); the
 * original code passed RT_NULL to rtgui_win_destroy() unconditionally.
 * Guard the call so destroying a never-opened combobox is safe. */
static void _rtgui_combobox_destructor(rtgui_combobox_t *box)
{
	/* destroy pull down window, if one was ever created */
	if (box->pd_win != RT_NULL)
		rtgui_win_destroy(box->pd_win);

	/* reset box field */
	box->pd_win = RT_NULL;
}
/* List-box "item activated" callback for the pull-down window: copy the
 * selection into the owning combobox, fire its on_selected callback,
 * hide the pull-down window and redraw the combobox. */
void rtgui_combobox_pdwin_onitem(struct rtgui_widget* widget, struct rtgui_event* event)
{
	rtgui_win_t* pd_win;
	rtgui_combobox_t* combo;
	rtgui_listbox_t* list;

	list = RTGUI_LISTBOX(widget);
	pd_win = RTGUI_WIN(rtgui_widget_get_toplevel(widget));
	/* the owning combobox was stashed in user_data when the window was made */
	combo = RTGUI_COMBOBOX(pd_win->user_data);
	combo->current_item = list->current_item;

	if (combo->on_selected != RT_NULL)
		combo->on_selected(RTGUI_WIDGET(combo), RT_NULL);

	rtgui_win_hiden(pd_win);
	rtgui_widget_update(RTGUI_WIDGET(combo));
	return ;
}
rt_bool_t rtgui_combobox_pdwin_ondeactive(struct rtgui_widget* widget, struct rtgui_event* event)
{
rtgui_win_hiden(RTGUI_WIN(widget));
return RT_TRUE;
}
/* Register the combobox class with the RTGUI type system, binding the
 * constructor/destructor above and the instance size. */
DEFINE_CLASS_TYPE(combobox, "combobox",
	RTGUI_WIDGET_TYPE,
	_rtgui_combobox_constructor,
	_rtgui_combobox_destructor,
	sizeof(struct rtgui_combobox));
/* Create a combobox widget over a caller-owned item array.
 *
 * @items: item array (NOT copied; must outlive the widget)
 * @count: number of entries in @items
 * @rect : widget extent
 *
 * Returns the new widget, or RT_NULL on allocation failure.
 *
 * BUG FIX: the original dereferenced the result of rtgui_widget_create()
 * without checking for allocation failure. */
rtgui_combobox_t *rtgui_combobox_create(struct rtgui_listbox_item* items, rt_uint16_t count, struct rtgui_rect* rect)
{
	rtgui_combobox_t *box;

	box = (rtgui_combobox_t*)rtgui_widget_create(RTGUI_COMBOBOX_TYPE);
	if (box == RT_NULL)
		return RT_NULL;

	box->items_count = count;
	box->items = items;
	rtgui_widget_set_rect(RTGUI_WIDGET(box), rect);
	box->pd_win = RT_NULL;

	return box;
}
/* Destroy a combobox; the class destructor releases the pull-down
 * window, but the item array remains caller-owned. */
void rtgui_combobox_destroy(rtgui_combobox_t* box)
{
	rtgui_widget_destroy(RTGUI_WIDGET(box));
}
/* Paint the combobox: white text area with the current item's name,
 * plus a raised/sunken pull-down button with a down-arrow glyph. */
static void rtgui_combobox_ondraw(struct rtgui_combobox* box)
{
	/* draw button */
	rtgui_color_t bc;
	struct rtgui_dc* dc;
	struct rtgui_rect rect, r;

	/* begin drawing */
	dc = rtgui_dc_begin_drawing(RTGUI_WIDGET(box));
	if (dc == RT_NULL) return;

	/* remember background color so it can be restored after the fill */
	bc = RTGUI_WIDGET_BACKGROUND(RTGUI_WIDGET(box));

	/* get widget rect */
	rtgui_widget_get_rect(RTGUI_WIDGET(box), &rect);
	RTGUI_WIDGET_BACKGROUND(RTGUI_WIDGET(box)) = white;

	/* fill widget rect with background color */
	rtgui_dc_fill_rect(dc, &rect);
	rtgui_dc_draw_rect(dc, &rect);

	/* draw current item */
	if (box->current_item < box->items_count)
	{
		rect.x1 += 5;
		rtgui_dc_draw_text(dc, box->items[box->current_item].name, &rect);
	}

	/* restore background color */
	RTGUI_WIDGET_BACKGROUND(RTGUI_WIDGET(box)) = bc;

	/* draw pull down button; border style reflects pressed state */
	rect.x1 = rect.x2 - RTGUI_COMBOBOX_BUTTON_WIDTH;
	rtgui_rect_inflate(&rect, -1);
	rtgui_dc_fill_rect(dc, &rect);
	if (box->pd_pressed == RT_TRUE) rtgui_dc_draw_border(dc, &rect, RTGUI_BORDER_SUNKEN);
	else rtgui_dc_draw_border(dc, &rect, RTGUI_BORDER_RAISE);

	/* center the 8x4 down-arrow glyph inside the button */
	r.x1 = 0; r.y1 = 0; r.x2 = 8; r.y2 = 4;
	rtgui_rect_moveto_align(&rect, &r, RTGUI_ALIGN_CENTER_HORIZONTAL | RTGUI_ALIGN_CENTER_VERTICAL);
	rtgui_dc_draw_byte(dc, r.x1, r.y1, 4, down_arrow);

	/* end drawing */
	rtgui_dc_end_drawing(dc);
	return;
}
/* Handle mouse clicks on the pull-down button: press animates the
 * border; release lazily creates the pull-down window (first time only)
 * and then shows it non-modally.
 *
 * Returns RT_TRUE if the click landed on the pull-down button.
 *
 * BUG FIX: the original registered TWO ondeactivate handlers back to
 * back; the second call (rtgui_combobox_pdwin_ondeactive) silently
 * overwrote the first, leaving rtgui_combobox_pulldown_hide -- which
 * also clears the button's pressed state and redraws the combobox --
 * dead code. Only the purpose-built pulldown_hide handler is kept. */
static rt_bool_t rtgui_combobox_onmouse_button(struct rtgui_combobox* box, struct rtgui_event_mouse* event)
{
	struct rtgui_rect rect;

	/* get widget rect */
	rect = RTGUI_WIDGET(box)->extent;

	/* move to the pull down button */
	rect.x1 = rect.x2 - RTGUI_COMBOBOX_BUTTON_WIDTH;
	if (rtgui_rect_contains_point(&rect, event->x, event->y) == RT_EOK)
	{
		/* handle mouse button on pull down button */
		if (event->button & RTGUI_MOUSE_BUTTON_LEFT &&
			event->button & RTGUI_MOUSE_BUTTON_DOWN)
		{
			box->pd_pressed = RT_TRUE;
			rtgui_widget_update(RTGUI_WIDGET(box));
		}
		else if (event->button & RTGUI_MOUSE_BUTTON_LEFT &&
			event->button & RTGUI_MOUSE_BUTTON_UP)
		{
			box->pd_pressed = RT_FALSE;
			rtgui_widget_update(RTGUI_WIDGET(box));

			/* pop pull down window; create it on first use */
			if (box->pd_win == RT_NULL)
			{
				rtgui_listbox_t *list;

				/* create pull down window below the combobox, tall
				 * enough for five rows */
				rect = RTGUI_WIDGET(box)->extent;
				rect.y1 = rect.y2;
				rect.y2 = rect.y1 + 5 * (2 + rtgui_theme_get_selected_height());
				box->pd_win = rtgui_win_create(RT_NULL, "combo", &rect, RTGUI_WIN_STYLE_NO_TITLE);
				rtgui_win_set_ondeactivate(RTGUI_WIN(box->pd_win), rtgui_combobox_pulldown_hide);

				/* set user data to parent combobox */
				box->pd_win->user_data = (rt_uint32_t)box;

				/* create list box */
				rtgui_rect_inflate(&rect, -1);
				list = rtgui_listbox_create(box->items, box->items_count, &rect);
				rtgui_container_add_child(RTGUI_CONTAINER(box->pd_win), RTGUI_WIDGET(list));
				rtgui_widget_focus(RTGUI_WIDGET(list));
				rtgui_listbox_set_onitem(list, rtgui_combobox_pdwin_onitem);
			}

			/* show combo box pull down window (non-modal) */
			rtgui_win_show(RTGUI_WIN(box->pd_win), RT_FALSE);
		}

		return RT_TRUE;
	}

	return RT_FALSE;
}
/* Main event dispatcher for the combobox widget. */
rt_bool_t rtgui_combobox_event_handler(struct rtgui_widget* widget, struct rtgui_event* event)
{
	struct rtgui_combobox* box = (struct rtgui_combobox*)widget;

	switch (event->type)
	{
	case RTGUI_EVENT_PAINT:
#ifndef RTGUI_USING_SMALL_SIZE
		if (widget->on_draw != RT_NULL) widget->on_draw(widget, event);
		else
#endif
			rtgui_combobox_ondraw(box);
		break;

	case RTGUI_EVENT_MOUSE_BUTTON:
		return rtgui_combobox_onmouse_button(box, (struct rtgui_event_mouse*)event);

	case RTGUI_EVENT_FOCUSED:
		{
			/* item focused */
			struct rtgui_item* item;
			struct rtgui_event_focused* focused;

			focused = (struct rtgui_event_focused*) event;
			item = (struct rtgui_item*) (focused->widget);

			if (item != RT_NULL)
			{
				/* hide pull down window
				 * NOTE(review): pd_win may still be RT_NULL here if the
				 * pull-down was never opened; rtgui_win_hiden would then
				 * receive RT_NULL -- confirm it tolerates that. */
				rtgui_win_hiden(RTGUI_WIN(box->pd_win));
				rtgui_combobox_ondraw(box);
			}
		}
		break;
	}

	return RT_FALSE;
}
/* Ondeactivate handler: hide the pull-down window, release the button's
 * pressed visual state and repaint the combobox. The owning combobox is
 * recovered from the window's user_data. */
static rt_bool_t rtgui_combobox_pulldown_hide(struct rtgui_widget* widget, struct rtgui_event* event)
{
	struct rtgui_combobox* box = RT_NULL;

	if (widget != RT_NULL)
		box = (struct rtgui_combobox*) (((struct rtgui_win*)widget)->user_data);

	if (box != RT_NULL)
	{
		/* hide pull down window */
		rtgui_win_hiden(RTGUI_WIN(box->pd_win));

		/* clear pull down button state */
		box->pd_pressed = RT_FALSE;
		rtgui_widget_update(RTGUI_WIDGET(box));
	}

	return RT_TRUE;
}
/* Return the currently selected item, or RT_NULL when the box is NULL
 * or the selection index is out of range. */
struct rtgui_listbox_item* rtgui_combox_get_select(struct rtgui_combobox* box)
{
	if (box == RT_NULL)
		return RT_NULL;
	if (box->current_item >= box->items_count)
		return RT_NULL;

	return &(box->items[box->current_item]);
}
/* Install the callback invoked when the user picks an item from the
 * pull-down list (see rtgui_combobox_pdwin_onitem). */
void rtgui_combobox_set_onselected(struct rtgui_combobox* box, rtgui_onitem_func_t func)
{
	box->on_selected = func;
}
| gpl-2.0 |
maddox/vlc | modules/gui/qt4/dialogs/extended.cpp | 1 | 4167 | /*****************************************************************************
* extended.cpp : Extended controls - Undocked
****************************************************************************
* Copyright (C) 2006-2008 the VideoLAN team
* $Id$
*
* Authors: Clément Stenac <zorglub@videolan.org>
* Jean-Baptiste Kempf <jb@videolan.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "dialogs/extended.hpp"
#include "main_interface.hpp" /* Needed for external MI size */
#include "input_manager.hpp"
#include <QTabWidget>
#include <QGridLayout>
ExtendedDialog *ExtendedDialog::instance = NULL;
/* Build the "Adjustments and Effects" tool window: audio tab (equalizer
 * + spatializer), video-effects tab, synchronization tab, and an
 * optional v4l2 tab when the module is available. Geometry is restored
 * from settings or defaulted to the left of the main interface. */
ExtendedDialog::ExtendedDialog( intf_thread_t *_p_intf ): QVLCFrame( _p_intf )
{
    setWindowFlags( Qt::Tool );
    setWindowOpacity( config_GetFloat( p_intf, "qt-opacity" ) );
    setWindowTitle( qtr( "Adjustments and Effects" ) );
    setWindowRole( "vlc-extended" );

    QGridLayout *layout = new QGridLayout( this );
    /* NOTE(review): setLayoutMargins() is not a stock QGridLayout member;
     * presumably a project helper/macro -- confirm its declaration. */
    layout->setLayoutMargins( 0, 2, 0, 1, 1 );
    layout->setSpacing( 3 );

    mainTabW = new QTabWidget( this );

    /* AUDIO effects */
    QWidget *audioWidget = new QWidget;
    QHBoxLayout *audioLayout = new QHBoxLayout( audioWidget );
    QTabWidget *audioTab = new QTabWidget( audioWidget );

    equal = new Equalizer( p_intf, audioTab );
    audioTab->addTab( equal, qtr( "Graphic Equalizer" ) );

    Spatializer *spatial = new Spatializer( p_intf, audioTab );
    audioTab->addTab( spatial, qtr( "Spatializer" ) );
    audioLayout->addWidget( audioTab );

    mainTabW->addTab( audioWidget, qtr( "Audio Effects" ) );

    /* Video Effects */
    QWidget *videoWidget = new QWidget;
    QHBoxLayout *videoLayout = new QHBoxLayout( videoWidget );
    QTabWidget *videoTab = new QTabWidget( videoWidget );

    videoEffect = new ExtVideo( p_intf, videoTab );
    videoLayout->addWidget( videoTab );
    videoTab->setSizePolicy( QSizePolicy::Preferred, QSizePolicy::Maximum );

    mainTabW->addTab( videoWidget, qtr( "Video Effects" ) );

    syncW = new SyncControls( p_intf, videoTab );
    mainTabW->addTab( syncW, qtr( "Synchronization" ) );

    if( module_exists( "v4l2" ) )
    {
        ExtV4l2 *v4l2 = new ExtV4l2( p_intf, mainTabW );
        mainTabW->addTab( v4l2, qtr( "v4l2 controls" ) );
    }

    layout->addWidget( mainTabW, 0, 0, 1, 5 );

    QPushButton *closeButton = new QPushButton( qtr( "&Close" ) );
    layout->addWidget( closeButton, 1, 4, 1, 1 );
    CONNECT( closeButton, clicked(), this, close() );

    /* Restore geometry or move this dialog on the left pane of the MI */
    if( !restoreGeometry(getSettings()->value("EPanel/geometry").toByteArray()))
    {
        resize( QSize( 400, 280 ) );

        MainInterface *p_mi = p_intf->p_sys->p_mi;
        if( p_mi )
            move( ( p_mi->x() - frameGeometry().width() - 10 ), p_mi->y() );
        else
            move ( 450 , 0 );
    }

    /* Reset the effect panels whenever the playing item ends */
    CONNECT( THEMIM->getIM(), statusChanged( int ), this, changedItem( int ) );
}
/* Persist the dialog's geometry under the "EPanel" settings key. */
ExtendedDialog::~ExtendedDialog()
{
    writeSettings( "EPanel" );
}
/* Switch to tab index i, then make the dialog visible. */
void ExtendedDialog::showTab( int i )
{
    mainTabW->setCurrentIndex( i );
    show();
}
/* Index of the currently visible tab. */
int ExtendedDialog::currentTab()
{
    return mainTabW->currentIndex();
}
/* Input-status slot: when playback reaches end-of-stream, reset the
 * synchronization, video-effect and equalizer panels. */
void ExtendedDialog::changedItem( int i_status )
{
    if( i_status == END_S )
    {
        syncW->clean();
        videoEffect->clean();
        equal->clean();
    }
}
| gpl-2.0 |
zheharry/linux-sh4-2.6.23.17_stm23_A18B | arch/sh/oprofile/backtrace.c | 1 | 3852 | /*
* SH specific backtracing code for oprofile
*
* Copyright 2007 STMicroelectronics Ltd.
*
* Author: Dave Peverley <dpeverley@mpc-data.co.uk>
*
* Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386
* oprofile backtrace code by John Levon, David Smith
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/sections.h>
/* Limit to stop backtracing too far. */
static int backtrace_limit = 20;

/* Forward declaration: true iff stackaddr lies inside the current kernel
 * stack, above the saved register frame (defined below). */
static int valid_kernel_stack(unsigned long *stackaddr, struct pt_regs *regs);
/* Scan the kernel stack from stackaddr for the next value that looks
 * like a kernel text address, record it as one backtrace entry, and
 * return the advanced stack pointer for the next scan. */
static unsigned long *
kernel_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long addr;

	/*
	 * If not a valid kernel address, keep going till we find one
	 * or the SP stops being a valid address.
	 */
	do {
		/* read the word, then advance past it */
		addr = *stackaddr++;

		if (kernel_text_address(addr)) {
			/* record exactly one frame per call, then stop */
			oprofile_add_trace(addr);
			break;
		}
	} while (valid_kernel_stack(stackaddr, regs));

	return stackaddr;
}
/* Read one word from the user-mode stack, record it as a backtrace
 * entry, and return the advanced stack pointer. Returns NULL (ending
 * the walk) on an inaccessible, unreadable or misaligned address. */
static unsigned long *
user_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long buf_stack;

	/* Also check accessibility of address */
	if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long))) {
		return NULL;
	}

	/* atomic-context-safe copy; fails rather than faulting in pages */
	if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long))) {
		return NULL;
	}

	/* Quick paranoia check: return addresses must be 4-byte aligned */
	if (buf_stack & 3) {
		return NULL;
	}

	/*
	 * TODO : This doesn't work!
	 */
#if 0
	printk("user_backtrace() : R15 0x%08lx ", regs->regs[15]);
	print_symbol("[%-10s] ", regs->regs[15]);
	printk("PC 0x%08lx ", regs->pc);
	print_symbol("[%-10s] ", regs->pc);
	printk("PR 0x%08lx ", regs->pr);
	print_symbol("[%-10s]\n", regs->pr);
#endif

	oprofile_add_trace(buf_stack);

	stackaddr++;

	return stackaddr;
}
/*
* | | /\ Higher addresses
* | |
* --------------- stack base (address of current_thread_info)
* | thread info |
* . .
* | stack |
* --------------- saved regs->regs[15] value if valid
* . .
* --------------- struct pt_regs stored on stack (struct pt_regs *)
* | |
* . .
* | |
* --------------- ???
* | |
* | | \/ Lower addresses
*
* Thus, &pt_regs <-> stack base restricts the valid(ish) fp values
*/
/* True iff stackaddr lies strictly between the saved register frame on
 * the current kernel stack and the stack base (see diagram above). */
static int valid_kernel_stack(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long addr = (unsigned long)stackaddr;
	unsigned long low  = (unsigned long)regs;
	unsigned long high = (low & ~(THREAD_SIZE - 1)) + THREAD_SIZE;

	return (addr > low) && (addr < high);
}
/* oprofile backtrace entry point: walk up to 'depth' frames starting at
 * the saved R15 (stack pointer), using the kernel- or user-mode walker
 * depending on where the sample was taken. */
void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long *stackaddr;

	/*
	 * Paranoia - clip max depth as we could get lost in the weeds.
	 */
	if (depth > backtrace_limit)
		depth = backtrace_limit;

	/* R15 is the SH stack pointer at the time of the sample */
	stackaddr = (unsigned long *)regs->regs[15];
	if (!user_mode(regs)) {
		while (depth-- && valid_kernel_stack(stackaddr, regs))
			stackaddr = kernel_backtrace(stackaddr, regs);

		return;
	}

	/* user_backtrace() returns NULL when the walk must stop */
	while (depth-- && (stackaddr != NULL)) {
		stackaddr = user_backtrace(stackaddr, regs);
	}
}
| gpl-2.0 |
RittikBhowmik/Project-X5pro-Kernel-u8800pro | fs/ecryptfs/main.c | 513 | 25648 | /**
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2003 Erez Zadok
* Copyright (C) 2001-2003 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
* Michael C. Thompson <mcthomps@us.ibm.com>
* Tyler Hicks <tyhicks@ou.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/key.h>
#include <linux/parser.h>
#include <linux/fs_stack.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include "ecryptfs_kernel.h"
/**
 * Module parameter that defines the ecryptfs_verbosity level.
 * (Permission 0: not visible/tunable via sysfs after load.)
 */
int ecryptfs_verbosity = 0;

module_param(ecryptfs_verbosity, int, 0);
MODULE_PARM_DESC(ecryptfs_verbosity,
		 "Initial verbosity level (0 or 1; defaults to "
		 "0, which is Quiet)");

/**
 * Module parameter that defines the number of message buffer elements
 * used by the kernel<->ecryptfsd messaging layer.
 */
unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS;

module_param(ecryptfs_message_buf_len, uint, 0);
MODULE_PARM_DESC(ecryptfs_message_buf_len,
		 "Number of message buffer elements");

/**
 * Module parameter that defines the maximum guaranteed amount of time to wait
 * for a response from ecryptfsd.  The actual sleep time will be, more than
 * likely, a small amount greater than this specified value, but only less if
 * the message successfully arrives.
 */
signed long ecryptfs_message_wait_timeout = ECRYPTFS_MAX_MSG_CTX_TTL / HZ;

module_param(ecryptfs_message_wait_timeout, long, 0);
MODULE_PARM_DESC(ecryptfs_message_wait_timeout,
		 "Maximum number of seconds that an operation will "
		 "sleep while waiting for a message response from "
		 "userspace");

/**
 * Module parameter that is an estimate of the maximum number of users
 * that will be concurrently using eCryptfs. Set this to the right
 * value to balance performance and memory use.
 */
unsigned int ecryptfs_number_of_users = ECRYPTFS_DEFAULT_NUM_USERS;

module_param(ecryptfs_number_of_users, uint, 0);
MODULE_PARM_DESC(ecryptfs_number_of_users, "An estimate of the number of "
		 "concurrent users of eCryptfs");
/* printk wrapper used by ecryptfs_printk(): suppresses KERN_DEBUG
 * messages unless ecryptfs_verbosity is enabled. The level is detected
 * by peeking at the second byte of the KERN_* prefix ('7' == DEBUG). */
void __ecryptfs_printk(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	if (fmt[1] == '7') { /* KERN_DEBUG */
		if (ecryptfs_verbosity >= 1)
			vprintk(fmt, args);
	} else
		vprintk(fmt, args);
	va_end(args);
}
/**
* ecryptfs_init_lower_file
* @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with
* the lower dentry and the lower mount set
*
* eCryptfs only ever keeps a single open file for every lower
* inode. All I/O operations to the lower inode occur through that
* file. When the first eCryptfs dentry that interposes with the first
* lower dentry for that inode is created, this function creates the
* lower file struct and associates it with the eCryptfs
* inode. When all eCryptfs files associated with the inode are released, the
* file is closed.
*
* The lower file will be opened with read/write permissions, if
* possible. Otherwise, it is opened read-only.
*
* This function does nothing if a lower file is already
* associated with the eCryptfs inode.
*
* Returns zero on success; non-zero otherwise
*/
/* Open the single lower file backing an eCryptfs inode via the
 * privileged-open helper (see the block comment above for the full
 * lifecycle). On failure *lower_file is cleared and the error logged. */
static int ecryptfs_init_lower_file(struct dentry *dentry,
				    struct file **lower_file)
{
	const struct cred *cred = current_cred();
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
	struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
	int rc;

	rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt,
				      cred);
	if (rc) {
		printk(KERN_ERR "Error opening lower file "
		       "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
		       "rc = [%d]\n", lower_dentry, lower_mnt, rc);
		(*lower_file) = NULL;
	}
	return rc;
}
/* Take a reference on the inode's shared lower file, opening it on the
 * first reference. The count is rolled back to 0 if the open fails, so
 * a later caller can retry. Serialized by lower_file_mutex. */
int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode)
{
	struct ecryptfs_inode_info *inode_info;
	int count, rc = 0;

	inode_info = ecryptfs_inode_to_private(inode);
	mutex_lock(&inode_info->lower_file_mutex);
	count = atomic_inc_return(&inode_info->lower_file_count);
	if (WARN_ON_ONCE(count < 1))
		/* refcount underflowed previously; refuse to proceed */
		rc = -EINVAL;
	else if (count == 1) {
		/* first user: actually open the lower file */
		rc = ecryptfs_init_lower_file(dentry,
					      &inode_info->lower_file);
		if (rc)
			atomic_set(&inode_info->lower_file_count, 0);
	}
	mutex_unlock(&inode_info->lower_file_mutex);
	return rc;
}
/* Drop one reference on the inode's shared lower file; the last
 * reference closes it. atomic_dec_and_mutex_lock() only takes the mutex
 * when the count hits zero, so the common path is lock-free. */
void ecryptfs_put_lower_file(struct inode *inode)
{
	struct ecryptfs_inode_info *inode_info;

	inode_info = ecryptfs_inode_to_private(inode);
	if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
				      &inode_info->lower_file_mutex)) {
		fput(inode_info->lower_file);
		inode_info->lower_file = NULL;
		mutex_unlock(&inode_info->lower_file_mutex);
	}
}
/* Mount-option tokens and their match patterns. Most options exist in
 * two spellings: a bare legacy form (e.g. "sig=") and an
 * "ecryptfs_"-prefixed form; both map to the same behavior. */
enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
       ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
       ecryptfs_opt_ecryptfs_key_bytes,
       ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata,
       ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig,
       ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes,
       ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only,
       ecryptfs_opt_check_dev_ruid,
       ecryptfs_opt_err };

static const match_table_t tokens = {
	{ecryptfs_opt_sig, "sig=%s"},
	{ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
	{ecryptfs_opt_cipher, "cipher=%s"},
	{ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
	{ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
	{ecryptfs_opt_passthrough, "ecryptfs_passthrough"},
	{ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"},
	{ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"},
	{ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"},
	{ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"},
	{ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"},
	{ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"},
	{ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"},
	{ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"},
	{ecryptfs_opt_err, NULL}
};
/* For every global auth tok registered at mount time, look up the
 * matching key in the user's keyring. A hit clears the INVALID flag and
 * releases the key's write lock; the first miss marks that tok INVALID
 * and aborts the scan with an error (the caller only warns). */
static int ecryptfs_init_global_auth_toks(
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct ecryptfs_global_auth_tok *global_auth_tok;
	struct ecryptfs_auth_tok *auth_tok;
	int rc = 0;

	list_for_each_entry(global_auth_tok,
			    &mount_crypt_stat->global_auth_tok_list,
			    mount_crypt_stat_list) {
		rc = ecryptfs_keyring_auth_tok_for_sig(
			&global_auth_tok->global_auth_tok_key, &auth_tok,
			global_auth_tok->sig);
		if (rc) {
			printk(KERN_ERR "Could not find valid key in user "
			       "session keyring for sig specified in mount "
			       "option: [%s]\n", global_auth_tok->sig);
			global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
			goto out;
		} else {
			global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
			up_write(&(global_auth_tok->global_auth_tok_key)->sem);
		}
	}
out:
	return rc;
}
/* Zero the per-mount crypt stat, then set up its auth-tok list and
 * mutex and mark the structure initialized. */
static void ecryptfs_init_mount_crypt_stat(
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	memset((void *)mount_crypt_stat, 0,
	       sizeof(struct ecryptfs_mount_crypt_stat));

	INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list);
	mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex);

	mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED;
}
/**
 * ecryptfs_parse_options
 * @sbi: The eCryptfs superblock-private info to populate
 * @options: The comma-separated options passed to the kernel
 * @check_ruid: set to 1 if device uid should be checked against the ruid
 *
 * Parse mount options:
 * sig=XXX - description(signature) of the key to use
 * (see the tokens table above for the full set; most options also have
 * an "ecryptfs_"-prefixed alias)
 *
 * The signature of the key to use must be the description of a key
 * already in the keyring. Mounting will fail if the key can not be
 * found.
 *
 * After parsing, defaults are filled in for any cipher/key-size options
 * the user omitted, the needed crypto transforms are instantiated, and
 * the registered auth toks are resolved against the user keyring.
 *
 * Returns zero on success; non-zero on error
 */
static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
				  uid_t *check_ruid)
{
	char *p;
	int rc = 0;
	int sig_set = 0;
	int cipher_name_set = 0;
	int fn_cipher_name_set = 0;
	int cipher_key_bytes;
	int cipher_key_bytes_set = 0;
	int fn_cipher_key_bytes;
	int fn_cipher_key_bytes_set = 0;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
		&sbi->mount_crypt_stat;
	substring_t args[MAX_OPT_ARGS];
	int token;
	char *sig_src;
	char *cipher_name_dst;
	char *cipher_name_src;
	char *fn_cipher_name_dst;
	char *fn_cipher_name_src;
	char *fnek_dst;
	char *fnek_src;
	char *cipher_key_bytes_src;
	char *fn_cipher_key_bytes_src;
	u8 cipher_code;

	*check_ruid = 0;

	if (!options) {
		rc = -EINVAL;
		goto out;
	}
	ecryptfs_init_mount_crypt_stat(mount_crypt_stat);
	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		switch (token) {
		case ecryptfs_opt_sig:
		case ecryptfs_opt_ecryptfs_sig:
			sig_src = args[0].from;
			rc = ecryptfs_add_global_auth_tok(mount_crypt_stat,
							  sig_src, 0);
			if (rc) {
				printk(KERN_ERR "Error attempting to register "
				       "global sig; rc = [%d]\n", rc);
				goto out;
			}
			sig_set = 1;
			break;
		case ecryptfs_opt_cipher:
		case ecryptfs_opt_ecryptfs_cipher:
			cipher_name_src = args[0].from;
			cipher_name_dst =
				mount_crypt_stat->
				global_default_cipher_name;
			/* NOTE(review): writing the NUL at index
			 * ECRYPTFS_MAX_CIPHER_NAME_SIZE assumes the buffer is
			 * declared with SIZE + 1 bytes -- confirm in
			 * ecryptfs_kernel.h */
			strncpy(cipher_name_dst, cipher_name_src,
				ECRYPTFS_MAX_CIPHER_NAME_SIZE);
			cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
			cipher_name_set = 1;
			break;
		case ecryptfs_opt_ecryptfs_key_bytes:
			cipher_key_bytes_src = args[0].from;
			cipher_key_bytes =
				(int)simple_strtol(cipher_key_bytes_src,
						   &cipher_key_bytes_src, 0);
			mount_crypt_stat->global_default_cipher_key_size =
				cipher_key_bytes;
			cipher_key_bytes_set = 1;
			break;
		case ecryptfs_opt_passthrough:
			mount_crypt_stat->flags |=
				ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED;
			break;
		case ecryptfs_opt_xattr_metadata:
			mount_crypt_stat->flags |=
				ECRYPTFS_XATTR_METADATA_ENABLED;
			break;
		case ecryptfs_opt_encrypted_view:
			/* encrypted view implies xattr metadata */
			mount_crypt_stat->flags |=
				ECRYPTFS_XATTR_METADATA_ENABLED;
			mount_crypt_stat->flags |=
				ECRYPTFS_ENCRYPTED_VIEW_ENABLED;
			break;
		case ecryptfs_opt_fnek_sig:
			fnek_src = args[0].from;
			fnek_dst =
				mount_crypt_stat->global_default_fnek_sig;
			strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX);
			mount_crypt_stat->global_default_fnek_sig[
				ECRYPTFS_SIG_SIZE_HEX] = '\0';
			rc = ecryptfs_add_global_auth_tok(
				mount_crypt_stat,
				mount_crypt_stat->global_default_fnek_sig,
				ECRYPTFS_AUTH_TOK_FNEK);
			if (rc) {
				printk(KERN_ERR "Error attempting to register "
				       "global fnek sig [%s]; rc = [%d]\n",
				       mount_crypt_stat->global_default_fnek_sig,
				       rc);
				goto out;
			}
			mount_crypt_stat->flags |=
				(ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES
				 | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK);
			break;
		case ecryptfs_opt_fn_cipher:
			fn_cipher_name_src = args[0].from;
			fn_cipher_name_dst =
				mount_crypt_stat->global_default_fn_cipher_name;
			strncpy(fn_cipher_name_dst, fn_cipher_name_src,
				ECRYPTFS_MAX_CIPHER_NAME_SIZE);
			mount_crypt_stat->global_default_fn_cipher_name[
				ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
			fn_cipher_name_set = 1;
			break;
		case ecryptfs_opt_fn_cipher_key_bytes:
			fn_cipher_key_bytes_src = args[0].from;
			fn_cipher_key_bytes =
				(int)simple_strtol(fn_cipher_key_bytes_src,
						   &fn_cipher_key_bytes_src, 0);
			mount_crypt_stat->global_default_fn_cipher_key_bytes =
				fn_cipher_key_bytes;
			fn_cipher_key_bytes_set = 1;
			break;
		case ecryptfs_opt_unlink_sigs:
			mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS;
			break;
		case ecryptfs_opt_mount_auth_tok_only:
			mount_crypt_stat->flags |=
				ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY;
			break;
		case ecryptfs_opt_check_dev_ruid:
			*check_ruid = 1;
			break;
		case ecryptfs_opt_err:
		default:
			/* unknown options are warned about, not rejected */
			printk(KERN_WARNING
			       "%s: eCryptfs: unrecognized option [%s]\n",
			       __func__, p);
		}
	}
	/* at least one key signature is mandatory */
	if (!sig_set) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_ERR, "You must supply at least one valid "
				"auth tok signature as a mount "
				"parameter; see the eCryptfs README\n");
		goto out;
	}
	/* fall back to built-in defaults for anything left unset */
	if (!cipher_name_set) {
		int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);

		BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE);
		strcpy(mount_crypt_stat->global_default_cipher_name,
		       ECRYPTFS_DEFAULT_CIPHER);
	}
	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
	    && !fn_cipher_name_set)
		strcpy(mount_crypt_stat->global_default_fn_cipher_name,
		       mount_crypt_stat->global_default_cipher_name);
	if (!cipher_key_bytes_set)
		mount_crypt_stat->global_default_cipher_key_size = 0;
	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
	    && !fn_cipher_key_bytes_set)
		mount_crypt_stat->global_default_fn_cipher_key_bytes =
			mount_crypt_stat->global_default_cipher_key_size;

	/* reject cipher/key-size combinations eCryptfs cannot encode */
	cipher_code = ecryptfs_code_for_cipher_string(
		mount_crypt_stat->global_default_cipher_name,
		mount_crypt_stat->global_default_cipher_key_size);
	if (!cipher_code) {
		ecryptfs_printk(KERN_ERR,
				"eCryptfs doesn't support cipher: %s",
				mount_crypt_stat->global_default_cipher_name);
		rc = -EINVAL;
		goto out;
	}

	/* instantiate crypto transforms for the chosen ciphers (if not
	 * already cached); key_tfm_list_mutex guards the shared cache */
	mutex_lock(&key_tfm_list_mutex);
	if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
				 NULL)) {
		rc = ecryptfs_add_new_key_tfm(
			NULL, mount_crypt_stat->global_default_cipher_name,
			mount_crypt_stat->global_default_cipher_key_size);
		if (rc) {
			printk(KERN_ERR "Error attempting to initialize "
			       "cipher with name = [%s] and key size = [%td]; "
			       "rc = [%d]\n",
			       mount_crypt_stat->global_default_cipher_name,
			       mount_crypt_stat->global_default_cipher_key_size,
			       rc);
			rc = -EINVAL;
			mutex_unlock(&key_tfm_list_mutex);
			goto out;
		}
	}
	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
	    && !ecryptfs_tfm_exists(
		    mount_crypt_stat->global_default_fn_cipher_name, NULL)) {
		rc = ecryptfs_add_new_key_tfm(
			NULL, mount_crypt_stat->global_default_fn_cipher_name,
			mount_crypt_stat->global_default_fn_cipher_key_bytes);
		if (rc) {
			printk(KERN_ERR "Error attempting to initialize "
			       "cipher with name = [%s] and key size = [%td]; "
			       "rc = [%d]\n",
			       mount_crypt_stat->global_default_fn_cipher_name,
			       mount_crypt_stat->global_default_fn_cipher_key_bytes,
			       rc);
			rc = -EINVAL;
			mutex_unlock(&key_tfm_list_mutex);
			goto out;
		}
	}
	mutex_unlock(&key_tfm_list_mutex);

	/* resolve registered sigs against the keyring; failure here only
	 * produces a warning -- the mount proceeds */
	rc = ecryptfs_init_global_auth_toks(mount_crypt_stat);
	if (rc)
		printk(KERN_WARNING "One or more global auth toks could not "
		       "properly register; rc = [%d]\n", rc);
out:
	return rc;
}
/* Slab cache for struct ecryptfs_sb_info (allocated per mount). */
struct kmem_cache *ecryptfs_sb_info_cache;
/* Forward declaration; referenced by ecryptfs_mount() to reject
 * eCryptfs-on-eCryptfs stacking. */
static struct file_system_type ecryptfs_fs_type;
/**
* ecryptfs_get_sb
* @fs_type
* @flags
* @dev_name: The path to mount over
* @raw_data: The options passed into the kernel
*/
static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *raw_data)
{
	struct super_block *s;
	struct ecryptfs_sb_info *sbi;
	struct ecryptfs_dentry_info *root_info;
	const char *err = "Getting sb failed";	/* updated as we pass each stage */
	struct inode *inode;
	struct path path;
	uid_t check_ruid;
	int rc;

	/* Per-superblock private info; freed on the "out" path until
	 * ownership is handed to the superblock below. */
	sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
	if (!sbi) {
		rc = -ENOMEM;
		goto out;
	}
	rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
	if (rc) {
		err = "Error parsing options";
		goto out;
	}
	/* eCryptfs stacks on another filesystem, so it gets an anonymous
	 * superblock rather than one tied to a block device. */
	s = sget(fs_type, NULL, set_anon_super, NULL);
	if (IS_ERR(s)) {
		rc = PTR_ERR(s);
		goto out;
	}
	rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
	if (rc)
		goto out1;

	ecryptfs_set_superblock_private(s, sbi);
	s->s_bdi = &sbi->bdi;

	/* ->kill_sb() will take care of sbi after that point */
	sbi = NULL;
	s->s_op = &ecryptfs_sops;
	s->s_d_op = &ecryptfs_dops;

	err = "Reading sb failed";
	/* Resolve the lower directory that eCryptfs will stack on top of. */
	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
		goto out1;
	}
	/* Refuse to stack eCryptfs on top of itself. */
	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
		rc = -EINVAL;
		printk(KERN_ERR "Mount on filesystem of type "
			"eCryptfs explicitly disallowed due to "
			"known incompatibilities\n");
		goto out_free;
	}

	/* Optionally require that the mounter own the lower directory. */
	if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) {
		rc = -EPERM;
		printk(KERN_ERR "Mount of device (uid: %d) not owned by "
		       "requested user (uid: %d)\n",
			path.dentry->d_inode->i_uid, current_uid());
		goto out_free;
	}

	ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
	/**
	 * Set the POSIX ACL flag based on whether they're enabled in the lower
	 * mount. Force a read-only eCryptfs mount if the lower mount is ro.
	 * Allow a ro eCryptfs mount even when the lower mount is rw.
	 */
	s->s_flags = flags & ~MS_POSIXACL;
	s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);

	/* Mirror lower filesystem limits so VFS checks stay consistent. */
	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
	s->s_blocksize = path.dentry->d_sb->s_blocksize;
	s->s_magic = ECRYPTFS_SUPER_MAGIC;

	inode = ecryptfs_get_inode(path.dentry->d_inode, s);
	rc = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_free;

	s->s_root = d_alloc_root(inode);
	if (!s->s_root) {
		iput(inode);
		rc = -ENOMEM;
		goto out_free;
	}

	rc = -ENOMEM;
	root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
	if (!root_info)
		goto out_free;

	/* ->kill_sb() will take care of root_info */
	ecryptfs_set_dentry_private(s->s_root, root_info);
	ecryptfs_set_dentry_lower(s->s_root, path.dentry);
	ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);

	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);

	/* Error unwinding: drop the lower path reference (if taken), tear
	 * down the superblock, then free sbi only if ownership was never
	 * transferred to the superblock (sbi was NULLed above). */
out_free:
	path_put(&path);
out1:
	deactivate_locked_super(s);
out:
	if (sbi) {
		ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
		kmem_cache_free(ecryptfs_sb_info_cache, sbi);
	}
	printk(KERN_ERR "%s; rc = [%d]\n", err, rc);
	return ERR_PTR(rc);
}
/**
* ecryptfs_kill_block_super
* @sb: The ecryptfs super block
*
* Used to bring the superblock down and free the private data.
*/
/*
 * Bring the (anonymous) superblock down, then release the per-superblock
 * private data that ->mount() attached to it, if any.
 */
static void ecryptfs_kill_block_super(struct super_block *sb)
{
	struct ecryptfs_sb_info *priv = ecryptfs_superblock_to_private(sb);

	kill_anon_super(sb);
	if (priv) {
		ecryptfs_destroy_mount_crypt_stat(&priv->mount_crypt_stat);
		bdi_destroy(&priv->bdi);
		kmem_cache_free(ecryptfs_sb_info_cache, priv);
	}
}
/* Filesystem type registered with the VFS; also used by ecryptfs_mount()
 * to detect (and reject) eCryptfs stacked on eCryptfs. */
static struct file_system_type ecryptfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "ecryptfs",
	.mount = ecryptfs_mount,
	.kill_sb = ecryptfs_kill_block_super,
	.fs_flags = 0
};
/**
* inode_info_init_once
*
* Initializes the ecryptfs_inode_info_cache when it is created
*/
/**
 * inode_info_init_once
 *
 * Slab constructor for ecryptfs_inode_info_cache: initializes the
 * embedded VFS inode once per object allocation.
 */
static void
inode_info_init_once(void *vptr)
{
	struct ecryptfs_inode_info *info = vptr;

	inode_init_once(&info->vfs_inode);
}
/* Descriptor for one kmem cache that eCryptfs creates at module init. */
static struct ecryptfs_cache_info {
	struct kmem_cache **cache;	/* where to store the created cache */
	const char *name;		/* slab name shown in /proc/slabinfo */
	size_t size;			/* object size */
	void (*ctor)(void *obj);	/* optional once-per-object constructor */
} ecryptfs_cache_infos[] = {
	{
		.cache = &ecryptfs_auth_tok_list_item_cache,
		.name = "ecryptfs_auth_tok_list_item",
		.size = sizeof(struct ecryptfs_auth_tok_list_item),
	},
	{
		.cache = &ecryptfs_file_info_cache,
		.name = "ecryptfs_file_cache",
		.size = sizeof(struct ecryptfs_file_info),
	},
	{
		.cache = &ecryptfs_dentry_info_cache,
		.name = "ecryptfs_dentry_info_cache",
		.size = sizeof(struct ecryptfs_dentry_info),
	},
	{
		/* The only cache with a constructor: the embedded VFS inode
		 * must be initialized exactly once per slab object. */
		.cache = &ecryptfs_inode_info_cache,
		.name = "ecryptfs_inode_cache",
		.size = sizeof(struct ecryptfs_inode_info),
		.ctor = inode_info_init_once,
	},
	{
		.cache = &ecryptfs_sb_info_cache,
		.name = "ecryptfs_sb_cache",
		.size = sizeof(struct ecryptfs_sb_info),
	},
	{
		/* Page-sized buffers for file headers and xattrs. */
		.cache = &ecryptfs_header_cache,
		.name = "ecryptfs_headers",
		.size = PAGE_CACHE_SIZE,
	},
	{
		.cache = &ecryptfs_xattr_cache,
		.name = "ecryptfs_xattr_cache",
		.size = PAGE_CACHE_SIZE,
	},
	{
		.cache = &ecryptfs_key_record_cache,
		.name = "ecryptfs_key_record_cache",
		.size = sizeof(struct ecryptfs_key_record),
	},
	{
		.cache = &ecryptfs_key_sig_cache,
		.name = "ecryptfs_key_sig_cache",
		.size = sizeof(struct ecryptfs_key_sig),
	},
	{
		.cache = &ecryptfs_global_auth_tok_cache,
		.name = "ecryptfs_global_auth_tok_cache",
		.size = sizeof(struct ecryptfs_global_auth_tok),
	},
	{
		.cache = &ecryptfs_key_tfm_cache,
		.name = "ecryptfs_key_tfm_cache",
		.size = sizeof(struct ecryptfs_key_tfm),
	},
	{
		.cache = &ecryptfs_open_req_cache,
		.name = "ecryptfs_open_req_cache",
		.size = sizeof(struct ecryptfs_open_req),
	},
};
static void ecryptfs_free_kmem_caches(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
struct ecryptfs_cache_info *info;
info = &ecryptfs_cache_infos[i];
if (*(info->cache))
kmem_cache_destroy(*(info->cache));
}
}
/**
* ecryptfs_init_kmem_caches
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_init_kmem_caches(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
struct ecryptfs_cache_info *info;
info = &ecryptfs_cache_infos[i];
*(info->cache) = kmem_cache_create(info->name, info->size,
0, SLAB_HWCACHE_ALIGN, info->ctor);
if (!*(info->cache)) {
ecryptfs_free_kmem_caches();
ecryptfs_printk(KERN_WARNING, "%s: "
"kmem_cache_create failed\n",
info->name);
return -ENOMEM;
}
}
return 0;
}
static struct kobject *ecryptfs_kobj;
/* sysfs "version" attribute: reports the versioning capability mask. */
static ssize_t version_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK);
}
/* Read-only sysfs attribute backed by version_show() above. */
static struct kobj_attribute version_attr = __ATTR_RO(version);

/* NULL-terminated attribute list grouped under the ecryptfs kobject. */
static struct attribute *attributes[] = {
	&version_attr.attr,
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = attributes,
};
/*
 * Create the /sys/fs/ecryptfs kobject and attach the attribute group.
 * On group-creation failure the kobject reference is dropped again so
 * nothing is left registered.
 */
static int do_sysfs_registration(void)
{
	int rc;

	ecryptfs_kobj = kobject_create_and_add("ecryptfs", fs_kobj);
	if (!ecryptfs_kobj) {
		printk(KERN_ERR "Unable to create ecryptfs kset\n");
		return -ENOMEM;
	}
	rc = sysfs_create_group(ecryptfs_kobj, &attr_group);
	if (rc) {
		printk(KERN_ERR
		       "Unable to create ecryptfs version attributes\n");
		kobject_put(ecryptfs_kobj);
	}
	return rc;
}
/* Undo do_sysfs_registration(): remove the attribute group, then drop
 * the last reference to the ecryptfs kobject. */
static void do_sysfs_unregistration(void)
{
	sysfs_remove_group(ecryptfs_kobj, &attr_group);
	kobject_put(ecryptfs_kobj);
}
/*
 * Module init: sanity-check the extent size, then bring the subsystems
 * up in order (caches, filesystem registration, sysfs, kthread,
 * messaging, crypto).  The error labels unwind in exact reverse order,
 * so every successfully initialized stage is torn down on failure of a
 * later one.
 */
static int __init ecryptfs_init(void)
{
	int rc;

	/* The crypto extent must fit in a page cache page. */
	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
				"larger than the host's page size, and so "
				"eCryptfs cannot run on this system. The "
				"default eCryptfs extent size is [%u] bytes; "
				"the page size is [%lu] bytes.\n",
				ECRYPTFS_DEFAULT_EXTENT_SIZE,
				(unsigned long)PAGE_CACHE_SIZE);
		goto out;
	}
	rc = ecryptfs_init_kmem_caches();
	if (rc) {
		printk(KERN_ERR
		       "Failed to allocate one or more kmem_cache objects\n");
		goto out;
	}
	rc = register_filesystem(&ecryptfs_fs_type);
	if (rc) {
		printk(KERN_ERR "Failed to register filesystem\n");
		goto out_free_kmem_caches;
	}
	rc = do_sysfs_registration();
	if (rc) {
		printk(KERN_ERR "sysfs registration failed\n");
		goto out_unregister_filesystem;
	}
	rc = ecryptfs_init_kthread();
	if (rc) {
		printk(KERN_ERR "%s: kthread initialization failed; "
		       "rc = [%d]\n", __func__, rc);
		goto out_do_sysfs_unregistration;
	}
	rc = ecryptfs_init_messaging();
	if (rc) {
		printk(KERN_ERR "Failure occurred while attempting to "
				"initialize the communications channel to "
				"ecryptfsd\n");
		goto out_destroy_kthread;
	}
	rc = ecryptfs_init_crypto();
	if (rc) {
		printk(KERN_ERR "Failure whilst attempting to init crypto; "
		       "rc = [%d]\n", rc);
		goto out_release_messaging;
	}
	/* Warn loudly: verbose mode leaks secret material to the syslog. */
	if (ecryptfs_verbosity > 0)
		printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values "
			"will be written to the syslog!\n", ecryptfs_verbosity);

	goto out;
	/* Unwind path: each label tears down one stage, newest first. */
out_release_messaging:
	ecryptfs_release_messaging();
out_destroy_kthread:
	ecryptfs_destroy_kthread();
out_do_sysfs_unregistration:
	do_sysfs_unregistration();
out_unregister_filesystem:
	unregister_filesystem(&ecryptfs_fs_type);
out_free_kmem_caches:
	ecryptfs_free_kmem_caches();
out:
	return rc;
}
/* Module exit: tear everything down in the reverse of the init order
 * used in ecryptfs_init().  A crypto-destroy failure is only logged --
 * teardown continues regardless. */
static void __exit ecryptfs_exit(void)
{
	int rc;

	rc = ecryptfs_destroy_crypto();
	if (rc)
		printk(KERN_ERR "Failure whilst attempting to destroy crypto; "
		       "rc = [%d]\n", rc);
	ecryptfs_release_messaging();
	ecryptfs_destroy_kthread();
	do_sysfs_unregistration();
	unregister_filesystem(&ecryptfs_fs_type);
	ecryptfs_free_kmem_caches();
}
MODULE_AUTHOR("Michael A. Halcrow <mhalcrow@us.ibm.com>");
MODULE_DESCRIPTION("eCryptfs");
MODULE_LICENSE("GPL");
module_init(ecryptfs_init)
module_exit(ecryptfs_exit)
| gpl-2.0 |
TeamFreedom/mecha_2.6.35 | arch/mn10300/mm/dma-alloc.c | 769 | 2010 | /* MN10300 Dynamic DMA mapping support
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* Derived from: arch/i386/kernel/pci-dma.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <asm/io.h>
static unsigned long pci_sram_allocated = 0xbc000000;
/*
 * Allocate a DMA-coherent buffer.  Small-enough requests are carved out
 * of a dedicated PCI SRAM window (0xbc000000..0xbe000000) via a simple
 * bump pointer; anything else comes from the page allocator and is
 * remapped through the uncached window (OR 0x20000000).
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, int gfp)
{
	unsigned long addr;
	void *ret;

	printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);

	/* Try the PCI SRAM bump allocator first.  Allocations are rounded
	 * up to 256 bytes and never returned to this pool (dma_free_coherent
	 * ignores addresses in this range). */
	if (0xbe000000 - pci_sram_allocated >= size) {
		size = (size + 255) & ~255;
		addr = pci_sram_allocated;
		pci_sram_allocated += size;
		ret = (void *) addr;
		goto done;
	}

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Devices that cannot address all 32 bits must allocate from the
	 * DMA zone. */
	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	addr = __get_free_pages(gfp, get_order(size));
	if (!addr)
		return NULL;

	/* map the coherent memory through the uncached memory window */
	ret = (void *) (addr | 0x20000000);

	/* fill the memory with obvious rubbish */
	memset((void *) addr, 0xfb, size);

	/* write back and evict all cache lines covering this region */
	/* NOTE(review): only PAGE_SIZE bytes are flushed here even though
	 * the allocation may span multiple pages -- confirm intent. */
	mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);

done:
	*dma_handle = virt_to_bus((void *) addr);
	printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
	return ret;
}
/*
 * Free a buffer returned by dma_alloc_coherent().  The uncached-window
 * bit (0x20000000) is stripped to recover the cached virtual address;
 * addresses at or above 0x9c000000 came from the PCI SRAM bump
 * allocator and are deliberately never freed.
 */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr & ~0x20000000;

	if (addr >= 0x9c000000)
		return;

	free_pages(addr, get_order(size));
}
| gpl-2.0 |
volk3/CS736 | drivers/media/rc/keymaps/rc-nebula.c | 1793 | 2381 | /* nebula.h - Keytable for nebula Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Scancode-to-keycode table for the Nebula remote.  Scancodes 0x00-0x09
 * map to the digit keys; the rest follow the remote's button layout. */
static struct rc_map_table nebula[] = {
	{ 0x0000, KEY_0 },
	{ 0x0001, KEY_1 },
	{ 0x0002, KEY_2 },
	{ 0x0003, KEY_3 },
	{ 0x0004, KEY_4 },
	{ 0x0005, KEY_5 },
	{ 0x0006, KEY_6 },
	{ 0x0007, KEY_7 },
	{ 0x0008, KEY_8 },
	{ 0x0009, KEY_9 },
	{ 0x000a, KEY_TV },
	{ 0x000b, KEY_AUX },
	{ 0x000c, KEY_DVD },
	{ 0x000d, KEY_POWER },
	{ 0x000e, KEY_CAMERA },	/* labelled 'Picture' */
	{ 0x000f, KEY_AUDIO },
	{ 0x0010, KEY_INFO },
	{ 0x0011, KEY_F13 },	/* 16:9 */
	{ 0x0012, KEY_F14 },	/* 14:9 */
	{ 0x0013, KEY_EPG },
	{ 0x0014, KEY_EXIT },
	{ 0x0015, KEY_MENU },
	{ 0x0016, KEY_UP },
	{ 0x0017, KEY_DOWN },
	{ 0x0018, KEY_LEFT },
	{ 0x0019, KEY_RIGHT },
	{ 0x001a, KEY_ENTER },
	{ 0x001b, KEY_CHANNELUP },
	{ 0x001c, KEY_CHANNELDOWN },
	{ 0x001d, KEY_VOLUMEUP },
	{ 0x001e, KEY_VOLUMEDOWN },
	{ 0x001f, KEY_RED },
	{ 0x0020, KEY_GREEN },
	{ 0x0021, KEY_YELLOW },
	{ 0x0022, KEY_BLUE },
	{ 0x0023, KEY_SUBTITLE },
	{ 0x0024, KEY_F15 },	/* AD */
	{ 0x0025, KEY_TEXT },
	{ 0x0026, KEY_MUTE },
	{ 0x0027, KEY_REWIND },
	{ 0x0028, KEY_STOP },
	{ 0x0029, KEY_PLAY },
	{ 0x002a, KEY_FASTFORWARD },
	{ 0x002b, KEY_F16 },	/* chapter */
	{ 0x002c, KEY_PAUSE },
	{ 0x002d, KEY_PLAY },
	{ 0x002e, KEY_RECORD },
	{ 0x002f, KEY_F17 },	/* picture in picture */
	{ 0x0030, KEY_KPPLUS },	/* zoom in */
	{ 0x0031, KEY_KPMINUS },	/* zoom out */
	{ 0x0032, KEY_F18 },	/* capture */
	{ 0x0033, KEY_F19 },	/* web */
	{ 0x0034, KEY_EMAIL },
	{ 0x0035, KEY_PHONE },
	{ 0x0036, KEY_PC },
};
/* rc-core map wrapper: the table above, decoded with the RC-5 protocol. */
static struct rc_map_list nebula_map = {
	.map = {
		.scan    = nebula,
		.size    = ARRAY_SIZE(nebula),
		.rc_type = RC_TYPE_RC5,
		.name    = RC_MAP_NEBULA,
	}
};
/* Register the Nebula keymap with rc-core at module load. */
static int __init init_rc_map_nebula(void)
{
	return rc_map_register(&nebula_map);
}
/* Unregister the Nebula keymap at module unload. */
static void __exit exit_rc_map_nebula(void)
{
	rc_map_unregister(&nebula_map);
}
module_init(init_rc_map_nebula)
module_exit(exit_rc_map_nebula)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| gpl-2.0 |
s0be/android_kernel_letv_msm8994 | drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2305 | 21887 | /*
* Copyright © 2006-2011 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/i2c.h>
#include <linux/dmi.h>
#include <drm/drmP.h>
#include "intel_bios.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "power.h"
#include <linux/pm_runtime.h>
#include "cdv_device.h"
/**
* LVDS I2C backlight control macros
*/
#define BRIGHTNESS_MAX_LEVEL 100	/* user-visible brightness scale 0..100 */
#define BRIGHTNESS_MASK	0xFF		/* 8-bit I2C brightness value */
#define BLC_I2C_TYPE	0x01		/* backlight controlled over I2C */
#define BLC_PWM_TYPT	0x02		/* backlight controlled via PWM */

#define BLC_POLARITY_NORMAL 0
#define BLC_POLARITY_INVERSE 1		/* larger value == dimmer panel */

/* PWM register frequency limits and fixed-point precision. */
#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
/* Per-encoder private state for the CDV LVDS output. */
struct cdv_intel_lvds_priv {
	/**
	 * Saved LVDO output states
	 */
	uint32_t savePP_ON;		/* panel power-on delays */
	uint32_t savePP_OFF;		/* panel power-off delays */
	uint32_t saveLVDS;		/* LVDS port control */
	uint32_t savePP_CONTROL;	/* panel power control */
	uint32_t savePP_CYCLE;		/* panel power cycle delay */
	uint32_t savePFIT_CONTROL;	/* panel fitter control */
	uint32_t savePFIT_PGM_RATIOS;	/* panel fitter scaling ratios */
	uint32_t saveBLC_PWM_CTL;	/* backlight PWM control */
};
/*
 * Returns the maximum level of the backlight duty cycle field, i.e.
 * twice the modulation-frequency field of BLC_PWM_CTL.  Falls back to
 * the saved register copy when the device cannot be powered up.
 */
static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 max_level;

	if (!gma_power_begin(dev, false))
		return ((dev_priv->regs.saveBLC_PWM_CTL &
			 BACKLIGHT_MODULATION_FREQ_MASK) >>
			BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;

	max_level = ((REG_READ(BLC_PWM_CTL) &
		      BACKLIGHT_MODULATION_FREQ_MASK) >>
		     BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
	gma_power_end(dev);
	return max_level;
}
/* The I2C/PWM brightness helpers below are compiled out; they are kept
 * as reference for the two hardware backlight-control paths. */
#if 0
/*
 * Set LVDS backlight level by I2C command
 */
static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
					unsigned int level)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
	u8 out_buf[2];
	unsigned int blc_i2c_brightness;

	struct i2c_msg msgs[] = {
		{
			.addr = lvds_i2c_bus->slave_addr,
			.flags = 0,
			.len = 2,
			.buf = out_buf,
		}
	};

	/* Scale the 0..BRIGHTNESS_MAX_LEVEL input onto the 8-bit range. */
	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
			     BRIGHTNESS_MASK /
			     BRIGHTNESS_MAX_LEVEL);

	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;

	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
	out_buf[1] = (u8)blc_i2c_brightness;

	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
		return 0;

	DRM_ERROR("I2C transfer error\n");
	return -1;
}

static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	u32 max_pwm_blc;
	u32 blc_pwm_duty_cycle;

	max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);

	/*BLC_PWM_CTL Should be initiated while backlight device init*/
	BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);

	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;

	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;

	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
	REG_WRITE(BLC_PWM_CTL,
		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
		  (blc_pwm_duty_cycle));

	return 0;
}

/*
 * Set LVDS backlight level either by I2C or PWM
 */
void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->lvds_bl) {
		DRM_ERROR("NO LVDS Backlight Info\n");
		return;
	}

	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
		cdv_lvds_i2c_set_brightness(dev, level);
	else
		cdv_lvds_pwm_set_brightness(dev, level);
}
#endif
/**
 * Sets the backlight level.
 *
 * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
 *
 * Writes the duty-cycle field of BLC_PWM_CTL when the device can be
 * powered; otherwise updates the saved register copy instead.
 */
static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 ctl;

	if (!gma_power_begin(dev, false)) {
		ctl = dev_priv->regs.saveBLC_PWM_CTL &
			~BACKLIGHT_DUTY_CYCLE_MASK;
		dev_priv->regs.saveBLC_PWM_CTL =
			ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT);
		return;
	}

	ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
	REG_WRITE(BLC_PWM_CTL, ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
	gma_power_end(dev);
}
/**
 * Sets the power state for the panel.
 *
 * Powering on sets POWER_TARGET_ON and busy-waits for PP_STATUS to
 * report PP_ON before restoring the backlight; powering off zeroes the
 * backlight first, then clears POWER_TARGET_ON and waits for PP_ON to
 * drop.  The backlight ordering avoids a visible flash while the panel
 * is transitioning.
 */
static void cdv_intel_lvds_set_power(struct drm_device *dev,
				     struct drm_encoder *encoder, bool on)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 pp_status;

	if (!gma_power_begin(dev, true))
		return;

	if (on) {
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
			  POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);	/* spin until panel is up */

		cdv_intel_lvds_set_backlight(dev,
				dev_priv->mode_dev.backlight_duty_cycle);
	} else {
		cdv_intel_lvds_set_backlight(dev, 0);

		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
			  ~POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while (pp_status & PP_ON);	/* spin until panel is down */
	}
	gma_power_end(dev);
}
/* DPMS hook: full DPMS_ON powers the panel on, every other DPMS state
 * powers it off. */
static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;

	cdv_intel_lvds_set_power(dev, encoder, mode == DRM_MODE_DPMS_ON);
	/* XXX: We never power down the LVDS pairs. */
}
/* Connector save/restore hooks are intentionally no-ops for this
 * output; no register state is captured here. */
static void cdv_intel_lvds_save(struct drm_connector *connector)
{
}

static void cdv_intel_lvds_restore(struct drm_connector *connector)
{
}
static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
/*
 * Fix up the adjusted mode before modeset: reject sharing a pipe with
 * another encoder, then force the panel's fixed timings into
 * adjusted_mode so the CRTC drives the native resolution (the panel
 * fitter scales the requested mode onto it).
 */
static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	struct drm_encoder *tmp_encoder;
	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;

	/* Should never happen!! */
	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
			    head) {
		if (tmp_encoder != encoder
		    && tmp_encoder->crtc == encoder->crtc) {
			printk(KERN_ERR "Can't enable LVDS and another "
			       "encoder on the same pipe\n");
			return false;
		}
	}

	/*
	 * If we have timings from the BIOS for the panel, put them in
	 * to the adjusted mode. The CRTC will be set up for this mode,
	 * with the panel scaling set up to source from the H/VDisplay
	 * of the original mode.
	 */
	if (panel_fixed_mode != NULL) {
		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
		adjusted_mode->htotal = panel_fixed_mode->htotal;
		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
		adjusted_mode->clock = panel_fixed_mode->clock;
		/* Recompute CRTC timing fields after the overwrite above. */
		drm_mode_set_crtcinfo(adjusted_mode,
				      CRTC_INTERLACE_HALVE_V);
	}

	/*
	 * XXX: It would be nice to support lower refresh rates on the
	 * panels to reduce power consumption, and perhaps match the
	 * user's requested refresh rate.
	 */
	return true;
}
/*
 * Modeset "prepare" hook: snapshot the current backlight PWM state so
 * commit can restore the same duty cycle, then power the panel off for
 * the duration of the modeset.
 */
static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;

	if (!gma_power_begin(dev, true))
		return;

	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
					  BACKLIGHT_DUTY_CYCLE_MASK);

	cdv_intel_lvds_set_power(dev, encoder, false);

	gma_power_end(dev);
}
/*
 * Modeset "commit" hook: re-enable the panel.  If the saved duty cycle
 * is zero (panel would stay dark), fall back to the maximum level.
 */
static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &priv->mode_dev;

	if (!mode_dev->backlight_duty_cycle)
		mode_dev->backlight_duty_cycle =
			cdv_intel_lvds_get_max_backlight(dev);

	cdv_intel_lvds_set_power(dev, encoder, true);
}
/*
 * Program the panel fitter for the new mode: enable automatic scaling
 * only when the requested mode differs from the panel's native size,
 * select the pipe, and optionally enable 8-to-6 dithering.
 */
static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
							encoder->crtc);
	u32 pfit_control;

	/*
	 * The LVDS pin pair will already have been turned on in the
	 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
	 * settings.
	 */

	/*
	 * Enable automatic panel scaling so that non-native modes fill the
	 * screen. Should be enabled before the pipe is enabled, according to
	 * register description and PRM.
	 */
	if (mode->hdisplay != adjusted_mode->hdisplay ||
	    mode->vdisplay != adjusted_mode->vdisplay)
		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
				HORIZ_INTERP_BILINEAR);
	else
		pfit_control = 0;	/* native mode: bypass the fitter */

	pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;

	if (dev_priv->lvds_dither)
		pfit_control |= PANEL_8TO6_DITHER_ENABLE;

	REG_WRITE(PFIT_CONTROL, pfit_control);
}
/**
 * Detect the LVDS connection.
 *
 * This always returns CONNECTOR_STATUS_CONNECTED.
 * This connector should only have
 * been set up if the LVDS was actually connected anyway.
 */
static enum drm_connector_status cdv_intel_lvds_detect(
				struct drm_connector *connector, bool force)
{
	return connector_status_connected;
}
/**
 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
 *
 * Probes EDID over the encoder's I2C bus first; if that yields modes,
 * returns their count.  Otherwise widens the sync ranges (so any mode
 * survives validity filtering) and falls back to duplicating the fixed
 * panel mode from the BIOS.  Returns the number of modes added.
 */
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_encoder *psb_intel_encoder =
					psb_intel_attached_encoder(connector);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int ret;

	ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);

	if (ret)
		return ret;

	/* Didn't get an EDID, so
	 * Set wide sync ranges so we get all modes
	 * handed to valid_mode for checking
	 */
	connector->display_info.min_vfreq = 0;
	connector->display_info.max_vfreq = 200;
	connector->display_info.min_hfreq = 0;
	connector->display_info.max_hfreq = 200;
	if (mode_dev->panel_fixed_mode != NULL) {
		struct drm_display_mode *mode =
		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
		/* drm_mode_duplicate() allocates; on failure the original
		 * code handed NULL to drm_mode_probed_add(), which would
		 * dereference it.  Report no modes instead. */
		if (!mode)
			return 0;
		drm_mode_probed_add(connector, mode);
		return 1;
	}

	return 0;
}
/**
* cdv_intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
*
* Unregister the DDC bus for this connector then free the driver private
* structure.
*/
/**
 * cdv_intel_lvds_destroy - unregister and free LVDS structures
 * @connector: connector to free
 *
 * Unregister the DDC bus for this connector then free the driver private
 * structure.
 */
static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
	struct psb_intel_encoder *psb_intel_encoder =
					psb_intel_attached_encoder(connector);

	/* Tear down the backlight/DDC I2C channel if one was created. */
	if (psb_intel_encoder->i2c_bus)
		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/*
 * Handle connector property changes: "scaling mode" revalidates and
 * re-applies the saved mode, "backlight" forwards the level to the
 * backlight layer, and "DPMS" is routed to the encoder's dpms helper.
 * Returns 0 on success, -1 on any failure (this driver's convention).
 */
static int cdv_intel_lvds_set_property(struct drm_connector *connector,
				       struct drm_property *property,
				       uint64_t value)
{
	struct drm_encoder *encoder = connector->encoder;

	if (!strcmp(property->name, "scaling mode") && encoder) {
		struct psb_intel_crtc *crtc =
					to_psb_intel_crtc(encoder->crtc);
		uint64_t curValue;

		if (!crtc)
			return -1;

		/* Only the three known scaling modes are accepted. */
		switch (value) {
		case DRM_MODE_SCALE_FULLSCREEN:
			break;
		case DRM_MODE_SCALE_NO_SCALE:
			break;
		case DRM_MODE_SCALE_ASPECT:
			break;
		default:
			return -1;
		}

		if (drm_object_property_get_value(&connector->base,
						  property,
						  &curValue))
			return -1;

		/* No-op if the mode is unchanged. */
		if (curValue == value)
			return 0;

		if (drm_object_property_set_value(&connector->base,
						  property,
						  value))
			return -1;

		/* Re-run the modeset so the new scaling takes effect,
		 * but only if a mode was previously committed. */
		if (crtc->saved_mode.hdisplay != 0 &&
		    crtc->saved_mode.vdisplay != 0) {
			if (!drm_crtc_helper_set_mode(encoder->crtc,
						      &crtc->saved_mode,
						      encoder->crtc->x,
						      encoder->crtc->y,
						      encoder->crtc->fb))
				return -1;
		}
	} else if (!strcmp(property->name, "backlight") && encoder) {
		if (drm_object_property_set_value(&connector->base,
						  property,
						  value))
			return -1;
		else
			gma_backlight_set(encoder->dev, value);
	} else if (!strcmp(property->name, "DPMS") && encoder) {
		struct drm_encoder_helper_funcs *helpers =
					encoder->helper_private;
		helpers->dpms(encoder, value);
	}
	return 0;
}
/* Encoder helper callbacks wired to the LVDS implementations above. */
static const struct drm_encoder_helper_funcs
					cdv_intel_lvds_helper_funcs = {
	.dpms = cdv_intel_lvds_encoder_dpms,
	.mode_fixup = cdv_intel_lvds_mode_fixup,
	.prepare = cdv_intel_lvds_prepare,
	.mode_set = cdv_intel_lvds_mode_set,
	.commit = cdv_intel_lvds_commit,
};

/* Connector probe helpers: mode enumeration and validation. */
static const struct drm_connector_helper_funcs
				cdv_intel_lvds_connector_helper_funcs = {
	.get_modes = cdv_intel_lvds_get_modes,
	.mode_valid = cdv_intel_lvds_mode_valid,
	.best_encoder = psb_intel_best_encoder,
};

/* Connector entry points exposed to the DRM core. */
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.save = cdv_intel_lvds_save,
	.restore = cdv_intel_lvds_restore,
	.detect = cdv_intel_lvds_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = cdv_intel_lvds_set_property,
	.destroy = cdv_intel_lvds_destroy,
};

/* Encoder teardown: only the generic DRM cleanup is needed. */
static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
	.destroy = cdv_intel_lvds_enc_destroy,
};
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
* If it is present, return 1.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the LVDS is present.
*/
/*
 * Enumerate the child dev array parsed from VBT to check whether the
 * LVDS is present, optionally reporting its I2C pin through @i2c_pin.
 * With no child table at all, the LVDS is assumed present.
 */
static bool lvds_is_present_in_vbt(struct drm_device *dev,
				   u8 *i2c_pin)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	int idx;

	if (!dev_priv->child_dev_num)
		return true;

	for (idx = 0; idx < dev_priv->child_dev_num; idx++) {
		struct child_device_config *child = &dev_priv->child_dev[idx];

		/* Skip anything that is not an internal flat panel; both
		 * the new and the old device-type identifiers are checked
		 * for compatibility with some BIOSes. */
		if (child->device_type != DEVICE_TYPE_INT_LFP &&
		    child->device_type != DEVICE_TYPE_LFP)
			continue;

		if (child->i2c_pin)
			*i2c_pin = child->i2c_pin;

		/* BIOS writers cannot be trusted to populate the VBT
		 * correctly.  A non-zero addin offset means real AIM data
		 * exists -- a good indicator the panel is actually there. */
		if (child->addin_offset)
			return true;

		/* Some BIOSes instantiate the device with no additional
		 * data at all; if the VBT arrived via the OpRegion, trust
		 * that its author validated the LVDS's existence. */
		if (dev_priv->opregion.vbt)
			return true;
	}

	return false;
}
/**
 * cdv_intel_lvds_init - setup LVDS connectors on this device
 * @dev: drm device
 * @mode_dev: mode device used to store the discovered fixed panel mode
 *
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present).
 */
void cdv_intel_lvds_init(struct drm_device *dev,
		     struct psb_intel_mode_device *mode_dev)
{
	struct psb_intel_encoder *psb_intel_encoder;
	struct psb_intel_connector *psb_intel_connector;
	struct cdv_intel_lvds_priv *lvds_priv;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_display_mode *scan;
	struct drm_crtc *crtc;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 lvds;
	int pipe;
	u8 pin;
	/* Bail out early if the VBT says there is no LVDS panel. */
	pin = GMBUS_PORT_PANEL;
	if (!lvds_is_present_in_vbt(dev, &pin)) {
		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
		return;
	}
	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
				    GFP_KERNEL);
	if (!psb_intel_encoder)
		return;
	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
				      GFP_KERNEL);
	if (!psb_intel_connector)
		goto failed_connector;
	lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
	if (!lvds_priv)
		goto failed_lvds_priv;
	psb_intel_encoder->dev_priv = lvds_priv;
	connector = &psb_intel_connector->base;
	encoder = &psb_intel_encoder->base;
	/* Register the connector and encoder with the DRM core and tie
	 * them together. */
	drm_connector_init(dev, connector,
			   &cdv_intel_lvds_connector_funcs,
			   DRM_MODE_CONNECTOR_LVDS);
	drm_encoder_init(dev, encoder,
			 &cdv_intel_lvds_enc_funcs,
			 DRM_MODE_ENCODER_LVDS);
	psb_intel_connector_attach_encoder(psb_intel_connector,
					   psb_intel_encoder);
	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
	drm_connector_helper_add(connector,
				 &cdv_intel_lvds_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;
	/* Attach connector properties */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_FULLSCREEN);
	drm_object_attach_property(&connector->base,
				   dev_priv->backlight_property,
				   BRIGHTNESS_MAX_LEVEL);
	/**
	 * Set up I2C bus (backlight controller)
	 * FIXME: destroy the i2c_bus on exit
	 */
	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
							 GPIOB,
							 "LVDSBLC_B");
	if (!psb_intel_encoder->i2c_bus) {
		dev_printk(KERN_ERR,
			&dev->pdev->dev, "I2C bus registration failed.\n");
		goto failed_blc_i2c;
	}
	psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
	dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
	/*
	 * LVDS discovery:
	 * 1) check for EDID on DDC
	 * 2) check for VBT data
	 * 3) check to see if LVDS is already on
	 *    if none of the above, no panel
	 * 4) make sure lid is open
	 *    if closed, act like it's not there for now
	 */
	/* Set up the DDC bus. */
	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
							 GPIOC,
							 "LVDSDDC_C");
	if (!psb_intel_encoder->ddc_bus) {
		dev_printk(KERN_ERR, &dev->pdev->dev,
			   "DDC bus registration " "failed.\n");
		goto failed_ddc;
	}
	/*
	 * Attempt to get the fixed panel mode from DDC.  Assume that the
	 * preferred mode is the right one.
	 */
	psb_intel_ddc_get_modes(connector,
				&psb_intel_encoder->ddc_bus->adapter);
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
			mode_dev->panel_fixed_mode =
			    drm_mode_duplicate(dev, scan);
			goto out;	/* FIXME: check for quirks */
		}
	}
	/* Failed to get EDID, what about VBT? do we need this? */
	if (dev_priv->lfp_lvds_vbt_mode) {
		mode_dev->panel_fixed_mode =
			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
				DRM_MODE_TYPE_PREFERRED;
			goto out;	/* FIXME: check for quirks */
		}
	}
	/*
	 * If we didn't get EDID, try checking if the panel is already turned
	 * on.  If so, assume that whatever is currently programmed is the
	 * correct mode.
	 */
	lvds = REG_READ(LVDS);
	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
	if (crtc && (lvds & LVDS_PORT_EN)) {
		mode_dev->panel_fixed_mode =
		    cdv_intel_crtc_mode_get(dev, crtc);
		if (mode_dev->panel_fixed_mode) {
			mode_dev->panel_fixed_mode->type |=
			    DRM_MODE_TYPE_PREFERRED;
			goto out;	/* FIXME: check for quirks */
		}
	}
	/* If we still don't have a mode after all that, give up. */
	if (!mode_dev->panel_fixed_mode) {
		DRM_DEBUG
			("Found no modes on the lvds, ignoring the LVDS\n");
		goto failed_find;
	}
	/* setup PWM: route the backlight PWM to whichever pipe is driving
	 * the panel, then enable it. */
	{
		u32 pwm;
		pwm = REG_READ(BLC_PWM_CTL2);
		if (pipe == 1)
			pwm |= PWM_PIPE_B;
		else
			pwm &= ~PWM_PIPE_B;
		pwm |= PWM_ENABLE;
		REG_WRITE(BLC_PWM_CTL2, pwm);
	}
out:
	drm_sysfs_connector_add(connector);
	return;
/* Error unwind: labels are ordered so that each failure point falls
 * through the teardown of everything created before it. */
failed_find:
	printk(KERN_ERR "Failed find\n");
	if (psb_intel_encoder->ddc_bus)
		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
failed_ddc:
	printk(KERN_ERR "Failed DDC\n");
	if (psb_intel_encoder->i2c_bus)
		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
failed_blc_i2c:
	printk(KERN_ERR "Failed BLC\n");
	drm_encoder_cleanup(encoder);
	drm_connector_cleanup(connector);
	kfree(lvds_priv);
failed_lvds_priv:
	kfree(psb_intel_connector);
failed_connector:
	kfree(psb_intel_encoder);
}
| gpl-2.0 |
Shmarkus/android_kernel_rockchip_rk292x | drivers/staging/iio/accel/lis3l02dq_core.c | 2305 | 20291 | /*
* lis3l02dq.c support STMicroelectronics LISD02DQ
* 3d 2g Linear Accelerometers via SPI
*
* Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Settings:
* 16 bit left justified mode used.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../ring_generic.h"
#include "accel.h"
#include "lis3l02dq.h"
/* At the moment the spi framework doesn't allow global setting of cs_change.
* It's in the likely to be added comment at the top of spi.h.
* This means that use cannot be made of spi_write etc.
*/
/* direct copy of the irq_default_primary_handler */
#ifndef CONFIG_IIO_RING_BUFFER
/* Primary (hard) IRQ handler used when no ring buffer is configured:
 * defer all work to the threaded handler. */
static irqreturn_t lis3l02dq_noring(int irq, void *private)
{
	return IRQ_WAKE_THREAD;
}
#endif
/**
 * lis3l02dq_spi_read_reg_8() - read a single byte from one register
 * @indio_dev: iio_dev for this actual device
 * @reg_address: address of the register to read
 * @val: location in which the value read back is returned
 *
 * Returns 0 on success, otherwise the error from spi_sync().
 **/
int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
			     u8 reg_address, u8 *val)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_message message;
	int status;
	struct spi_transfer transfer = {
		.tx_buf = st->tx,
		.rx_buf = st->rx,
		.bits_per_word = 8,
		.len = 2,
	};

	mutex_lock(&st->buf_lock);
	/* Read command byte followed by one dummy byte; the register
	 * contents come back in the second received byte. */
	st->tx[0] = LIS3L02DQ_READ_REG(reg_address);
	st->tx[1] = 0;

	spi_message_init(&message);
	spi_message_add_tail(&transfer, &message);
	status = spi_sync(st->us, &message);
	*val = st->rx[1];
	mutex_unlock(&st->buf_lock);

	return status;
}
/**
 * lis3l02dq_spi_write_reg_8() - write a single byte to one register
 * @indio_dev: iio_dev for this device
 * @reg_address: address of the register to write
 * @val: value to be written
 *
 * Returns 0 on success, otherwise the error from spi_write().
 **/
int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
			      u8 reg_address,
			      u8 val)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int status;

	mutex_lock(&st->buf_lock);
	/* Write command byte followed by the payload byte. */
	st->tx[0] = LIS3L02DQ_WRITE_REG(reg_address);
	st->tx[1] = val;
	status = spi_write(st->us, st->tx, 2);
	mutex_unlock(&st->buf_lock);

	return status;
}
/**
 * lisl302dq_spi_write_reg_s16() - write 2 bytes to a pair of registers
 * @indio_dev: iio_dev for this device
 * @lower_reg_address: the address of the lower of the two registers.
 *               Second register is assumed to have address one greater.
 * @value: value to be written (low byte to the lower register)
 **/
static int lis3l02dq_spi_write_reg_s16(struct iio_dev *indio_dev,
				       u8 lower_reg_address,
				       s16 value)
{
	int ret;
	struct spi_message msg;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	/* Two 2-byte transfers (command + data each); cs_change on the
	 * first drops chip select between the two register writes. */
	struct spi_transfer xfers[] = { {
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		}, {
			.tx_buf = st->tx + 2,
			.bits_per_word = 8,
			.len = 2,
		},
	};
	mutex_lock(&st->buf_lock);
	st->tx[0] = LIS3L02DQ_WRITE_REG(lower_reg_address);
	st->tx[1] = value & 0xFF;
	st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1);
	st->tx[3] = (value >> 8) & 0xFF;
	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	return ret;
}
/* Read two consecutive 8-bit registers and combine them into a signed
 * 16-bit value in *val (lower register provides the low byte).
 * On failure *val is left untouched and the spi_sync() error returned. */
static int lis3l02dq_read_reg_s16(struct iio_dev *indio_dev,
				  u8 lower_reg_address,
				  int *val)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_message msg;
	int ret;
	s16 tempval;
	/* Two 2-byte read transactions; cs_change releases chip select
	 * between the two register accesses. */
	struct spi_transfer xfers[] = { {
			.tx_buf = st->tx,
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		}, {
			.tx_buf = st->tx + 2,
			.rx_buf = st->rx + 2,
			.bits_per_word = 8,
			.len = 2,
		},
	};
	mutex_lock(&st->buf_lock);
	st->tx[0] = LIS3L02DQ_READ_REG(lower_reg_address);
	st->tx[1] = 0;
	st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address + 1);
	st->tx[3] = 0;
	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev, "problem when reading 16 bit register");
		goto error_ret;
	}
	tempval = (s16)(st->rx[1]) | ((s16)(st->rx[3]) << 8);
	*val = tempval;
error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}
/* Row indices into lis3l02dq_axis_map below. */
enum lis3l02dq_rm_ind {
	LIS3L02DQ_ACCEL,
	LIS3L02DQ_GAIN,
	LIS3L02DQ_BIAS,
};
/* Register address lookup table: [quantity][axis] with axis order x, y, z. */
static u8 lis3l02dq_axis_map[3][3] = {
	[LIS3L02DQ_ACCEL] = { LIS3L02DQ_REG_OUT_X_L_ADDR,
			      LIS3L02DQ_REG_OUT_Y_L_ADDR,
			      LIS3L02DQ_REG_OUT_Z_L_ADDR },
	[LIS3L02DQ_GAIN] = { LIS3L02DQ_REG_GAIN_X_ADDR,
			     LIS3L02DQ_REG_GAIN_Y_ADDR,
			     LIS3L02DQ_REG_GAIN_Z_ADDR },
	[LIS3L02DQ_BIAS] = { LIS3L02DQ_REG_OFFSET_X_ADDR,
			     LIS3L02DQ_REG_OFFSET_Y_ADDR,
			     LIS3L02DQ_REG_OFFSET_Z_ADDR }
};
/* Read the threshold value shared by all motion events (16-bit register
 * pair starting at LIS3L02DQ_REG_THS_L_ADDR). */
static int lis3l02dq_read_thresh(struct iio_dev *indio_dev,
				 int e,
				 int *val)
{
	return lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
}
/* Write the threshold value shared by all motion events. */
static int lis3l02dq_write_thresh(struct iio_dev *indio_dev,
				  int event_code,
				  int val)
{
	u16 value = val;
	return lis3l02dq_spi_write_reg_s16(indio_dev,
					   LIS3L02DQ_REG_THS_L_ADDR,
					   value);
}
/* Write per-axis calibration values.
 *
 * CALIBBIAS writes the signed offset register, CALIBSCALE the unsigned
 * 8-bit gain register.  Returns 0 on success, -EINVAL for out-of-range
 * values or unhandled mask bits.
 * NOTE(review): the accepted bias range -256..255 is wider than the s8
 * the value is truncated into — confirm the intended register width.
 */
static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	int ret = -EINVAL, reg;
	u8 uval;
	s8 sval;
	switch (mask) {
	case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
		if (val > 255 || val < -256)
			return -EINVAL;
		sval = val;
		reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
		ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
		break;
	case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
		if (val & ~0xFF)
			return -EINVAL;
		uval = val;
		reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
		ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, uval);
		break;
	}
	return ret;
}
/* Read a raw acceleration value (mask == 0) or a per-axis calibration
 * register (gain / bias).
 *
 * For mask == 0 the value comes from the ring buffer when triggered
 * buffering is active, otherwise directly from the output registers.
 * Returns an IIO_VAL_* code on success or a negative errno.
 */
static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val,
			      int *val2,
			      long mask)
{
	u8 utemp;
	s8 stemp;
	ssize_t ret = 0;
	u8 reg;
	switch (mask) {
	case 0:
		/* Take the iio_dev status lock */
		mutex_lock(&indio_dev->mlock);
		if (indio_dev->currentmode == INDIO_RING_TRIGGERED)
			ret = lis3l02dq_read_accel_from_ring(indio_dev->ring,
							     chan->scan_index,
							     val);
		else {
			reg = lis3l02dq_axis_map
				[LIS3L02DQ_ACCEL][chan->address];
			ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
		}
		mutex_unlock(&indio_dev->mlock);
		/* Don't report a stale *val when the read failed. */
		if (ret < 0)
			goto error_ret;
		return IIO_VAL_INT;
	case (1 << IIO_CHAN_INFO_SCALE_SHARED):
		/* Fixed scale, shared by all three axes. */
		*val = 0;
		*val2 = 9580;
		return IIO_VAL_INT_PLUS_MICRO;
	case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
		reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
		ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
		if (ret)
			goto error_ret;
		/* to match with what previous code does */
		*val = utemp;
		return IIO_VAL_INT;
	case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
		reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
		ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
		/* The error from the SPI read used to be ignored here,
		 * silently reporting garbage; check it as the gain case
		 * above does. */
		if (ret)
			goto error_ret;
		/* to match with what previous code does */
		*val = stemp;
		return IIO_VAL_INT;
	}
error_ret:
	return ret;
}
/* sysfs read of the sampling frequency: translate the decimation bits of
 * control register 1 back into a frequency in Hz. */
static ssize_t lis3l02dq_read_frequency(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	int ret, len = 0;
	s8 t;
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       (u8 *)&t);
	if (ret)
		return ret;
	t &= LIS3L02DQ_DEC_MASK;
	switch (t) {
	case LIS3L02DQ_REG_CTRL_1_DF_128:
		len = sprintf(buf, "280\n");
		break;
	case LIS3L02DQ_REG_CTRL_1_DF_64:
		len = sprintf(buf, "560\n");
		break;
	case LIS3L02DQ_REG_CTRL_1_DF_32:
		len = sprintf(buf, "1120\n");
		break;
	case LIS3L02DQ_REG_CTRL_1_DF_8:
		len = sprintf(buf, "4480\n");
		break;
	}
	return len;
}
/* sysfs write of the sampling frequency: map the requested frequency in
 * Hz onto the decimation bits of control register 1.  Values other than
 * the four supported rates are rejected with -EINVAL. */
static ssize_t lis3l02dq_write_frequency(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	long val;
	int ret;
	u8 t;
	ret = strict_strtol(buf, 10, &val);
	if (ret)
		return ret;
	/* Serialise the read-modify-write of the control register. */
	mutex_lock(&indio_dev->mlock);
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret_mutex;
	/* Wipe the bits clean */
	t &= ~LIS3L02DQ_DEC_MASK;
	switch (val) {
	case 280:
		t |= LIS3L02DQ_REG_CTRL_1_DF_128;
		break;
	case 560:
		t |= LIS3L02DQ_REG_CTRL_1_DF_64;
		break;
	case 1120:
		t |= LIS3L02DQ_REG_CTRL_1_DF_32;
		break;
	case 4480:
		t |= LIS3L02DQ_REG_CTRL_1_DF_8;
		break;
	default:
		ret = -EINVAL;
		goto error_ret_mutex;
	}
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);
error_ret_mutex:
	mutex_unlock(&indio_dev->mlock);
	return ret ? ret : len;
}
/* Bring the device into a known state: configure the SPI mode, program
 * default values into both control registers, verify control register 1
 * reads back correctly (a loose "is this really the right chip" check)
 * and enable latching of interrupt sources. */
static int lis3l02dq_initial_setup(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int ret;
	u8 val, valtest;
	st->us->mode = SPI_MODE_3;
	spi_setup(st->us);
	val = LIS3L02DQ_DEFAULT_CTRL1;
	/* Write suitable defaults to ctrl1 */
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					val);
	if (ret) {
		dev_err(&st->us->dev, "problem with setup control register 1");
		goto err_ret;
	}
	/* Repeat as sometimes doesn't work first time? */
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					val);
	if (ret) {
		dev_err(&st->us->dev, "problem with setup control register 1");
		goto err_ret;
	}
	/* Read back to check this has worked; acts as a loose test of
	 * having the correct chip attached. */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &valtest);
	if (ret || (valtest != val)) {
		dev_err(&indio_dev->dev,
			"device not playing ball %d %d\n", valtest, val);
		ret = -EINVAL;
		goto err_ret;
	}
	val = LIS3L02DQ_DEFAULT_CTRL2;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_2_ADDR,
					val);
	if (ret) {
		dev_err(&st->us->dev, "problem with setup control register 2");
		goto err_ret;
	}
	/* Latch interrupt sources until the wake-up ack register is read. */
	val = LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
					val);
	if (ret)
		dev_err(&st->us->dev, "problem with interrupt cfg register");
err_ret:
	return ret;
}
/* sampling_frequency sysfs attribute (read/write). */
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
			      lis3l02dq_read_frequency,
			      lis3l02dq_write_frequency);
/* Discrete supported sampling frequencies, in Hz. */
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("280 560 1120 4480");
/* Threaded interrupt handler: read the wake-up source register, push an
 * IIO threshold event for each flagged axis/direction, then read the
 * ack register to clear the latched sources and re-arm the interrupt.
 * NOTE(review): the return values of both SPI reads are ignored; on a
 * bus error stale flags could be processed — confirm this is acceptable.
 */
static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	u8 t;
	s64 timestamp = iio_get_time_ns();
	lis3l02dq_spi_read_reg_8(indio_dev,
				 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
				 &t);
	if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
		iio_push_event(indio_dev, 0,
			       IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
						  0,
						  IIO_EV_MOD_Z,
						  IIO_EV_TYPE_THRESH,
						  IIO_EV_DIR_RISING),
			       timestamp);
	if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
		iio_push_event(indio_dev, 0,
			       IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
						  0,
						  IIO_EV_MOD_Z,
						  IIO_EV_TYPE_THRESH,
						  IIO_EV_DIR_FALLING),
			       timestamp);
	if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
		iio_push_event(indio_dev, 0,
			       IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
						  0,
						  IIO_EV_MOD_Y,
						  IIO_EV_TYPE_THRESH,
						  IIO_EV_DIR_RISING),
			       timestamp);
	if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
		iio_push_event(indio_dev, 0,
			       IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
						  0,
						  IIO_EV_MOD_Y,
						  IIO_EV_TYPE_THRESH,
						  IIO_EV_DIR_FALLING),
			       timestamp);
	if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
		iio_push_event(indio_dev, 0,
			       IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
						  0,
						  IIO_EV_MOD_X,
						  IIO_EV_TYPE_THRESH,
						  IIO_EV_DIR_RISING),
			       timestamp);
	if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
		iio_push_event(indio_dev, 0,
			       IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
						  0,
						  IIO_EV_MOD_X,
						  IIO_EV_TYPE_THRESH,
						  IIO_EV_DIR_FALLING),
			       timestamp);
	/* Ack and allow for new interrupts */
	lis3l02dq_spi_read_reg_8(indio_dev,
				 LIS3L02DQ_REG_WAKE_UP_ACK_ADDR,
				 &t);
	return IRQ_HANDLED;
}
/* info_mask bits common to all three acceleration channels. */
#define LIS3L02DQ_INFO_MASK \
	((1 << IIO_CHAN_INFO_SCALE_SHARED) | \
	 (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
	 (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE))
/* Each channel supports rising and falling threshold events. */
#define LIS3L02DQ_EVENT_MASK \
	(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
	 IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
/* X/Y/Z acceleration channels (12-bit signed in a 16-bit word) plus a
 * software timestamp channel for the ring buffer. */
static struct iio_chan_spec lis3l02dq_channels[] = {
	IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X, LIS3L02DQ_INFO_MASK,
		 0, 0, IIO_ST('s', 12, 16, 0), LIS3L02DQ_EVENT_MASK),
	IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y, LIS3L02DQ_INFO_MASK,
		 1, 1, IIO_ST('s', 12, 16, 0), LIS3L02DQ_EVENT_MASK),
	IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z, LIS3L02DQ_INFO_MASK,
		 2, 2, IIO_ST('s', 12, 16, 0), LIS3L02DQ_EVENT_MASK),
	IIO_CHAN_SOFT_TIMESTAMP(3)
};
/* Report whether the threshold event for a given axis/direction is
 * currently enabled in the wake-up configuration register.
 * Returns 1 if enabled, 0 if not, or a negative errno on bus error. */
static ssize_t lis3l02dq_read_event_config(struct iio_dev *indio_dev,
					   int event_code)
{
	u8 val;
	int ret;
	/* Each axis owns two adjacent bits: low (falling) and high
	 * (rising) threshold crossing. */
	u8 mask = (1 << (IIO_EVENT_CODE_EXTRACT_MODIFIER(event_code)*2 +
			 (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
			  IIO_EV_DIR_RISING)));
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
				       &val);
	if (ret < 0)
		return ret;
	return !!(val & mask);
}
/* Disable the wake-up interrupt and clear the whole event mask.
 *
 * On success the previous value of control register 2 is returned
 * (>= 0) so the caller could restore the interrupt state later; on
 * failure a negative errno is returned.
 */
int lis3l02dq_disable_all_events(struct iio_dev *indio_dev)
{
	int ret;
	u8 control, val;
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &control);
	/* Previously a failed read was ignored and a garbage control
	 * value written back; bail out instead. */
	if (ret)
		goto error_ret;
	control &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_2_ADDR,
					control);
	if (ret)
		goto error_ret;
	/* Also for consistency clear the mask */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
				       &val);
	if (ret)
		goto error_ret;
	val &= ~0x3f;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
					val);
	if (ret)
		goto error_ret;
	ret = control;
error_ret:
	return ret;
}
/* Enable or disable the threshold event for one axis/direction.
 *
 * Updates the wake-up configuration mask and keeps the global interrupt
 * enable bit in control register 2 in sync: set while any event bit is
 * active, cleared when none remain. */
static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
					int event_code,
					int state)
{
	int ret = 0;
	u8 val, control;
	u8 currentlyset;
	bool changed = false;
	/* Two adjacent mask bits per axis: low (falling) and high
	 * (rising) threshold crossing. */
	u8 mask = (1 << (IIO_EVENT_CODE_EXTRACT_MODIFIER(event_code)*2 +
			 (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
			  IIO_EV_DIR_RISING)));
	mutex_lock(&indio_dev->mlock);
	/* read current control */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &control);
	if (ret)
		goto error_ret;
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
				       &val);
	if (ret < 0)
		goto error_ret;
	currentlyset = val & mask;
	if (!currentlyset && state) {
		changed = true;
		val |= mask;
	} else if (currentlyset && !state) {
		changed = true;
		val &= ~mask;
	}
	/* Only touch the hardware when the requested state differs. */
	if (changed) {
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
						val);
		if (ret)
			goto error_ret;
		control = val & 0x3f ?
			(control | LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT) :
			(control & ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT);
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						control);
		if (ret)
			goto error_ret;
	}
error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
/* Device-level sysfs attributes (sampling frequency control). */
static struct attribute *lis3l02dq_attributes[] = {
	&iio_dev_attr_sampling_frequency.dev_attr.attr,
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL
};
static const struct attribute_group lis3l02dq_attribute_group = {
	.attrs = lis3l02dq_attributes,
};
/* IIO core callbacks for this driver. */
static const struct iio_info lis3l02dq_info = {
	.num_interrupt_lines = 1,
	.read_raw = &lis3l02dq_read_raw,
	.write_raw = &lis3l02dq_write_raw,
	.read_event_value = &lis3l02dq_read_thresh,
	.write_event_value = &lis3l02dq_write_thresh,
	.write_event_config = &lis3l02dq_write_event_config,
	.read_event_config = &lis3l02dq_read_event_config,
	.driver_module = THIS_MODULE,
	.attrs = &lis3l02dq_attribute_group,
};
/* Allocate and register the IIO device, configure the optional ring
 * buffer, event interrupt and trigger, then program sane register
 * defaults.  Fully unwinds on any failure. */
static int __devinit lis3l02dq_probe(struct spi_device *spi)
{
	int ret, regdone = 0;
	struct lis3l02dq_state *st;
	struct iio_dev *indio_dev;
	indio_dev = iio_allocate_device(sizeof *st);
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	st = iio_priv(indio_dev);
	/* This is only used for removal purposes: lis3l02dq_remove()
	 * reads the drvdata back as a struct iio_dev *, so the iio_dev
	 * itself must be stored here, not the private state struct. */
	spi_set_drvdata(spi, indio_dev);
	st->us = spi;
	mutex_init(&st->buf_lock);
	indio_dev->name = spi->dev.driver->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->info = &lis3l02dq_info;
	indio_dev->channels = lis3l02dq_channels;
	indio_dev->num_channels = ARRAY_SIZE(lis3l02dq_channels);
	indio_dev->modes = INDIO_DIRECT_MODE;
	ret = lis3l02dq_configure_ring(indio_dev);
	if (ret)
		goto error_free_dev;
	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_unreg_ring_funcs;
	regdone = 1;
	ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
					  lis3l02dq_channels,
					  ARRAY_SIZE(lis3l02dq_channels));
	if (ret) {
		printk(KERN_ERR "failed to initialize the ring\n");
		goto error_unreg_ring_funcs;
	}
	/* Events and trigger are only available when a valid interrupt
	 * line is wired up. */
	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
		ret = request_threaded_irq(st->us->irq,
					   &lis3l02dq_th,
					   &lis3l02dq_event_handler,
					   IRQF_TRIGGER_RISING,
					   "lis3l02dq",
					   indio_dev);
		if (ret)
			goto error_uninitialize_ring;
		ret = lis3l02dq_probe_trigger(indio_dev);
		if (ret)
			goto error_free_interrupt;
	}
	/* Get the device into a sane initial state */
	ret = lis3l02dq_initial_setup(indio_dev);
	if (ret)
		goto error_remove_trigger;
	return 0;
error_remove_trigger:
	if (indio_dev->modes & INDIO_RING_TRIGGERED)
		lis3l02dq_remove_trigger(indio_dev);
error_free_interrupt:
	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
		free_irq(st->us->irq, indio_dev);
error_uninitialize_ring:
	iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
	lis3l02dq_unconfigure_ring(indio_dev);
error_free_dev:
	if (regdone)
		iio_device_unregister(indio_dev);
	else
		iio_free_device(indio_dev);
error_ret:
	return ret;
}
/* Power down the device by zeroing both control registers. */
static int lis3l02dq_stop_device(struct iio_dev *indio_dev)
{
	int ret;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	u8 val = 0;
	mutex_lock(&indio_dev->mlock);
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					val);
	if (ret) {
		dev_err(&st->us->dev, "problem with turning device off: ctrl1");
		goto err_ret;
	}
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_2_ADDR,
					val);
	if (ret)
		dev_err(&st->us->dev, "problem with turning device off: ctrl2");
err_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
/* fixme, confirm ordering in this function */
/* Quiesce the hardware (disable events, power down), then tear down the
 * interrupt, trigger, ring buffer and IIO registration.
 * NOTE(review): this relies on the iio_dev being stored as the SPI
 * drvdata at probe time — verify probe stores the iio_dev rather than
 * the state struct. */
static int lis3l02dq_remove(struct spi_device *spi)
{
	int ret;
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	ret = lis3l02dq_disable_all_events(indio_dev);
	if (ret)
		goto err_ret;
	ret = lis3l02dq_stop_device(indio_dev);
	if (ret)
		goto err_ret;
	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
		free_irq(st->us->irq, indio_dev);
	lis3l02dq_remove_trigger(indio_dev);
	iio_ring_buffer_unregister(indio_dev->ring);
	lis3l02dq_unconfigure_ring(indio_dev);
	iio_device_unregister(indio_dev);
	return 0;
err_ret:
	return ret;
}
/* SPI driver glue. */
static struct spi_driver lis3l02dq_driver = {
	.driver = {
		.name = "lis3l02dq",
		.owner = THIS_MODULE,
	},
	.probe = lis3l02dq_probe,
	.remove = __devexit_p(lis3l02dq_remove),
};
/* Register with the SPI core on module load. */
static __init int lis3l02dq_init(void)
{
	return spi_register_driver(&lis3l02dq_driver);
}
module_init(lis3l02dq_init);
/* Unregister on module unload. */
static __exit void lis3l02dq_exit(void)
{
	spi_unregister_driver(&lis3l02dq_driver);
}
module_exit(lis3l02dq_exit);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
CyanideL/android_kernel_moto_shamu | drivers/gpu/drm/nouveau/core/core/parent.c | 2561 | 3307 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <core/parent.h>
#include <core/client.h>
/*
 * Resolve a 16-bit class handle against a parent object.
 *
 * The parent's dynamically registered sclass list is searched first;
 * failing that, every engine enabled in the parent's engine mask is
 * queried for a static oclass with a matching handle.  On success
 * *pengine / *poclass are filled in and 0 returned, otherwise -EINVAL.
 */
int
nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
		      struct nouveau_object **pengine,
		      struct nouveau_oclass **poclass)
{
	struct nouveau_sclass *sclass;
	struct nouveau_engine *engine;
	struct nouveau_oclass *oclass;
	u64 mask;
	sclass = nv_parent(parent)->sclass;
	while (sclass) {
		if ((sclass->oclass->handle & 0xffff) == handle) {
			*pengine = parent->engine;
			*poclass = sclass->oclass;
			return 0;
		}
		sclass = sclass->sclass;
	}
	/* Walk each engine bit set in the parent's engine mask. */
	mask = nv_parent(parent)->engine;
	while (mask) {
		int i = ffsll(mask) - 1;
		/* Clients resolve through the device rather than through
		 * a specific engine instance. */
		if (nv_iclass(parent, NV_CLIENT_CLASS))
			engine = nv_engine(nv_client(parent)->device);
		else
			engine = nouveau_engine(parent, i);
		if (engine) {
			oclass = engine->sclass;
			/* Static class lists are terminated by an entry
			 * with NULL ofuncs. */
			while (oclass->ofuncs) {
				if ((oclass->handle & 0xffff) == handle) {
					*pengine = nv_object(engine);
					*poclass = oclass;
					return 0;
				}
				oclass++;
			}
		}
		mask &= ~(1ULL << i);
	}
	return -EINVAL;
}
/*
 * Base constructor for objects that may have children.
 *
 * Builds the underlying nouveau_object with NV_PARENT_CLASS or'd into
 * pclass, copies the static sclass array into a dynamically allocated
 * linked list (so it can be extended later) and records the mask of
 * engines whose classes children may be created from.
 * Returns 0 on success or a negative errno.
 */
int
nouveau_parent_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       struct nouveau_oclass *sclass, u64 engcls,
		       int size, void **pobject)
{
	struct nouveau_parent *object;
	struct nouveau_sclass *nclass;
	int ret;
	ret = nouveau_object_create_(parent, engine, oclass, pclass |
				     NV_PARENT_CLASS, size, pobject);
	object = *pobject;
	if (ret)
		return ret;
	/* Prepend each provided class to the object's sclass list.  On
	 * allocation failure, nodes already linked in are presumably
	 * freed by nouveau_parent_destroy() when the caller tears the
	 * partially constructed object down. */
	while (sclass && sclass->ofuncs) {
		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
		if (!nclass)
			return -ENOMEM;
		nclass->sclass = object->sclass;
		object->sclass = nclass;
		nclass->engine = engine ? nv_engine(engine) : NULL;
		nclass->oclass = sclass;
		sclass++;
	}
	object->engine = engcls;
	return 0;
}
void
nouveau_parent_destroy(struct nouveau_parent *parent)
{
struct nouveau_sclass *sclass;
while ((sclass = parent->sclass)) {
parent->sclass = sclass->sclass;
kfree(sclass);
}
nouveau_object_destroy(&parent->base);
}
/* Default destructor hook: forwards to nouveau_parent_destroy(). */
void
_nouveau_parent_dtor(struct nouveau_object *object)
{
	nouveau_parent_destroy(nv_parent(object));
}
| gpl-2.0 |
TamsuiCM11/android_kernel_sony_msm7x27a | arch/powerpc/kernel/align.c | 3841 | 25063 | /* align.c - handle alignment exceptions for the Power PC.
*
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Copyright (c) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
* PowerPC 403GCX/405GP modifications.
* Copyright (c) 2001-2002 PPC64 team, IBM Corp
* 64-bit and Power4 support
* Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
* <benh@kernel.crashing.org>
* Merge ppc32 and ppc64 implementations
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
/* Decoded description of an alignment-faulting access: operand length in
 * bytes plus flag bits (defined below) describing the operation. */
struct aligninfo {
	unsigned char len;
	unsigned char flags;
};
/* Instruction form tests based on the primary opcode (bits 0:5). */
#define IS_XFORM(inst)	(((inst) >> 26) == 31)
#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)
#define INVALID	{ 0, 0 }
/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define F	4	/* to/from fp regs */
#define U	8	/* update index register */
#define M	0x10	/* multiple load/store */
#define SW	0x20	/* byte swap */
#define S	0x40	/* single-precision fp or... */
#define SX	0x40	/* ... byte count in XER */
#define HARD	0x80	/* string, stwcx. */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#define SPLT	0x80	/* VSX SPLAT load */
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */
#define SWAP(a, b)	(t = (a), (a) = (b), (b) = t)
/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register. This array maps those
 * bits to information about the operand length and what the
 * instruction would do.  The table is indexed by the 7 bits shown in
 * binary at the start of each entry's comment.
 */
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	INVALID,		/* 00 0 1100 */
	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	{ 8, ST },		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	{ 16, LD+F },		/* 00 1 1100: lfdp */
	INVALID,		/* 00 1 1101 */
	{ 16, ST+F },		/* 00 1 1110: stfdp */
	INVALID,		/* 00 1 1111 */
	{ 8, LD },		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	{ 8, ST },		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	{ 4, LD+SE },		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	{ 8, LD+U },		/* 01 0 1101: ldu */
	INVALID,		/* 01 0 1110 */
	{ 8, ST+U },		/* 01 0 1111: stdu */
	{ 8, LD+U },		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	{ 8, ST+U },		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	{ 4, LD+SE+U },		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	INVALID,		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+SW },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+SW },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
	{ 4, LD+SE },		/* 10 0 1101: lwa */
	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
	INVALID,		/* 10 0 1111 */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	{ 16, LD+F },		/* 11 0 1100: lfdpx */
	{ 4, LD+F+SE },		/* 11 0 1101: lfiwax */
	{ 16, ST+F },		/* 11 0 1110: stfdpx */
	{ 4, ST+F },		/* 11 0 1111: stfiwx */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	{ 4, LD+F },		/* 11 1 1101: lfiwzx */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
/*
 * Recreate the DSISR value that a CPU with full alignment-interrupt
 * support would report for this instruction: relocate the relevant
 * instruction bits into the positions the aligninfo table lookup
 * expects.
 */
static inline unsigned make_dsisr(unsigned instr)
{
	/* bits 6:15 of the instruction always map to DSISR bits 22:31 */
	unsigned dsisr = (instr & 0x03ff0000) >> 16;

	if ((instr >> 26) == 31) {	/* X-form (primary opcode 31) */
		/* bits 29:30 --> 15:16 */
		dsisr |= (instr & 0x00000006) << 14;
		/* bit 25 --> 17 */
		dsisr |= (instr & 0x00000040) << 8;
		/* bits 21:24 --> 18:21 */
		dsisr |= (instr & 0x00000780) << 3;
		return dsisr;
	}

	/* D-form / DS-form */
	/* bit 5 --> 17 */
	dsisr |= (instr & 0x04000000) >> 12;
	/* bits 1:4 --> 18:21 */
	dsisr |= (instr & 0x78000000) >> 17;
	/* bits 30:31 --> 12:13, DS-form only (primary opcode >= 56) */
	if ((instr >> 26) >= 56)
		dsisr |= (instr & 0x00000003) << 18;

	return dsisr;
}
/*
 * The dcbz (data cache block zero) instruction
 * gives an alignment fault if used on non-cacheable
 * memory. We handle the fault mainly for the
 * case when we are running with the cache disabled
 * for debugging.
 */
static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
{
	long __user *p;
	int i, size;

#ifdef __powerpc64__
	size = ppc64_caches.dline_size;
#else
	size = L1_CACHE_BYTES;
#endif
	/* Round the faulting address down to a cache-block boundary. */
	p = (long __user *) (regs->dar & -size);
	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
		return -EFAULT;
	/* Zero the block one long at a time through the user-access API. */
	for (i = 0; i < size / sizeof(long); ++i)
		if (__put_user_inatomic(0, p+i))
			return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
/*
 * Emulate load & store multiple instructions
 * On 64-bit machines, these instructions only affect/use the
 * bottom 4 bytes of each register, and the loads clear the
 * top 4 bytes of the affected register.
 */
#ifdef CONFIG_PPC64
/* Address byte i of the 32-bit-wide view of GPRs starting at rp
 * (low half of each 64-bit slot in big-endian layout). */
#define REG_BYTE(rp, i)		*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
#endif

/* Apply the PPC-LE address swizzle (XOR with `swiz`, a local in callers). */
#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
/*
 * Emulate the load/store multiple and load/store string instructions
 * (lmw/stmw, lswi/stswi, lswx/stswx) on unaligned addresses.
 *
 * Returns 1 when handled, 0 when unsupported, -EFAULT on a bad address.
 *
 * Fix: repaired mojibake in the source — "®s" was a mangled "&regs".
 */
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
			    unsigned int reg, unsigned int nb,
			    unsigned int flags, unsigned int instr,
			    unsigned long swiz)
{
	unsigned long *rptr;
	unsigned int nb0, i, bswiz;
	unsigned long p;

	/*
	 * We do not try to emulate 8 bytes multiple as they aren't really
	 * available in our operating environments and we don't try to
	 * emulate multiples operations in kernel land as they should never
	 * be used/generated there at least not on unaligned boundaries
	 */
	if (unlikely((nb > 4) || !user_mode(regs)))
		return 0;

	/* lmw, stmw, lswi/x, stswi/x */
	nb0 = 0;
	if (flags & HARD) {
		if (flags & SX) {
			/* lswx/stswx: byte count comes from XER[57:63]. */
			nb = regs->xer & 127;
			if (nb == 0)
				return 1;
		} else {
			/*
			 * lswi/stswi: re-fetch the instruction to extract
			 * the NB field; NB == 0 means 32 bytes.
			 */
			unsigned long pc = regs->nip ^ (swiz & 4);

			if (__get_user_inatomic(instr,
						(unsigned int __user *)pc))
				return -EFAULT;
			if (swiz == 0 && (flags & SW))
				instr = cpu_to_le32(instr);
			nb = (instr >> 11) & 0x1f;
			if (nb == 0)
				nb = 32;
		}
		/* String ops can wrap from r31 back around to r0;
		 * nb0 is the byte count for the wrapped portion. */
		if (nb + reg * 4 > 128) {
			nb0 = nb + reg * 4 - 128;
			nb = 128 - reg * 4;
		}
	} else {
		/* lwm, stmw */
		nb = (32 - reg) * 4;
	}

	if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0))
		return -EFAULT;	/* bad address */

	rptr = &regs->gpr[reg];
	p = (unsigned long) addr;
	bswiz = (flags & SW)? 3: 0;	/* byte-reverse within words if set */

	if (!(flags & ST)) {
		/*
		 * This zeroes the top 4 bytes of the affected registers
		 * in 64-bit mode, and also zeroes out any remaining
		 * bytes of the last register for lsw*.
		 */
		memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
		if (nb0 > 0)
			memset(&regs->gpr[0], 0,
			       ((nb0 + 3) / 4) * sizeof(unsigned long));

		for (i = 0; i < nb; ++i, ++p)
			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			/* Wrapped portion goes into r0 onwards. */
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__get_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}

	} else {
		for (i = 0; i < nb; ++i, ++p)
			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__put_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	}
	return 1;
}
/*
 * Emulate floating-point pair loads and stores (lfdp[x]/stfdp[x]).
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 *
 * Returns 1 when handled, 0 for invalid forms, -EFAULT on a bad address.
 *
 * Fix: repaired mojibake in the source — "¤t" was a mangled "&current".
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
			   unsigned int flags)
{
	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
	int i, ret, sw = 0;

	if (!(flags & F))
		return 0;
	if (reg & 1)
		return 0;	/* invalid form: FRS/FRT must be even */
	if (flags & SW)
		sw = 7;		/* byte-reverse each 8-byte half */
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
#ifdef CONFIG_SPE
/*
 * Decode table for SPE vector loads/stores, indexed by the 5-bit
 * minor opcode ((instr >> 1) & 0x1f) — see emulate_spe() below.
 * Each entry gives the access length and LD/ST/endian/sign flags.
 */
static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */
	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};
/*
 * SPE minor opcode values ((instr >> 1) & 0x1f); these match the
 * spe_aligninfo[] indices above and are switched on in emulate_spe().
 */
#define EVLDD		0x00
#define EVLDW		0x01
#define EVLDH		0x02
#define EVLHHESPLAT	0x04
#define EVLHHOUSPLAT	0x06
#define EVLHHOSSPLAT	0x07
#define EVLWHE		0x08
#define EVLWHOU		0x0A
#define EVLWHOS		0x0B
#define EVLWWSPLAT	0x0C
#define EVLWHSPLAT	0x0E
#define EVSTDD		0x10
#define EVSTDW		0x11
#define EVSTDH		0x12
#define EVSTWHE		0x18
#define EVSTWHO		0x1A
#define EVSTWWE		0x1C
#define EVSTWWO		0x1E
/*
* Emulate SPE loads and stores.
* Only Book-E has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
unsigned int instr)
{
int t, ret;
union {
u64 ll;
u32 w[2];
u16 h[4];
u8 v[8];
} data, temp;
unsigned char __user *p, *addr;
unsigned long *evr = ¤t->thread.evr[reg];
unsigned int nb, flags;
instr = (instr >> 1) & 0x1f;
/* DAR has the operand effective address */
addr = (unsigned char __user *)regs->dar;
nb = spe_aligninfo[instr].len;
flags = spe_aligninfo[instr].flags;
/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
!access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
addr, nb)))
return -EFAULT;
/* userland only */
if (unlikely(!user_mode(regs)))
return 0;
flush_spe_to_thread(current);
/* If we are loading, get the data from user space, else
* get it from register values
*/
if (flags & ST) {
data.ll = 0;
switch (instr) {
case EVSTDD:
case EVSTDW:
case EVSTDH:
data.w[0] = *evr;
data.w[1] = regs->gpr[reg];
break;
case EVSTWHE:
data.h[2] = *evr >> 16;
data.h[3] = regs->gpr[reg] >> 16;
break;
case EVSTWHO:
data.h[2] = *evr & 0xffff;
data.h[3] = regs->gpr[reg] & 0xffff;
break;
case EVSTWWE:
data.w[1] = *evr;
break;
case EVSTWWO:
data.w[1] = regs->gpr[reg];
break;
default:
return -EINVAL;
}
} else {
temp.ll = data.ll = 0;
ret = 0;
p = addr;
switch (nb) {
case 8:
ret |= __get_user_inatomic(temp.v[0], p++);
ret |= __get_user_inatomic(temp.v[1], p++);
ret |= __get_user_inatomic(temp.v[2], p++);
ret |= __get_user_inatomic(temp.v[3], p++);
case 4:
ret |= __get_user_inatomic(temp.v[4], p++);
ret |= __get_user_inatomic(temp.v[5], p++);
case 2:
ret |= __get_user_inatomic(temp.v[6], p++);
ret |= __get_user_inatomic(temp.v[7], p++);
if (unlikely(ret))
return -EFAULT;
}
switch (instr) {
case EVLDD:
case EVLDW:
case EVLDH:
data.ll = temp.ll;
break;
case EVLHHESPLAT:
data.h[0] = temp.h[3];
data.h[2] = temp.h[3];
break;
case EVLHHOUSPLAT:
case EVLHHOSSPLAT:
data.h[1] = temp.h[3];
data.h[3] = temp.h[3];
break;
case EVLWHE:
data.h[0] = temp.h[2];
data.h[2] = temp.h[3];
break;
case EVLWHOU:
case EVLWHOS:
data.h[1] = temp.h[2];
data.h[3] = temp.h[3];
break;
case EVLWWSPLAT:
data.w[0] = temp.w[1];
data.w[1] = temp.w[1];
break;
case EVLWHSPLAT:
data.h[0] = temp.h[2];
data.h[1] = temp.h[2];
data.h[2] = temp.h[3];
data.h[3] = temp.h[3];
break;
default:
return -EINVAL;
}
}
if (flags & SW) {
switch (flags & 0xf0) {
case E8:
SWAP(data.v[0], data.v[7]);
SWAP(data.v[1], data.v[6]);
SWAP(data.v[2], data.v[5]);
SWAP(data.v[3], data.v[4]);
break;
case E4:
SWAP(data.v[0], data.v[3]);
SWAP(data.v[1], data.v[2]);
SWAP(data.v[4], data.v[7]);
SWAP(data.v[5], data.v[6]);
break;
/* Its half word endian */
default:
SWAP(data.v[0], data.v[1]);
SWAP(data.v[2], data.v[3]);
SWAP(data.v[4], data.v[5]);
SWAP(data.v[6], data.v[7]);
break;
}
}
if (flags & SE) {
data.w[0] = (s16)data.h[1];
data.w[1] = (s16)data.h[3];
}
/* Store result to memory or update registers */
if (flags & ST) {
ret = 0;
p = addr;
switch (nb) {
case 8:
ret |= __put_user_inatomic(data.v[0], p++);
ret |= __put_user_inatomic(data.v[1], p++);
ret |= __put_user_inatomic(data.v[2], p++);
ret |= __put_user_inatomic(data.v[3], p++);
case 4:
ret |= __put_user_inatomic(data.v[4], p++);
ret |= __put_user_inatomic(data.v[5], p++);
case 2:
ret |= __put_user_inatomic(data.v[6], p++);
ret |= __put_user_inatomic(data.v[7], p++);
}
if (unlikely(ret))
return -EFAULT;
} else {
*evr = data.w[0];
regs->gpr[reg] = data.w[1];
}
return 1;
}
#endif /* CONFIG_SPE */
#ifdef CONFIG_VSX
/*
 * Emulate VSX loads and stores on unaligned addresses.
 * `length` is the total access size (8 or 16 bytes), `elsize` the
 * element size for per-element byte swapping in little-endian mode.
 *
 * Returns 1 when handled, -EFAULT on a user-access fault.
 *
 * Fix: repaired mojibake in the source — "¤t" was a mangled "&current".
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length,
		       unsigned int elsize)
{
	char *ptr;
	unsigned long *lptr;
	int ret = 0;
	int sw = 0;
	int i, j;

	flush_vsx_to_thread(current);

	/* Registers 0-31 alias the FPRs, 32-63 the VRs. */
	if (reg < 32)
		ptr = (char *) &current->thread.TS_FPR(reg);
	else
		ptr = (char *) &current->thread.vr[reg - 32];

	lptr = (unsigned long *) ptr;

	if (flags & SW)
		sw = elsize-1;	/* XOR mask to reverse bytes within an element */

	for (j = 0; j < length; j += elsize) {
		for (i = 0; i < elsize; ++i) {
			if (flags & ST)
				ret |= __put_user(ptr[i^sw], addr + i);
			else
				ret |= __get_user(ptr[i^sw], addr + i);
		}
		ptr += elsize;
		addr += elsize;
	}

	if (!ret) {
		/* Update-form instructions write the EA back to RA. */
		if (flags & U)
			regs->gpr[areg] = regs->dar;

		/* Splat load copies the same data to top and bottom 8 bytes */
		if (flags & SPLT)
			lptr[1] = lptr[0];
		/* For 8 byte loads, zero the top 8 bytes */
		else if (!(flags & ST) && (8 == length))
			lptr[1] = 0;
	} else
		return -EFAULT;

	return 1;
}
#endif
/*
 * fix_alignment() - emulate the instruction that took an alignment exception.
 *
 * Called on alignment exception. Attempts to fixup.
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */
int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr, nb, flags, instruction = 0;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret, t;
	union {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
			unsigned hi32;
			int low32;
		} x32;
		struct {
			unsigned char hi48[6];
			short low16;
		} x16;
	} data;

	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;

	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;	/* PPC-LE address swizzle for the fetch */
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
		instruction = instr;
	}

	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */

#ifdef CONFIG_SPE
	/* Primary opcode 4 is the SPE vector unit; handled separately. */
	if ((instr >> 26) == 0x4) {
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif

	/* Build the aligninfo[] index from the DSISR opcode bits. */
	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;

	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;

	/* Byteswap little endian loads and stores */
	swiz = 0;
	if (regs->msr & MSR_LE) {
		flags ^= SW;
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping. To emulate this, we XOR each
		 * byte address with 7. We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
	}

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

#ifdef CONFIG_VSX
	/* VSX opcodes are decoded inline rather than via aligninfo[]. */
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		unsigned int elsize;

		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;

		/* Simple inline decoder instead of a table */
		/* VSX has only 8 and 16 byte memory accesses */
		nb = 8;
		if (instruction & 0x200)
			nb = 16;

		/* Vector stores in little-endian mode swap individual
		   elements, so process them separately */
		elsize = 4;
		if (instruction & 0x80)
			elsize = 8;

		flags = 0;
		if (regs->msr & MSR_LE)
			flags |= SW;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
			flags |= U;
		/* splat load needs a special decoder */
		if ((instruction & 0x400) == 0){
			flags |= SPLT;
			nb = 8;
		}
		PPC_WARN_ALIGNMENT(vsx, regs);
		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
	}
#endif
	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ) {
		PPC_WARN_ALIGNMENT(dcbz, regs);
		return emulate_dcbz(regs, addr);
	}
	if (unlikely(nb == 0))
		return 0;

	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M) {
		PPC_WARN_ALIGNMENT(multiple, regs);
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);
	}

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}

	/* Special case for 16-byte FP loads and stores */
	if (nb == 16) {
		PPC_WARN_ALIGNMENT(fp_pair, regs);
		return emulate_fp_pair(addr, reg, flags);
	}

	PPC_WARN_ALIGNMENT(unaligned, regs);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		data.ll = 0;
		ret = 0;
		p = (unsigned long) addr;
		/* Wider sizes intentionally fall through to fetch the
		 * remaining low-order bytes. */
		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
			/* fall through */
		case 4:
			ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
			/* fall through */
		case 2:
			ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
			ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
			if (unlikely(ret))
				return -EFAULT;
		}
	} else if (flags & F) {
		data.dd = current->thread.TS_FPR(reg);
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.v[4]);
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];

	if (flags & SW) {
		switch (nb) {
		case 8:
			SWAP(data.v[0], data.v[7]);
			SWAP(data.v[1], data.v[6]);
			SWAP(data.v[2], data.v[5]);
			SWAP(data.v[3], data.v[4]);
			break;
		case 4:
			SWAP(data.v[4], data.v[7]);
			SWAP(data.v[5], data.v[6]);
			break;
		case 2:
			SWAP(data.v[6], data.v[7]);
			break;
		}
	}

	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if ( nb == 2 )
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.v[4], &data.dd);
		preempt_enable();
#else
		return 0;
#endif
		break;
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = (unsigned long) addr;
		/* Same deliberate fall-through pattern as the load path. */
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
			/* fall through */
		case 4:
			ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
			/* fall through */
		case 2:
			ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
			ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
		}
		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.TS_FPR(reg) = data.dd;
	else
		regs->gpr[reg] = data.ll;

	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}
| gpl-2.0 |
sxwzhw/iproj | drivers/ata/pata_artop.c | 5121 | 12604 | /*
* pata_artop.c - ARTOP ATA controller driver
*
* (C) 2006 Red Hat
* (C) 2007,2011 Bartlomiej Zolnierkiewicz
*
* Based in part on drivers/ide/pci/aec62xx.c
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
* 865/865R fixes for Macintosh card version from a patch to the old
* driver by Thibaut VARENE <varenet@parisc-linux.org>
* When setting the PCI latency we must set 0x80 or higher for burst
* performance Alessandro Zummo <alessandro.zummo@towertech.it>
*
* TODO
* Investigate no_dsc on 850R
* Clock detect
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
#define DRV_VERSION "0.4.6"
/*
 * The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
 * get PCI bus speed functionality we leave this as 0. Its a variable
 * for when we get the functionality and also for folks wanting to
 * test stuff.
 */
static int clock = 0;	/* index into the timing tables: 0 = 33 MHz, 1 = over-clocked */
/**
 *	artop62x0_pre_reset	-	probe begin
 *	@link: link
 *	@deadline: deadline jiffies for the operation
 *
 *	On the odd-numbered device ids a per-port enable bit in config
 *	register 0x4A must be set for the port to exist; check it before
 *	the standard SFF prereset.
 */
static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits artop_enable_bits[] = {
		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	/* Odd numbered device ids are the units with enable bits. */
	if (pdev->device & 1) {
		if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
			return -ENOENT;
	}

	return ata_sff_prereset(link, deadline);
}
/**
 *	artop6260_cable_detect	-	identify cable type
 *	@ap: Port
 *
 *	Identify the cable type for the ARTOP interface in question.
 *	Config register 0x49 carries one "40-wire" bit per port.
 */
static int artop6260_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 cbl;

	pci_read_config_byte(pdev, 0x49, &cbl);
	return (cbl & (1 << ap->port_no)) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
/**
 *	artop6210_load_piomode - Load a set of PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device
 *	@pio: PIO mode
 *
 *	Set PIO mode for device, in host controller PCI config space. This
 *	is used both to set PIO timings in PIO mode and also to set the
 *	matching PIO clocking for UDMA, as well as the MWDMA timings.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
	static const u16 timing[2][5] = {
		{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
		{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
	};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int drive = 2 * ap->port_no + adev->devno;

	/* One timing word per drive, starting at config offset 0x40. */
	pci_write_config_word(pdev, 0x40 + 2 * drive, timing[clock][pio]);
}
/**
 *	artop6210_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *
 *	Set PIO mode for device, in host controller PCI config space. For
 *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
 *	the event UDMA is used the later call to set_dmamode will set the
 *	bits as required.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int drive = 2 * ap->port_no + adev->devno;
	u8 udma;

	artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);

	/* Knock out this drive's two UDMA mode bits in register 0x54;
	 * set_dmamode will restore them if UDMA is selected. */
	pci_read_config_byte(pdev, 0x54, &udma);
	udma &= ~(3 << (2 * drive));
	pci_write_config_byte(pdev, 0x54, udma);
}
/**
 *	artop6260_load_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *	@pio: PIO mode
 *
 *	Set PIO mode for device, in host controller PCI config space. The
 *	ARTOP6260 and relatives store the timing data differently — one
 *	byte per drive rather than the 6210's word layout.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
	static const u8 timing[2][5] = {
		{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
		{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
	};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int drive = 2 * ap->port_no + adev->devno;

	/* One timing byte per drive, starting at config offset 0x40. */
	pci_write_config_byte(pdev, 0x40 + drive, timing[clock][pio]);
}
/**
 *	artop6260_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *
 *	Set PIO mode for device, in host controller PCI config space. For
 *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
 *	the event UDMA is used the later call to set_dmamode will set the
 *	bits as required.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 udma;

	artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);

	/* Knock out this drive's UDMA nibble; set_dmamode restores it. */
	pci_read_config_byte(pdev, 0x44 + ap->port_no, &udma);
	udma &= ~(7 << (4 * adev->devno));	/* One nibble per drive */
	pci_write_config_byte(pdev, 0x44 + ap->port_no, udma);
}
/**
 *	artop6210_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device whose timings we are configuring
 *
 *	Set DMA mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int drive = 2 * ap->port_no + adev->devno;
	unsigned int pio = (adev->dma_mode == XFER_MW_DMA_0) ? 1 : 4;
	u8 udma;

	/* MWDMA/UDMA reuse the PIO timing active/recovery bits */
	artop6210_load_piomode(ap, adev, pio);

	pci_read_config_byte(pdev, 0x54, &udma);
	udma &= ~(3 << (2 * drive));

	/* Add ultra DMA bits if in UDMA mode */
	if (adev->dma_mode >= XFER_UDMA_0) {
		u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;

		if (mode == 0)
			mode = 1;
		udma |= (mode << (2 * drive));
	}

	pci_write_config_byte(pdev, 0x54, udma);
}
/**
 *	artop6260_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring
 *
 *	Set DMA mode for device, in host controller PCI config space. The
 *	ARTOP6260 and relatives store the timing data differently.
 *
 *	Fix: `pio` was initialized from adev->pio_mode and then
 *	unconditionally overwritten below — the dead store is removed.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ultra;

	/* Pick the PIO clocking that pairs with the DMA mode. */
	if (adev->dma_mode == XFER_MW_DMA_0)
		pio = 1;
	else
		pio = 4;

	/* Load the PIO timing active/recovery bits */
	artop6260_load_piomode(ap, adev, pio);

	/* Add ultra DMA bits if in UDMA mode */
	pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
	ultra &= ~(7 << (4 * adev->devno));	/* One nibble per drive */
	if (adev->dma_mode >= XFER_UDMA_0) {
		u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
		if (mode == 0)
			mode = 1;
		ultra |= (mode << (4 * adev->devno));
	}
	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
/**
 *	artop6210_qc_defer	-	implement serialization
 *	@qc: command
 *
 *	Issue commands per host on this chip: a command may only run when
 *	the other channel's state machine is idle.
 */
static int artop6210_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_host *host = qc->ap->host;
	struct ata_port *other = host->ports[qc->ap->port_no ^ 1];
	int rc;

	/* First apply the usual rules */
	rc = ata_std_qc_defer(qc);
	if (rc)
		return rc;

	/* Defer while the sibling channel is busy. */
	if (other && other->qc_active)
		return ATA_DEFER_PORT;

	return 0;
}
/* SCSI host template shared by every ARTOP variant. */
static struct scsi_host_template artop_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* 6210: 40-wire cable only, needs cross-channel command serialization. */
static struct ata_port_operations artop6210_ops = {
	.inherits = &ata_bmdma_port_ops,
	.cable_detect = ata_cable_40wire,
	.set_piomode = artop6210_set_piomode,
	.set_dmamode = artop6210_set_dmamode,
	.prereset = artop62x0_pre_reset,
	.qc_defer = artop6210_qc_defer,
};

/* 6260/6280 family: cable detect via config register 0x49. */
static struct ata_port_operations artop6260_ops = {
	.inherits = &ata_bmdma_port_ops,
	.cable_detect = artop6260_cable_detect,
	.set_piomode = artop6260_set_piomode,
	.set_dmamode = artop6260_set_dmamode,
	.prereset = artop62x0_pre_reset,
};
/*
 * One-time hardware fixup, applied at probe and after resume:
 * clears BIOS-left UDMA state on the 6210 and sets up the registers
 * the Mac-firmware variants (0x0008/0x0009) leave uninitialized.
 *
 * Fix: repaired mojibake in the source — "®" was a mangled "&reg".
 */
static void atp8xx_fixup(struct pci_dev *pdev)
{
	if (pdev->device == 0x0005)
		/* BIOS may have left us in UDMA, clear it before libata probe */
		pci_write_config_byte(pdev, 0x54, 0);
	else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
		u8 reg;

		/* Mac systems come up with some registers not set as we
		   will need them */

		/* Clear reset & test bits */
		pci_read_config_byte(pdev, 0x49, &reg);
		pci_write_config_byte(pdev, 0x49, reg & ~0x30);

		/* PCI latency must be > 0x80 for burst mode, tweak it
		 * if required.
		 */
		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
		if (reg <= 0x80)
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);

		/* Enable IRQ output and burst mode */
		pci_read_config_byte(pdev, 0x4a, &reg);
		pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
	}
}
/**
 *	artop_init_one - Register ARTOP ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@id: Entry in artop_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */
static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info_6210 = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask 	= ATA_UDMA2,
		.port_ops	= &artop6210_ops,
	};
	static const struct ata_port_info info_626x = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask 	= ATA_UDMA4,
		.port_ops	= &artop6260_ops,
	};
	static const struct ata_port_info info_628x = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask 	= ATA_UDMA5,
		.port_ops	= &artop6260_ops,
	};
	static const struct ata_port_info info_628x_fast = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask 	= ATA_UDMA6,
		.port_ops	= &artop6260_ops,
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* driver_data from the id table selects the chip family. */
	if (id->driver_data == 0)	/* 6210 variant */
		ppi[0] = &info_6210;
	else if (id->driver_data == 1)	/* 6260 */
		ppi[0] = &info_626x;
	else if (id->driver_data == 2)	{ /* 6280 or 6280 + fast */
		unsigned long io = pci_resource_start(pdev, 4);

		/* Probe BAR 4 to distinguish the fast (UDMA6) variant. */
		ppi[0] = &info_628x;
		if (inb(io) & 0x10)
			ppi[0] = &info_628x_fast;
	}

	BUG_ON(ppi[0] == NULL);

	atp8xx_fixup(pdev);

	return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
/* driver_data selects the family in artop_init_one():
 * 0 = 6210, 1 = 626x, 2 = 628x (fast variant probed at runtime). */
static const struct pci_device_id artop_pci_tbl[] = {
	{ PCI_VDEVICE(ARTOP, 0x0005), 0 },
	{ PCI_VDEVICE(ARTOP, 0x0006), 1 },
	{ PCI_VDEVICE(ARTOP, 0x0007), 1 },
	{ PCI_VDEVICE(ARTOP, 0x0008), 2 },
	{ PCI_VDEVICE(ARTOP, 0x0009), 2 },

	{ }	/* terminate list */
};
#ifdef CONFIG_PM
/* Resume hook: redo the chip fixup once the PCI core has restored
 * config space, then let libata resume the host. */
static int atp8xx_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	atp8xx_fixup(pdev);

	ata_host_resume(host);
	return 0;
}
#endif
static struct pci_driver artop_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= artop_pci_tbl,
	.probe		= artop_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= atp8xx_reinit_one,
#endif
};

/* Standard module entry/exit: just (un)register the PCI driver. */
static int __init artop_init(void)
{
	return pci_register_driver(&artop_pci_driver);
}

static void __exit artop_exit(void)
{
	pci_unregister_driver(&artop_pci_driver);
}

module_init(artop_init);
module_exit(artop_exit);

MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);
javelinanddart/android_kernel_htc_ville-liberty | arch/x86/platform/geode/net5501.c | 7169 | 3435 | /*
* System Specific setup for Soekris net5501
* At the moment this means setup of GPIO control of LEDs and buttons
* on net5501 boards.
*
*
* Copyright (C) 2008-2009 Tower Technologies
* Written by Alessandro Zummo <a.zummo@towertech.it>
*
* Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
* Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com>
* and Philip Prindeville <philipp@redfish-solutions.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <asm/geode.h>
#define BIOS_REGION_BASE 0xffff0000
#define BIOS_REGION_SIZE 0x00010000
/* Single front-panel reset button on GPIO 24, active low, polled. */
static struct gpio_keys_button net5501_gpio_buttons[] = {
	{
		.code = KEY_RESTART,
		.gpio = 24,
		.active_low = 1,
		.desc = "Reset button",
		.type = EV_KEY,
		.wakeup = 0,
		.debounce_interval = 100,	/* presumably milliseconds — confirm against gpio_keys */
		.can_disable = 0,
	}
};
static struct gpio_keys_platform_data net5501_buttons_data = {
	.buttons = net5501_gpio_buttons,
	.nbuttons = ARRAY_SIZE(net5501_gpio_buttons),
	.poll_interval = 20,
};

/* Platform device consumed by the gpio-keys-polled driver. */
static struct platform_device net5501_buttons_dev = {
	.name = "gpio-keys-polled",
	.id = 1,
	.dev = {
		.platform_data = &net5501_buttons_data,
	}
};
/* Single board LED on GPIO 6, lit by default via the default-on trigger. */
static struct gpio_led net5501_leds[] = {
	{
		.name = "net5501:1",
		.gpio = 6,
		.default_trigger = "default-on",
		.active_low = 0,
	},
};

static struct gpio_led_platform_data net5501_leds_data = {
	.num_leds = ARRAY_SIZE(net5501_leds),
	.leds = net5501_leds,
};

/* Platform device consumed by the leds-gpio driver. */
static struct platform_device net5501_leds_dev = {
	.name = "leds-gpio",
	.id = -1,
	.dev.platform_data = &net5501_leds_data,
};
static struct __initdata platform_device *net5501_devs[] = {
&net5501_buttons_dev,
&net5501_leds_dev,
};
/* Register the button and LED platform devices declared above. */
static void __init register_net5501(void)
{
	/* Setup LED control through leds-gpio driver */
	platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs));
}
/* Where in the BIOS ROM to look for a board-model signature. */
struct net5501_board {
	u16	offset;	/* offset of the model string within the ROM image */
	u16	len;	/* number of signature bytes to compare */
	char	*sig;	/* expected model string */
};

static struct net5501_board __initdata boards[] = {
	{ 0xb7b, 7, "net5501" },	/* net5501 v1.33/1.33c */
	{ 0xb1f, 7, "net5501" },	/* net5501 v1.32i */
};
/*
 * Map the BIOS ROM region and look for the Soekris comBIOS signature
 * followed by one of the known net5501 model strings.
 */
static bool __init net5501_present(void)
{
	unsigned char *rombase;
	bool match = false;
	int i;

	rombase = ioremap(BIOS_REGION_BASE, BIOS_REGION_SIZE - 1);
	if (!rombase) {
		printk(KERN_ERR "%s: failed to get rombase\n", KBUILD_MODNAME);
		return false;
	}

	/* comBIOS identifier lives at offset 0x20 (null terminated). */
	if (!memcmp(rombase + 0x20, "comBIOS", 7)) {
		for (i = 0; i < ARRAY_SIZE(boards); i++) {
			unsigned char *model = rombase + boards[i].offset;

			if (memcmp(model, boards[i].sig, boards[i].len))
				continue;

			printk(KERN_INFO "%s: system is recognized as \"%s\"\n",
			       KBUILD_MODNAME, model);
			match = true;
			break;
		}
	}

	iounmap(rombase);
	return match;
}
/*
 * Module entry point: register the net5501 peripherals when running on a
 * Geode CPU whose BIOS identifies the board as a net5501.  Always returns
 * 0 so loading is harmless on other hardware.
 */
static int __init net5501_init(void)
{
	if (is_geode() && net5501_present())
		register_net5501();

	return 0;
}
module_init(net5501_init);
MODULE_AUTHOR("Philip Prindeville <philipp@redfish-solutions.com>");
MODULE_DESCRIPTION("Soekris net5501 System Setup");
MODULE_LICENSE("GPL");
| gpl-2.0 |
embeddedarm/linux-3.0.35-imx6-android | arch/sparc/kernel/us2e_cpufreq.c | 7425 | 9847 | /* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*
* Many thanks to Dominik Brodowski for fixing up the cpufreq
* infrastructure in order to make this driver easier to implement.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/asi.h>
#include <asm/timer.h>
static struct cpufreq_driver *cpufreq_us2e_driver;
/* Per-CPU frequency table: five divisor entries plus the terminator. */
struct us2e_freq_percpu_info {
	struct cpufreq_frequency_table table[6];
};
/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;
#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
* in the ESTAR mode control register.
*/
#define ESTAR_MODE_DIV_1 0x0000000000000000UL
#define ESTAR_MODE_DIV_2 0x0000000000000001UL
#define ESTAR_MODE_DIV_4 0x0000000000000003UL
#define ESTAR_MODE_DIV_6 0x0000000000000002UL
#define ESTAR_MODE_DIV_8 0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT 8
#define MCTRL0_REFR_INTERVAL 7800
#define MCTRL0_REFR_CLKS_P_CNT 64
/* Read a hummingbird config register via a physical-address-bypass ASI load. */
static unsigned long read_hbreg(unsigned long addr)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
	return ret;
}
/*
 * Write a hummingbird config register; the membar orders the store before
 * any subsequent access.  ESTAR mode writes additionally wait for PLL lock.
 */
static void write_hbreg(unsigned long addr, unsigned long val)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
	if (addr == HBIRD_ESTAR_MODE_ADDR) {
		/* Need to wait 16 clock cycles for the PLL to lock. */
		udelay(1);
	}
}
/* Enable or disable DRAM self-refresh in the memory control register. */
static void self_refresh_ctl(int enable)
{
	unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	mctrl = enable ? (mctrl | MCTRL0_SREFRESH_ENAB)
		       : (mctrl & ~MCTRL0_SREFRESH_ENAB);

	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	/* Read back so the write is known to have posted. */
	(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}
/*
 * Reprogram the memory-controller refresh count for the new clock divisor
 * so the DRAM refresh interval stays constant in wall-clock time.
 *
 * @cpu_slowing_down: when set (and self-refresh is disabled), busy-wait
 * long enough for both the old and new refresh counts to drain.
 */
static void frob_mem_refresh(int cpu_slowing_down,
			     unsigned long clock_tick,
			     unsigned long old_divisor, unsigned long divisor)
{
	unsigned long old_refr_count, refr_count, mctrl;

	/* Refresh count, in controller clocks, for the post-change speed. */
	refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
		>> MCTRL0_REFR_COUNT_SHIFT;

	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
		unsigned long usecs;

		/* We have to wait for both refresh counts (old
		 * and new) to go to zero.
		 */
		usecs = (MCTRL0_REFR_CLKS_P_CNT *
			 (refr_count + old_refr_count) *
			 1000000UL *
			 old_divisor) / clock_tick;
		udelay(usecs + 1UL);
	}
}
/*
 * Switch the ESTAR clock divisor from @old_divisor to @divisor with
 * interrupts off.  Divisors above 2 may only be entered or left via
 * divisor 2 (handled by recursion), and the memory refresh rate and
 * self-refresh mode must be adjusted on the correct side of each change.
 */
static void us2e_transition(unsigned long estar, unsigned long new_bits,
			    unsigned long clock_tick,
			    unsigned long old_divisor, unsigned long divisor)
{
	unsigned long flags;

	local_irq_save(flags);

	estar &= ~ESTAR_MODE_DIV_MASK;

	/* This is based upon the state transition diagram in the IIe manual. */
	if (old_divisor == 2 && divisor == 1) {
		self_refresh_ctl(0);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
	} else if (old_divisor == 1 && divisor == 2) {
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		self_refresh_ctl(1);
	} else if (old_divisor == 1 && divisor > 2) {
		/* Step through divisor 2 on the way down in speed. */
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				1, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor > 2 && divisor == 1) {
		/* Step through divisor 2 on the way up in speed. */
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				old_divisor, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor < divisor) {
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
	} else if (old_divisor > divisor) {
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
	} else {
		BUG();
	}

	local_irq_restore(flags);
}
/* Map a frequency-table index (0-4) to its ESTAR mode register bits. */
static unsigned long index_to_estar_mode(unsigned int index)
{
	static const unsigned long modes[] = {
		ESTAR_MODE_DIV_1,
		ESTAR_MODE_DIV_2,
		ESTAR_MODE_DIV_4,
		ESTAR_MODE_DIV_6,
		ESTAR_MODE_DIV_8,
	};

	if (index >= ARRAY_SIZE(modes))
		BUG();

	return modes[index];
}
/* Map a frequency-table index (0-4) to its clock divisor. */
static unsigned long index_to_divisor(unsigned int index)
{
	static const unsigned long divisors[] = { 1, 2, 4, 6, 8 };

	if (index >= ARRAY_SIZE(divisors))
		BUG();

	return divisors[index];
}
/* Decode the clock divisor currently programmed in an ESTAR mode value. */
static unsigned long estar_to_divisor(unsigned long estar)
{
	switch (estar & ESTAR_MODE_DIV_MASK) {
	case ESTAR_MODE_DIV_1:
		return 1;
	case ESTAR_MODE_DIV_2:
		return 2;
	case ESTAR_MODE_DIV_4:
		return 4;
	case ESTAR_MODE_DIV_6:
		return 6;
	case ESTAR_MODE_DIV_8:
		return 8;
	}

	BUG();
	return 0;	/* unreachable */
}
/*
 * cpufreq callback: report the current frequency (kHz) of @cpu by
 * temporarily migrating the current task onto it to read its ESTAR mode
 * register.  Returns 0 if the cpu is offline.
 */
static unsigned int us2e_freq_get(unsigned int cpu)
{
	cpumask_t cpus_allowed;
	unsigned long clock_tick, estar;

	if (!cpu_online(cpu))
		return 0;

	/* Save affinity, hop onto the target cpu, then restore. */
	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	set_cpus_allowed_ptr(current, &cpus_allowed);

	return clock_tick / estar_to_divisor(estar);
}
/*
 * Program divisor table entry @index on @cpu: migrate onto the cpu,
 * issue cpufreq PRE/POSTCHANGE notifications, and run the ESTAR
 * transition only when the divisor actually changes.
 */
static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
	unsigned long new_bits, new_freq;
	unsigned long clock_tick, divisor, old_divisor, estar;
	cpumask_t cpus_allowed;
	struct cpufreq_freqs freqs;

	if (!cpu_online(cpu))
		return;

	/* Save affinity and hop onto the target cpu. */
	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	new_bits = index_to_estar_mode(index);
	divisor = index_to_divisor(index);
	new_freq /= divisor;

	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	old_divisor = estar_to_divisor(estar);

	freqs.old = clock_tick / old_divisor;
	freqs.new = new_freq;
	freqs.cpu = cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* us2e_transition() wants the tick rate in Hz, not kHz. */
	if (old_divisor != divisor)
		us2e_transition(estar, new_bits, clock_tick * 1000,
				old_divisor, divisor);

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	set_cpus_allowed_ptr(current, &cpus_allowed);
}
/*
 * cpufreq callback: select the table entry matching @target_freq under
 * @relation and program it.  Returns -EINVAL if no entry matches.
 */
static int us2e_freq_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int new_index = 0;

	if (cpufreq_frequency_table_target(policy,
					   &us2e_freq_table[policy->cpu].table[0],
					   target_freq, relation, &new_index))
		return -EINVAL;

	us2e_set_cpu_divider_index(policy->cpu, new_index);

	return 0;
}
/* cpufreq callback: clamp the policy limits against this CPU's table. */
static int us2e_freq_verify(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *table =
		&us2e_freq_table[policy->cpu].table[0];

	return cpufreq_frequency_table_verify(policy, table);
}
/*
 * cpufreq callback: populate this CPU's frequency table with the five
 * supported divisors (1, 2, 4, 6, 8) and terminate the table.
 *
 * Bug fix: the original wrote table[2] five times over (indices 2..5),
 * leaving table[3]/table[4] uninitialized and placing the terminator at
 * table[3], so only divisors 1, 2 and the last-written entry ever reached
 * the cpufreq core.  Each entry now fills its own slot and the
 * CPUFREQ_TABLE_END terminator lands in table[5].
 */
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	struct cpufreq_frequency_table *table =
		&us2e_freq_table[cpu].table[0];

	table[0].index = 0;
	table[0].frequency = clock_tick / 1;
	table[1].index = 1;
	table[1].frequency = clock_tick / 2;
	table[2].index = 2;
	table[2].frequency = clock_tick / 4;
	table[3].index = 3;
	table[3].frequency = clock_tick / 6;
	table[4].index = 4;
	table[4].frequency = clock_tick / 8;
	table[5].index = 5;
	table[5].frequency = CPUFREQ_TABLE_END;

	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;

	return cpufreq_frequency_table_cpuinfo(policy, table);
}
/*
 * cpufreq callback: restore full speed (divisor index 0) on teardown,
 * but only while the driver is still registered.
 */
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us2e_driver)
		us2e_set_cpu_divider_index(policy->cpu, 0);

	return 0;
}
/*
 * Module entry point: on an UltraSPARC-IIe (spitfire TLB family,
 * manufacturer 0x17 / implementation 0x13 per %ver), allocate and
 * register the cpufreq driver.  Returns -ENODEV on any other CPU.
 *
 * Fix: "driver->owner = THIS_MODULE," ended in a comma, silently chaining
 * into the following strcpy() through the comma operator; it is now a
 * normal semicolon-terminated assignment like its neighbours.
 */
static int __init us2e_freq_init(void)
{
	unsigned long manuf, impl, ver;
	int ret;

	if (tlb_type != spitfire)
		return -ENODEV;

	__asm__("rdpr %%ver, %0" : "=r" (ver));
	manuf = ((ver >> 48) & 0xffff);
	impl  = ((ver >> 32) & 0xffff);

	if (manuf == 0x17 && impl == 0x13) {
		struct cpufreq_driver *driver;

		ret = -ENOMEM;
		driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
		if (!driver)
			goto err_out;

		us2e_freq_table = kzalloc(
			(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
			GFP_KERNEL);
		if (!us2e_freq_table)
			goto err_out;

		driver->init = us2e_freq_cpu_init;
		driver->verify = us2e_freq_verify;
		driver->target = us2e_freq_target;
		driver->get = us2e_freq_get;
		driver->exit = us2e_freq_cpu_exit;
		driver->owner = THIS_MODULE;
		strcpy(driver->name, "UltraSPARC-IIe");

		cpufreq_us2e_driver = driver;
		ret = cpufreq_register_driver(driver);
		if (ret)
			goto err_out;

		return 0;

err_out:
		if (driver) {
			kfree(driver);
			cpufreq_us2e_driver = NULL;
		}
		kfree(us2e_freq_table);
		us2e_freq_table = NULL;
		return ret;
	}

	return -ENODEV;
}
/* Module exit: unregister the driver and free the driver/table memory. */
static void __exit us2e_freq_exit(void)
{
	if (cpufreq_us2e_driver) {
		cpufreq_unregister_driver(cpufreq_us2e_driver);
		kfree(cpufreq_us2e_driver);
		cpufreq_us2e_driver = NULL;
		kfree(us2e_freq_table);
		us2e_freq_table = NULL;
	}
}
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");
module_init(us2e_freq_init);
module_exit(us2e_freq_exit);
| gpl-2.0 |
intdes/linux-stable | drivers/char/tpm/tpm_infineon.c | 8193 | 17648 | /*
* Description:
* Device Driver for the Infineon Technologies
* SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
* Specifications at www.trustedcomputinggroup.org
*
* Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
* Sirrix AG - security technologies, http://www.sirrix.com and
* Applied Data Security Group, Ruhr-University Bochum, Germany
* Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/init.h>
#include <linux/pnp.h>
#include "tpm.h"
/* Infineon specific definitions */
/* maximum number of WTX-packages */
#define TPM_MAX_WTX_PACKAGES 50
/* msleep-Time for WTX-packages */
#define TPM_WTX_MSLEEP_TIME 20
/* msleep-Time --> Interval to check status register */
#define TPM_MSLEEP_TIME 3
/* gives number of max. msleep()-calls before throwing timeout */
#define TPM_MAX_TRIES 5000
#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1
#define TPM_INF_IO_PORT 0x0
#define TPM_INF_IO_MEM 0x1
#define TPM_INF_ADDR 0x0
#define TPM_INF_DATA 0x1
/*
 * Global device state: the driver supports a single TPM reachable either
 * through two PnP I/O port ranges or one memory-mapped region.
 */
struct tpm_inf_dev {
	int iotype;		/* TPM_INF_IO_PORT or TPM_INF_IO_MEM */

	void __iomem *mem_base;	/* MMIO ioremap'd addr */
	unsigned long map_base;	/* phys MMIO base */
	unsigned long map_size;	/* MMIO region size */
	unsigned int index_off;	/* index register offset */

	unsigned int data_regs;	/* Data registers */
	unsigned int data_size;

	unsigned int config_port;	/* IO Port config index reg */
	unsigned int config_size;
};

/* Single-instance device state shared by all accessors below. */
static struct tpm_inf_dev tpm_dev;
/* Write one byte to a TPM data register, via port I/O or MMIO as configured. */
static inline void tpm_data_out(unsigned char data, unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		outb(data, tpm_dev.data_regs + offset);
	else
		writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
}
/* Read one byte from a TPM data register, via port I/O or MMIO as configured. */
static inline unsigned char tpm_data_in(unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		return inb(tpm_dev.data_regs + offset);
	else
		return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
}
/* Write one byte to a TPM config register (index/data pair). */
static inline void tpm_config_out(unsigned char data, unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		outb(data, tpm_dev.config_port + offset);
	else
		writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
}
/* Read one byte from a TPM config register (index/data pair). */
static inline unsigned char tpm_config_in(unsigned char offset)
{
	if (tpm_dev.iotype == TPM_INF_IO_PORT)
		return inb(tpm_dev.config_port + offset);
	else
		return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
}
/* TPM header definitions */
/* Vendor-layer protocol bytes used in packet headers and control fields. */
enum infineon_tpm_header {
	TPM_VL_VER = 0x01,
	TPM_VL_CHANNEL_CONTROL = 0x07,
	TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
	TPM_VL_CHANNEL_TPM = 0x0B,
	TPM_VL_CONTROL = 0x00,
	TPM_INF_NAK = 0x15,
	TPM_CTRL_WTX = 0x10,
	TPM_CTRL_WTX_ABORT = 0x18,
	TPM_CTRL_WTX_ABORT_ACK = 0x18,
	TPM_CTRL_ERROR = 0x20,
	TPM_CTRL_CHAININGACK = 0x40,
	TPM_CTRL_CHAINING = 0x80,
	TPM_CTRL_DATA = 0x04,
	TPM_CTRL_DATA_CHA = 0x84,
	TPM_CTRL_DATA_CHA_ACK = 0xC4
};

/* Offsets of the device's data registers (relative to data_regs base). */
enum infineon_tpm_register {
	WRFIFO = 0x00,
	RDFIFO = 0x01,
	STAT = 0x02,
	CMD = 0x03
};

/* Bit numbers within the CMD register. */
enum infineon_tpm_command_bits {
	CMD_DIS = 0x00,
	CMD_LP = 0x01,
	CMD_RES = 0x02,
	CMD_IRQC = 0x06
};

/* Bit numbers within the STAT register. */
enum infineon_tpm_status_bits {
	STAT_XFE = 0x00,
	STAT_LPA = 0x01,
	STAT_FOK = 0x02,
	STAT_TOK = 0x03,
	STAT_IRQA = 0x06,
	STAT_RDA = 0x07
};

/* some outgoing values */
enum infineon_tpm_values {
	CHIP_ID1 = 0x20,
	CHIP_ID2 = 0x21,
	TPM_DAR = 0x30,
	RESET_LP_IRQC_DISABLE = 0x41,
	ENABLE_REGISTER_PAIR = 0x55,
	IOLIMH = 0x60,
	IOLIML = 0x61,
	DISABLE_REGISTER_PAIR = 0xAA,
	IDVENL = 0xF1,
	IDVENH = 0xF2,
	IDPDL = 0xF3,
	IDPDH = 0xF4
};

/* WTX packages granted for the current command (see tpm_wtx below). */
static int number_of_wtx;
/*
 * Drain the TPM FIFOs.  When @clear_wrfifo is set, read WRFIFO (up to
 * 4096 times) until five 0xff bytes have been seen; then drain the read
 * FIFO until the STAT RDA bit drops.  Returns 0 on success, -EIO if the
 * read FIFO never empties within TPM_MAX_TRIES reads.
 */
static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
{
	int status;
	int check = 0;
	int i;

	if (clear_wrfifo) {
		for (i = 0; i < 4096; i++) {
			status = tpm_data_in(WRFIFO);
			if (status == 0xff) {
				if (check == 5)
					break;
				else
					check++;
			}
		}
	}
	/* Note: The values which are currently in the FIFO of the TPM
	   are thrown away since there is no usage for them. Usually,
	   this has nothing to say, since the TPM will give its answer
	   immediately or will be aborted anyway, so the data here is
	   usually garbage and useless.
	   We have to clean this, because the next communication with
	   the TPM would be rubbish, if there is still some old data
	   in the Read FIFO.
	 */
	i = 0;
	do {
		status = tpm_data_in(RDFIFO);
		status = tpm_data_in(STAT);
		i++;
		if (i == TPM_MAX_TRIES)
			return -EIO;
	} while ((status & (1 << STAT_RDA)) != 0);
	return 0;
}
/*
 * Poll the STAT register until @wait_for_bit is set, sleeping
 * TPM_MSLEEP_TIME ms between polls, for at most TPM_MAX_TRIES attempts.
 * Returns 0 on success, -EIO on timeout.
 *
 * Cleanup: removed the stray ';' after the closing brace (an empty
 * top-level declaration, warned about under -pedantic).
 */
static int wait(struct tpm_chip *chip, int wait_for_bit)
{
	int status;
	int i;

	for (i = 0; i < TPM_MAX_TRIES; i++) {
		status = tpm_data_in(STAT);
		/* check the status-register if wait_for_bit is set */
		if (status & 1 << wait_for_bit)
			break;
		msleep(TPM_MSLEEP_TIME);
	}
	if (i == TPM_MAX_TRIES) {	/* timeout occurs */
		if (wait_for_bit == STAT_XFE)
			dev_err(chip->dev, "Timeout in wait(STAT_XFE)\n");
		if (wait_for_bit == STAT_RDA)
			dev_err(chip->dev, "Timeout in wait(STAT_RDA)\n");
		return -EIO;
	}
	return 0;
}
/* Wait for transmit-FIFO space (STAT_XFE), then queue one byte. */
static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
{
	wait(chip, STAT_XFE);
	tpm_data_out(sendbyte, WRFIFO);
}
/* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
calculation time, it sends a WTX-package, which has to be acknowledged
or aborted. This usually occurs if you are hammering the TPM with key
creation. Set the maximum number of WTX-packages in the definitions
above, if the number is reached, the waiting-time will be denied
and the TPM command has to be resend.
*/
/* Grant one Waiting-Time-Extension request and give the TPM time to work. */
static void tpm_wtx(struct tpm_chip *chip)
{
	number_of_wtx++;
	dev_info(chip->dev, "Granting WTX (%02d / %02d)\n",
		 number_of_wtx, TPM_MAX_WTX_PACKAGES);
	/* Empty-payload WTX acknowledgement packet. */
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_CTRL_WTX);
	wait_and_send(chip, 0x00);
	wait_and_send(chip, 0x00);
	msleep(TPM_WTX_MSLEEP_TIME);
}
/* Refuse further waiting time: send a WTX abort and reset the counter. */
static void tpm_wtx_abort(struct tpm_chip *chip)
{
	dev_info(chip->dev, "Aborting WTX\n");
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_CTRL_WTX_ABORT);
	wait_and_send(chip, 0x00);
	wait_and_send(chip, 0x00);
	number_of_wtx = 0;
	msleep(TPM_WTX_MSLEEP_TIME);
}
/*
 * Receive one vendor-layer packet from the TPM into @buf.  WTX requests
 * are handled transparently: granted up to TPM_MAX_WTX_PACKAGES times,
 * then aborted, restarting the receive each time.  Returns the payload
 * size with the 6-byte data header stripped, or -EIO on protocol errors.
 *
 * NOTE(review): the data loop reuses buf[] for the payload, overwriting
 * the 4 header bytes just parsed, and ignores wait()'s return value, so
 * the (size == 0x6D00 && buf[1] == 0x80) test inspects received data,
 * not the header.  Inherited from the original protocol code — confirm
 * against the vendor spec before changing.
 */
static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
{
	int i;
	int ret;
	u32 size = 0;
	number_of_wtx = 0;

recv_begin:
	/* start receiving header */
	for (i = 0; i < 4; i++) {
		ret = wait(chip, STAT_RDA);
		if (ret)
			return -EIO;
		buf[i] = tpm_data_in(RDFIFO);
	}

	if (buf[0] != TPM_VL_VER) {
		dev_err(chip->dev,
			"Wrong transport protocol implementation!\n");
		return -EIO;
	}

	if (buf[1] == TPM_CTRL_DATA) {
		/* size of the data received */
		size = ((buf[2] << 8) | buf[3]);

		for (i = 0; i < size; i++) {
			wait(chip, STAT_RDA);
			buf[i] = tpm_data_in(RDFIFO);
		}

		if ((size == 0x6D00) && (buf[1] == 0x80)) {
			dev_err(chip->dev, "Error handling on vendor layer!\n");
			return -EIO;
		}

		/* Strip the 6-byte data header in place. */
		for (i = 0; i < size; i++)
			buf[i] = buf[i + 6];

		size = size - 6;
		return size;
	}

	if (buf[1] == TPM_CTRL_WTX) {
		dev_info(chip->dev, "WTX-package received\n");
		if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
			tpm_wtx(chip);
			goto recv_begin;
		} else {
			tpm_wtx_abort(chip);
			goto recv_begin;
		}
	}

	if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
		dev_info(chip->dev, "WTX-abort acknowledged\n");
		return size;
	}

	if (buf[1] == TPM_CTRL_ERROR) {
		dev_err(chip->dev, "ERROR-package received:\n");
		if (buf[4] == TPM_INF_NAK)
			dev_err(chip->dev,
				"-> Negative acknowledgement"
				" - retransmit command!\n");
		return -EIO;
	}
	return -EIO;
}
/*
 * Transmit @count bytes from @buf to the TPM, wrapped in the vendor
 * transport header (outer length = payload + 6-byte data header).
 * Returns @count, or -EIO if the FIFO cannot be cleared or readied.
 */
static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
{
	int i;
	int ret;
	u8 count_high, count_low, count_4, count_3, count_2, count_1;

	/* Disabling Reset, LP and IRQC */
	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);

	ret = empty_fifo(chip, 1);
	if (ret) {
		dev_err(chip->dev, "Timeout while clearing FIFO\n");
		return -EIO;
	}

	ret = wait(chip, STAT_XFE);
	if (ret)
		return -EIO;

	/* 32-bit payload length, big-endian, for the data header... */
	count_4 = (count & 0xff000000) >> 24;
	count_3 = (count & 0x00ff0000) >> 16;
	count_2 = (count & 0x0000ff00) >> 8;
	count_1 = (count & 0x000000ff);
	/* ...and 16-bit total length (payload + data header) for the outer header. */
	count_high = ((count + 6) & 0xffffff00) >> 8;
	count_low = ((count + 6) & 0x000000ff);

	/* Sending Header */
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_CTRL_DATA);
	wait_and_send(chip, count_high);
	wait_and_send(chip, count_low);

	/* Sending Data Header */
	wait_and_send(chip, TPM_VL_VER);
	wait_and_send(chip, TPM_VL_CHANNEL_TPM);
	wait_and_send(chip, count_4);
	wait_and_send(chip, count_3);
	wait_and_send(chip, count_2);
	wait_and_send(chip, count_1);

	/* Sending Data */
	for (i = 0; i < count; i++) {
		wait_and_send(chip, buf[i]);
	}
	return count;
}
/* TPM core callback: intentionally a no-op in legacy mode. */
static void tpm_inf_cancel(struct tpm_chip *chip)
{
	/*
	   Since we are using the legacy mode to communicate
	   with the TPM, we have no cancel functions, but have
	   a workaround for interrupting the TPM through WTX.
	 */
}
/* TPM core callback: return the raw STAT register value. */
static u8 tpm_inf_status(struct tpm_chip *chip)
{
	return tpm_data_in(STAT);
}
/* Standard TPM sysfs attributes, exported via the attribute group below. */
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *inf_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	NULL,
};

static struct attribute_group inf_attr_grp = {.attrs = inf_attrs };

/* Character-device ops forwarded to the generic TPM core. */
static const struct file_operations inf_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

/* Vendor hooks handed to tpm_register_hardware(). */
static const struct tpm_vendor_specific tpm_inf = {
	.recv = tpm_inf_recv,
	.send = tpm_inf_send,
	.cancel = tpm_inf_cancel,
	.status = tpm_inf_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.attr_group = &inf_attr_grp,
	.miscdev = {.fops = &inf_ops,},
};

static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
	/* Infineon TPMs */
	{"IFX0101", 0},
	{"IFX0102", 0},
	{"", 0}
};

MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
/*
 * PnP probe: claim the TPM's I/O-port or MMIO resources, verify the
 * Infineon vendor ID, program and verify the data register base, then
 * register the chip with the TPM core.
 *
 * Bug fix: when tpm_register_hardware() failed, rc was still 0 from its
 * initialization, so probe() reported success while no chip had been
 * registered; that path now returns -ENODEV.
 */
static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
				       const struct pnp_device_id *dev_id)
{
	int rc = 0;
	u8 iol, ioh;
	int vendorid[2];
	int version[2];
	int productid[2];
	char chipname[20];
	struct tpm_chip *chip;

	/* read IO-ports through PnP */
	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
	    !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {

		tpm_dev.iotype = TPM_INF_IO_PORT;

		tpm_dev.config_port = pnp_port_start(dev, 0);
		tpm_dev.config_size = pnp_port_len(dev, 0);
		tpm_dev.data_regs = pnp_port_start(dev, 1);
		tpm_dev.data_size = pnp_port_len(dev, 1);
		if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) {
			rc = -EINVAL;
			goto err_last;
		}
		dev_info(&dev->dev, "Found %s with ID %s\n",
			 dev->name, dev_id->id);
		/* data register base must have a nonzero high byte (IOLIMH). */
		if (!((tpm_dev.data_regs >> 8) & 0xff)) {
			rc = -EINVAL;
			goto err_last;
		}
		/* publish my base address and request region */
		if (request_region(tpm_dev.data_regs, tpm_dev.data_size,
				   "tpm_infineon0") == NULL) {
			rc = -EINVAL;
			goto err_last;
		}
		if (request_region(tpm_dev.config_port, tpm_dev.config_size,
				   "tpm_infineon0") == NULL) {
			release_region(tpm_dev.data_regs, tpm_dev.data_size);
			rc = -EINVAL;
			goto err_last;
		}
	} else if (pnp_mem_valid(dev, 0) &&
		   !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {

		tpm_dev.iotype = TPM_INF_IO_MEM;

		tpm_dev.map_base = pnp_mem_start(dev, 0);
		tpm_dev.map_size = pnp_mem_len(dev, 0);

		dev_info(&dev->dev, "Found %s with ID %s\n",
			 dev->name, dev_id->id);

		/* publish my base address and request region */
		if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size,
				       "tpm_infineon0") == NULL) {
			rc = -EINVAL;
			goto err_last;
		}

		tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size);
		if (tpm_dev.mem_base == NULL) {
			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
			rc = -EINVAL;
			goto err_last;
		}

		/*
		 * The only known MMIO based Infineon TPM system provides
		 * a single large mem region with the device config
		 * registers at the default TPM_ADDR. The data registers
		 * seem like they could be placed anywhere within the MMIO
		 * region, but lets just put them at zero offset.
		 */
		tpm_dev.index_off = TPM_ADDR;
		tpm_dev.data_regs = 0x0;
	} else {
		rc = -EINVAL;
		goto err_last;
	}

	/* query chip for its vendor, its version number a.s.o. */
	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
	tpm_config_out(IDVENL, TPM_INF_ADDR);
	vendorid[1] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(IDVENH, TPM_INF_ADDR);
	vendorid[0] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(IDPDL, TPM_INF_ADDR);
	productid[1] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(IDPDH, TPM_INF_ADDR);
	productid[0] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(CHIP_ID1, TPM_INF_ADDR);
	version[1] = tpm_config_in(TPM_INF_DATA);
	tpm_config_out(CHIP_ID2, TPM_INF_ADDR);
	version[0] = tpm_config_in(TPM_INF_DATA);

	switch ((productid[0] << 8) | productid[1]) {
	case 6:
		snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)");
		break;
	case 11:
		snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)");
		break;
	default:
		snprintf(chipname, sizeof(chipname), " (unknown chip)");
		break;
	}

	if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {

		/* configure TPM with IO-ports */
		tpm_config_out(IOLIMH, TPM_INF_ADDR);
		tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
		tpm_config_out(IOLIML, TPM_INF_ADDR);
		tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);

		/* control if IO-ports are set correctly */
		tpm_config_out(IOLIMH, TPM_INF_ADDR);
		ioh = tpm_config_in(TPM_INF_DATA);
		tpm_config_out(IOLIML, TPM_INF_ADDR);
		iol = tpm_config_in(TPM_INF_DATA);

		if ((ioh << 8 | iol) != tpm_dev.data_regs) {
			dev_err(&dev->dev,
				"Could not set IO-data registers to 0x%x\n",
				tpm_dev.data_regs);
			rc = -EIO;
			goto err_release_region;
		}

		/* activate register */
		tpm_config_out(TPM_DAR, TPM_INF_ADDR);
		tpm_config_out(0x01, TPM_INF_DATA);
		tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);

		/* disable RESET, LP and IRQC */
		tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);

		/* Finally, we're done, print some infos */
		dev_info(&dev->dev, "TPM found: "
			 "config base 0x%lx, "
			 "data base 0x%lx, "
			 "chip version 0x%02x%02x, "
			 "vendor id 0x%x%x (Infineon), "
			 "product id 0x%02x%02x"
			 "%s\n",
			 tpm_dev.iotype == TPM_INF_IO_PORT ?
			 tpm_dev.config_port :
			 tpm_dev.map_base + tpm_dev.index_off,
			 tpm_dev.iotype == TPM_INF_IO_PORT ?
			 tpm_dev.data_regs :
			 tpm_dev.map_base + tpm_dev.data_regs,
			 version[0], version[1],
			 vendorid[0], vendorid[1],
			 productid[0], productid[1], chipname);

		chip = tpm_register_hardware(&dev->dev, &tpm_inf);
		if (!chip) {
			/* Fix: rc was still 0 here, making a failed
			 * registration look like a successful probe. */
			rc = -ENODEV;
			goto err_release_region;
		}
		return 0;
	} else {
		rc = -ENODEV;
		goto err_release_region;
	}

err_release_region:
	if (tpm_dev.iotype == TPM_INF_IO_PORT) {
		release_region(tpm_dev.data_regs, tpm_dev.data_size);
		release_region(tpm_dev.config_port, tpm_dev.config_size);
	} else {
		iounmap(tpm_dev.mem_base);
		release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
	}

err_last:
	return rc;
}
/* PnP remove: release the claimed I/O or MMIO resources and the chip. */
static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	if (chip) {
		if (tpm_dev.iotype == TPM_INF_IO_PORT) {
			release_region(tpm_dev.data_regs, tpm_dev.data_size);
			release_region(tpm_dev.config_port,
				       tpm_dev.config_size);
		} else {
			iounmap(tpm_dev.mem_base);
			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
		}
		tpm_dev_vendor_release(chip);
		tpm_remove_hardware(chip->dev);
	}
}
/*
 * PnP suspend: ask the TPM to persist its volatile state by sending a
 * raw TPM_ORD_SaveState command blob before the machine sleeps.
 */
static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);
	int rc;

	if (chip) {
		u8 savestate[] = {
			0, 193,	/* TPM_TAG_RQU_COMMAND */
			0, 0, 0, 10,	/* blob length (in bytes) */
			0, 0, 0, 152	/* TPM_ORD_SaveState */
		};
		dev_info(&dev->dev, "saving TPM state\n");
		rc = tpm_inf_send(chip, savestate, sizeof(savestate));
		if (rc < 0) {
			dev_err(&dev->dev, "error while saving TPM state\n");
			return rc;
		}
	}
	return 0;
}
/*
 * PnP resume: reprogram the data register base, re-activate the register
 * pair and re-disable RESET/LP/IRQC, then let the TPM core restore state.
 */
static int tpm_inf_pnp_resume(struct pnp_dev *dev)
{
	/* Re-configure TPM after suspending */
	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
	tpm_config_out(IOLIMH, TPM_INF_ADDR);
	tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
	tpm_config_out(IOLIML, TPM_INF_ADDR);
	tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
	/* activate register */
	tpm_config_out(TPM_DAR, TPM_INF_ADDR);
	tpm_config_out(0x01, TPM_INF_DATA);
	tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
	/* disable RESET, LP and IRQC */
	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
	return tpm_pm_resume(&dev->dev);
}
/* PnP driver glue binding the IFX0101/IFX0102 IDs to the callbacks above. */
static struct pnp_driver tpm_inf_pnp_driver = {
	.name = "tpm_inf_pnp",
	.id_table = tpm_inf_pnp_tbl,
	.probe = tpm_inf_pnp_probe,
	.suspend = tpm_inf_pnp_suspend,
	.resume = tpm_inf_pnp_resume,
	.remove = __devexit_p(tpm_inf_pnp_remove)
};
/* Module entry point: register the PnP driver. */
static int __init init_inf(void)
{
	return pnp_register_driver(&tpm_inf_pnp_driver);
}
/* Module exit: unregister the PnP driver. */
static void __exit cleanup_inf(void)
{
	pnp_unregister_driver(&tpm_inf_pnp_driver);
}
module_init(init_inf);
module_exit(cleanup_inf);
MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");
| gpl-2.0 |
vm03/android_kernel_lge_msm8610 | drivers/staging/bcm/vendorspecificextn.c | 8193 | 4402 | #include "headers.h"
//-----------------------------------------------------------------------------
// Procedure: vendorextnGetSectionInfo
//
// Description: Finds the type of NVM used.
//
// Arguments:
// Adapter - ptr to Adapter object instance
// pNVMType - ptr to NVM type.
// Returns:
// STATUS_SUCCESS/STATUS_FAILURE
//
//-----------------------------------------------------------------------------
INT vendorextnGetSectionInfo(PVOID pContext,PFLASH2X_VENDORSPECIFIC_INFO pVendorInfo)
{
	/* Stub: no vendor-specific NVM support is compiled in. */
	return STATUS_FAILURE;
}
//-----------------------------------------------------------------------------
// Procedure: vendorextnInit
//
// Description: Initializing the vendor extension NVM interface
//
// Arguments:
// Adapter - Pointer to MINI Adapter Structure.
// Returns:
// STATUS_SUCCESS/STATUS_FAILURE
//
//-----------------------------------------------------------------------------
INT vendorextnInit(PMINI_ADAPTER Adapter)
{
	/* Stub: nothing to initialize; always succeeds. */
	return STATUS_SUCCESS;
}
//-----------------------------------------------------------------------------
// Procedure: vendorextnExit
//
// Description: Free the resource associated with vendor extension NVM interface
//
// Arguments:
// Adapter - Pointer to MINI Adapter Structure.
// Returns:
// STATUS_SUCCESS/STATUS_FAILURE
//
//-----------------------------------------------------------------------------
INT vendorextnExit(PMINI_ADAPTER Adapter)
{
	/* Stub: nothing to release; always succeeds. */
	return STATUS_SUCCESS;
}
//------------------------------------------------------------------------
// Procedure: vendorextnIoctl
//
// Description: execute the vendor extension specific ioctl
//
//Arguments:
// Adapter -Beceem private Adapter Structure
// cmd -vendor extension specific Ioctl commad
// arg -input parameter sent by vendor
//
// Returns:
// CONTINUE_COMMON_PATH in case it is not meant to be processed by vendor ioctls
// STATUS_SUCCESS/STATUS_FAILURE as per the IOCTL return value
//
//--------------------------------------------------------------------------
INT vendorextnIoctl(PMINI_ADAPTER Adapter, UINT cmd, ULONG arg)
{
	/* Stub: never consumes the ioctl; fall through to the common path. */
	return CONTINUE_COMMON_PATH;
}
//------------------------------------------------------------------
// Procedure: vendorextnReadSection
//
// Description: Reads from a section of NVM
//
// Arguments:
// pContext - ptr to Adapter object instance
// pBuffer - Read the data from Vendor Area to this buffer
// SectionVal - Value of type of Section
// Offset - Read from the Offset of the Vendor Section.
// numOfBytes - Read numOfBytes from the Vendor section to Buffer
//
// Returns:
// STATUS_SUCCESS/STATUS_FAILURE
//
//------------------------------------------------------------------
INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, FLASH2X_SECTION_VAL SectionVal,
			UINT offset, UINT numOfBytes)
{
	/* Stub: vendor-section reads are unsupported. */
	return STATUS_FAILURE;
}
//------------------------------------------------------------------
// Procedure: vendorextnWriteSection
//
// Description: Write to a Section of NVM
//
// Arguments:
// pContext - ptr to Adapter object instance
// pBuffer - Write the data provided in the buffer
// SectionVal - Value of type of Section
// Offset - Writes to the Offset of the Vendor Section.
// numOfBytes - Write num Bytes after reading from pBuffer.
// bVerify - the Buffer Written should be verified.
//
// Returns:
// STATUS_SUCCESS/STATUS_FAILURE
//
//------------------------------------------------------------------
INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, FLASH2X_SECTION_VAL SectionVal,
			UINT offset, UINT numOfBytes, BOOLEAN bVerify)
{
	/* Stub: vendor-section writes are unsupported. */
	return STATUS_FAILURE;
}
//------------------------------------------------------------------
// Procedure: vendorextnWriteSectionWithoutErase
//
// Description: Write to a Section of NVM without erasing the sector
//
// Arguments:
// pContext - ptr to Adapter object instance
// pBuffer - Write the data provided in the buffer
// SectionVal - Value of type of Section
// Offset - Writes to the Offset of the Vendor Section.
// numOfBytes - Write num Bytes after reading from pBuffer.
//
// Returns:
// STATUS_SUCCESS/STATUS_FAILURE
//
//------------------------------------------------------------------
INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, FLASH2X_SECTION_VAL SectionVal,
			UINT offset, UINT numOfBytes)
{
	/* Stub: erase-less vendor-section writes are unsupported. */
	return STATUS_FAILURE;
}
| gpl-2.0 |
phenomx4/android_kernel_zte_warplte | drivers/ide/jmicron.c | 9217 | 4602 |
/*
* Copyright (C) 2006 Red Hat
*
* May be copied or modified under the terms of the GNU General Public License
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#define DRV_NAME "jmicron"
/* Logical identity of a physical port on the JMicron controller. */
typedef enum {
	PORT_PATA0 = 0,
	PORT_PATA1 = 1,
	PORT_SATA = 2,
} port_type;
/**
 *	jmicron_cable_detect	-	cable detection
 *	@hwif: IDE port
 *
 *	Decode the controller's port-mapping config (PCI regs 0x40 and
 *	0x80) to find which physical port this channel is, then report
 *	its cable type (SATA ports always report an 80-wire cable).
 *
 *	Returns the cable type.
 */

static u8 jmicron_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);

	u32 control;
	u32 control5;

	int port = hwif->channel;
	port_type port_map[2];

	pci_read_config_dword(pdev, 0x40, &control);

	/* There are two basic mappings. One has the two SATA ports merged
	   as master/slave and the secondary as PATA, the other has only the
	   SATA port mapped */
	if (control & (1 << 23)) {
		port_map[0] = PORT_SATA;
		port_map[1] = PORT_PATA0;
	} else {
		port_map[0] = PORT_SATA;
		port_map[1] = PORT_SATA;
	}

	/* The 365/366 may have this bit set to map the second PATA port
	   as the internal primary channel */
	pci_read_config_dword(pdev, 0x80, &control5);
	if (control5 & (1<<24))
		port_map[0] = PORT_PATA1;

	/* The two ports may then be logically swapped by the firmware */
	if (control & (1 << 22))
		port = port ^ 1;

	/*
	 *	Now we know which physical port we are talking about we can
	 *	actually do our cable checking etc. Thankfully we don't need
	 *	to do the plumbing for other cases.
	 */
	switch (port_map[port]) {
	case PORT_PATA0:
		if (control & (1 << 3))	/* 40/80 pin primary */
			return ATA_CBL_PATA40;
		return ATA_CBL_PATA80;
	case PORT_PATA1:
		if (control5 & (1 << 19))	/* 40/80 pin secondary */
			return ATA_CBL_PATA40;
		return ATA_CBL_PATA80;
	case PORT_SATA:
		break;
	}
	/* Avoid bogus "control reaches end of non-void function" */
	return ATA_CBL_PATA80;
}
static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
/* Nothing to program: the JMicron hardware snoops the ATA command
   stream and sets its own PIO timings. */
}
/**
 * jmicron_set_dma_mode - set host controller for DMA mode
 * @hwif: port
 * @drive: drive
 *
 * As the JMicron snoops for timings we don't need to do anything here.
 */
static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
/* Port operations: only cable detection needs real work on this chip. */
static const struct ide_port_ops jmicron_port_ops = {
.set_pio_mode = jmicron_set_pio_mode,
.set_dma_mode = jmicron_set_dma_mode,
.cable_detect = jmicron_cable_detect,
};
/* Static description of the controller handed to the IDE PCI core. */
static const struct ide_port_info jmicron_chipset __devinitdata = {
.name = DRV_NAME,
.enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
.port_ops = &jmicron_port_ops,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
};
/**
 * jmicron_init_one - pci layer discovery entry
 * @dev: PCI device
 * @id: ident table entry
 *
 * Called by the PCI code when it finds a Jmicron controller.
 * We then use the IDE PCI generic helper to do most of the work.
 */
static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &jmicron_chipset, NULL);
}
/* All JMB PATA controllers have and will continue to have the same
* interface. Matching vendor and device class is enough for all
* current and future controllers if the controller is programmed
* properly.
*
* If libata is configured, jmicron PCI quirk programs the controller
* into the correct mode. If libata isn't configured, match known
* device IDs too to maintain backward compatibility.
*/
static struct pci_device_id jmicron_pci_tbl[] = {
#if !defined(CONFIG_ATA) && !defined(CONFIG_ATA_MODULE)
/* Explicit IDs only when libata is absent (see comment above). */
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB365) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB366) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB368) },
#endif
/* Catch-all: any JMicron device in IDE-class mode. */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
static struct pci_driver jmicron_pci_driver = {
.name = "JMicron IDE",
.id_table = jmicron_pci_tbl,
.probe = jmicron_init_one,
.remove = ide_pci_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
/* Module entry: register the PCI driver with the IDE PCI layer. */
static int __init jmicron_ide_init(void)
{
return ide_pci_register_driver(&jmicron_pci_driver);
}
static void __exit jmicron_ide_exit(void)
{
pci_unregister_driver(&jmicron_pci_driver);
}
module_init(jmicron_ide_init);
module_exit(jmicron_ide_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("PCI driver module for the JMicron in legacy modes");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pio-masaki/kernel_AT270 | drivers/ide/siimage.c | 9217 | 21477 | /*
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat
* Copyright (C) 2007-2008 MontaVista Software, Inc.
* Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
*
* May be copied or modified under the terms of the GNU General Public License
*
* Documentation for CMD680:
* http://gkernel.sourceforge.net/specs/sii/sii-0680a-v1.31.pdf.bz2
*
* Documentation for SiI 3112:
* http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
*
* Errata and other documentation only available under NDA.
*
*
* FAQ Items:
* If you are using Marvell SATA-IDE adapters with Maxtor drives
* ensure the system is set up for ATA100/UDMA5, not UDMA6.
*
* If you are using WD drives with SATA bridges you must set the
* drive to "Single". "Master" will hang.
*
* If you have strange problems with nVidia chipset systems please
* see the SI support documentation and update your system BIOS
* if necessary
*
* The Dell DRAC4 has some interesting features including effectively hot
* unplugging/replugging the virtual CD interface when the DRAC is reset.
* This often causes drivers/ide/siimage to panic but is ok with the rather
* smarter code in libata.
*
* TODO:
* - VDMA support
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/io.h>
#define DRV_NAME "siimage"
/**
* pdev_is_sata - check if device is SATA
* @pdev: PCI device to check
*
* Returns true if this is a SATA controller
*/
static int pdev_is_sata(struct pci_dev *pdev)
{
#ifdef CONFIG_BLK_DEV_IDE_SATA
	/* Classify strictly by PCI device ID; an unknown ID here means
	   the probe table and this check have drifted apart — bug out. */
	if (pdev->device == PCI_DEVICE_ID_SII_3112 ||
	    pdev->device == PCI_DEVICE_ID_SII_1210SA)
		return 1;
	if (pdev->device == PCI_DEVICE_ID_SII_680)
		return 0;
	BUG();
#endif
	return 0;
}
/**
* is_sata - check if hwif is SATA
* @hwif: interface to check
*
* Returns true if this is a SATA controller
*/
static inline int is_sata(ide_hwif_t *hwif)
{
/* Convenience wrapper: classify this port by its PCI device ID. */
return pdev_is_sata(to_pci_dev(hwif->dev));
}
/**
* siimage_selreg - return register base
* @hwif: interface
* @r: config offset
*
* Turn a config register offset into the right address in either
* PCI space or MMIO space to access the control register in question
* Thankfully this is a configuration operation, so isn't performance
* critical.
*/
static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
{
	/*
	 * Channel control registers start at offset 0xA0; the second
	 * channel is spaced 0x40 away in MMIO space but only 0x10 away
	 * in PCI configuration space.
	 */
	const int chan_shift = (hwif->host_flags & IDE_HFLAG_MMIO) ? 6 : 4;

	return (unsigned long)hwif->hwif_data + 0xA0 + r +
		((unsigned long)hwif->channel << chan_shift);
}
/**
* siimage_seldev - return register base
* @hwif: interface
* @r: config offset
*
* Turn a config register offset into the right address in either
* PCI space or MMIO space to access the control register in question
* including accounting for the unit shift.
*/
static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
{
ide_hwif_t *hwif = drive->hwif;
unsigned long base = (unsigned long)hwif->hwif_data;
u8 unit = drive->dn & 1;
base += 0xA0 + r;
/* Channel spacing: 0x40 apart in MMIO space, 0x10 in config space. */
if (hwif->host_flags & IDE_HFLAG_MMIO)
base += hwif->channel << 6;
else
base += hwif->channel << 4;
/* NOTE(review): 'unit << unit' evaluates to 0 for the master and 2 for
   the slave, i.e. a 2-byte per-device stride. The expression looks like
   a typo for 'unit << 1' but is behaviorally identical for unit in
   {0,1}; confirm against the SiI680/3112 register map before touching. */
base |= unit << unit;
return base;
}
static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
{
struct ide_host *host = pci_get_drvdata(dev);
u8 tmp = 0;
if (host->host_priv)
tmp = readb((void __iomem *)addr);
else
pci_read_config_byte(dev, addr, &tmp);
return tmp;
}
static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
{
struct ide_host *host = pci_get_drvdata(dev);
u16 tmp = 0;
if (host->host_priv)
tmp = readw((void __iomem *)addr);
else
pci_read_config_word(dev, addr, &tmp);
return tmp;
}
static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
{
	struct ide_host *host = pci_get_drvdata(dev);

	/* Mirror of sil_ioread8(): MMIO if mapped, else config space. */
	if (host->host_priv == NULL)
		pci_write_config_byte(dev, addr, val);
	else
		writeb(val, (void __iomem *)addr);
}
static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
{
	struct ide_host *host = pci_get_drvdata(dev);

	/* Mirror of sil_ioread16(): MMIO if mapped, else config space. */
	if (host->host_priv == NULL)
		pci_write_config_word(dev, addr, val);
	else
		writew(val, (void __iomem *)addr);
}
static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
{
	struct ide_host *host = pci_get_drvdata(dev);

	/* Mirror of the read helpers: MMIO if mapped, else config space. */
	if (host->host_priv == NULL)
		pci_write_config_dword(dev, addr, val);
	else
		writel(val, (void __iomem *)addr);
}
/**
* sil_udma_filter - compute UDMA mask
* @drive: IDE device
*
* Compute the available UDMA speeds for the device on the interface.
*
* For the CMD680 this depends on the clocking mode (scsc), for the
* SI3112 SATA controller life is a bit simpler.
*/
static u8 sil_pata_udma_filter(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = (unsigned long)hwif->hwif_data;
u8 scsc, mask = 0;
/* The system clock configuration register lives at 0x4A (MMIO) or
   0x8A (config space); bits 5:4 encode the clocking mode. */
base += (hwif->host_flags & IDE_HFLAG_MMIO) ? 0x4A : 0x8A;
scsc = sil_ioread8(dev, base);
switch (scsc & 0x30) {
case 0x10: /* 133 */
mask = ATA_UDMA6;
break;
case 0x20: /* 2xPCI */
mask = ATA_UDMA6;
break;
case 0x00: /* 100 */
mask = ATA_UDMA5;
break;
default: /* Disabled ? */
/* 0x30 means clocking disabled; init_chipset_siimage() should have
   forced it on before we get here, so treat it as a driver bug. */
BUG();
}
return mask;
}
static u8 sil_sata_udma_filter(ide_drive_t *drive)
{
	const char *model = (char *)&drive->id[ATA_ID_PROD];

	/* Cap Maxtor drives at UDMA5 behind the SiI SATA bridge
	   (see the FAQ note at the top of this file). */
	if (strstr(model, "Maxtor") != NULL)
		return ATA_UDMA5;

	return ATA_UDMA6;
}
/**
* sil_set_pio_mode - set host controller for PIO mode
* @hwif: port
* @drive: drive
*
* Load the timing settings for this device mode into the
* controller.
*/
static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
/* Per-PIO-mode timing words taken from the datasheet: index 0..4 maps
   to PIO0..PIO4, for the taskfile and data registers respectively. */
static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
struct pci_dev *dev = to_pci_dev(hwif->dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
u32 speedt = 0;
u16 speedp = 0;
/* Data timing is per device, taskfile timing is per channel. */
unsigned long addr = siimage_seldev(drive, 0x04);
unsigned long tfaddr = siimage_selreg(hwif, 0x02);
unsigned long base = (unsigned long)hwif->hwif_data;
const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 tf_pio = pio;
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
: (mmio ? 0xB4 : 0x80);
u8 mode = 0;
u8 unit = drive->dn & 1;
/* trim *taskfile* PIO to the slowest of the master/slave */
if (pair) {
u8 pair_pio = pair->pio_mode - XFER_PIO_0;
if (pair_pio < tf_pio)
tf_pio = pair_pio;
}
/* cheat for now and use the docs */
speedp = data_speed[pio];
speedt = tf_speed[tf_pio];
sil_iowrite16(dev, speedp, addr);
sil_iowrite16(dev, speedt, tfaddr);
/* now set up IORDY */
/* IORDY enable is bit 9 of the taskfile timing register and one bit
   per unit in the mode register. */
speedp = sil_ioread16(dev, tfaddr - 2);
speedp &= ~0x200;
mode = sil_ioread8(dev, base + addr_mask);
mode &= ~(unit ? 0x30 : 0x03);
if (ide_pio_need_iordy(drive, pio)) {
speedp |= 0x200;
mode |= unit ? 0x10 : 0x01;
}
sil_iowrite16(dev, speedp, tfaddr - 2);
sil_iowrite8(dev, mode, base + addr_mask);
}
/**
* sil_set_dma_mode - set host controller for DMA mode
* @hwif: port
* @drive: drive
*
* Tune the SiI chipset for the desired DMA mode.
*/
static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
/* UDMA timing nibbles for 133MHz (ultra6) and 100MHz (ultra5) base
   clocks, and MWDMA timing words, per datasheet. */
static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = (unsigned long)hwif->hwif_data;
u16 ultra = 0, multi = 0;
u8 mode = 0, unit = drive->dn & 1;
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
u8 scsc = 0, addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
: (mmio ? 0xB4 : 0x80);
unsigned long ma = siimage_seldev(drive, 0x08);
unsigned long ua = siimage_seldev(drive, 0x0C);
const u8 speed = drive->dma_mode;
/* Read current clocking mode, mode register and timing registers. */
scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A));
mode = sil_ioread8 (dev, base + addr_mask);
multi = sil_ioread16(dev, ma);
ultra = sil_ioread16(dev, ua);
mode &= ~(unit ? 0x30 : 0x03);
ultra &= ~0x3F;
/* scsc: 1 = 133MHz/2xPCI table, 0 = 100MHz table; SATA always 133. */
scsc = ((scsc & 0x30) == 0x00) ? 0 : 1;
scsc = is_sata(hwif) ? 1 : scsc;
if (speed >= XFER_UDMA_0) {
multi = dma[2];
ultra |= scsc ? ultra6[speed - XFER_UDMA_0] :
ultra5[speed - XFER_UDMA_0];
mode |= unit ? 0x30 : 0x03;
} else {
multi = dma[speed - XFER_MW_DMA_0];
mode |= unit ? 0x20 : 0x02;
}
sil_iowrite8 (dev, mode, base + addr_mask);
sil_iowrite16(dev, multi, ma);
sil_iowrite16(dev, ultra, ua);
}
static int sil_test_irq(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long status_reg = siimage_selreg(hwif, 1);

	/* Bit 3 of the per-channel status register reflects INTRQ. */
	return !!(sil_ioread8(dev, status_reg) & 8);
}
/**
* siimage_mmio_dma_test_irq - check we caused an IRQ
* @drive: drive we are testing
*
* Check if we caused an IDE DMA interrupt. We may also have caused
* SATA status interrupts, if so we clean them up and continue.
*/
static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
void __iomem *sata_error_addr
= (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET];
/* SATA error/status registers are only populated in MMIO mode on
   SATA parts (see init_mmio_iops_siimage). */
if (sata_error_addr) {
unsigned long base = (unsigned long)hwif->hwif_data;
u32 ext_stat = readl((void __iomem *)(base + 0x10));
u8 watchdog = 0;
if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) {
u32 sata_error = readl(sata_error_addr);
/* Writing the value back clears the latched error bits. */
writel(sata_error, sata_error_addr);
watchdog = (sata_error & 0x00680000) ? 1 : 0;
printk(KERN_WARNING "%s: sata_error = 0x%08x, "
"watchdog = %d, %s\n",
drive->name, sata_error, watchdog, __func__);
} else
watchdog = (ext_stat & 0x8000) ? 1 : 0;
ext_stat >>= 16;
if (!(ext_stat & 0x0404) && !watchdog)
return 0;
}
/* return 1 if INTR asserted */
if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
return 1;
return 0;
}
static int siimage_dma_test_irq(ide_drive_t *drive)
{
	/* MMIO-mode ports need the SATA-aware check; everything else can
	   use the generic SFF helper. */
	const int mmio = drive->hwif->host_flags & IDE_HFLAG_MMIO;

	return mmio ? siimage_mmio_dma_test_irq(drive)
		    : ide_dma_test_irq(drive);
}
/**
* sil_sata_reset_poll - wait for SATA reset
* @drive: drive we are resetting
*
* Poll the SATA phy and see whether it has come back from the dead
* yet.
*/
static int sil_sata_reset_poll(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	void __iomem *status_reg =
		(void __iomem *)hwif->sata_scr[SATA_STATUS_OFFSET];
	u32 sata_stat;

	/* The SATA status register is only mapped in MMIO mode; with no
	   register to poll, report success. */
	if (status_reg == NULL)
		return 0;

	sata_stat = readl(status_reg);
	if ((sata_stat & 0x03) == 0x03)
		return 0;

	printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
	       hwif->name, sata_stat);
	return -ENXIO;
}
/**
* sil_sata_pre_reset - reset hook
* @drive: IDE device being reset
*
* For the SATA devices we need to handle recalibration/geometry
* differently
*/
static void sil_sata_pre_reset(ide_drive_t *drive)
{
/* Skip the legacy SET GEOMETRY / RECALIBRATE commands for disks behind
   the SATA bridge; only the flags are cleared here. */
if (drive->media == ide_disk) {
drive->special_flags &=
~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE);
}
}
/**
* init_chipset_siimage - set up an SI device
* @dev: PCI device
*
* Perform the initial PCI set up for this device. Attempt to switch
* to 133 MHz clocking if the system isn't already set up to do it.
*/
static int init_chipset_siimage(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
void __iomem *ioaddr = host->host_priv;
unsigned long base, scsc_addr;
u8 rev = dev->revision, tmp;
/* rev 0 silicon needs cache line size 255; later revisions use 1. */
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
if (ioaddr)
pci_set_master(dev);
base = (unsigned long)ioaddr;
if (ioaddr && pdev_is_sata(dev)) {
u32 tmp32, irq_mask;
/* make sure IDE0/1 interrupts are not masked */
irq_mask = (1 << 22) | (1 << 23);
tmp32 = readl(ioaddr + 0x48);
if (tmp32 & irq_mask) {
tmp32 &= ~irq_mask;
writel(tmp32, ioaddr + 0x48);
readl(ioaddr + 0x48); /* flush */
}
/* mask off all SATA error interrupts for now */
writel(0, ioaddr + 0x148);
writel(0, ioaddr + 0x1C8);
}
/* reset both channel mode registers (MMIO or config-space address) */
sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);
scsc_addr = base ? (base + 0x4A) : 0x8A;
tmp = sil_ioread8(dev, scsc_addr);
switch (tmp & 0x30) {
case 0x00:
/* On 100 MHz clocking, try and switch to 133 MHz */
sil_iowrite8(dev, tmp | 0x10, scsc_addr);
break;
case 0x30:
/* Clocking is disabled, attempt to force 133MHz clocking. */
sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
/* fall through */
case 0x10:
/* On 133Mhz clocking. */
break;
case 0x20:
/* On PCIx2 clocking. */
break;
}
tmp = sil_ioread8(dev, scsc_addr);
/* program safe default PIO/taskfile timings for both channels */
sil_iowrite8 (dev, 0x72, base + 0xA1);
sil_iowrite16(dev, 0x328A, base + 0xA2);
sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
sil_iowrite32(dev, 0x43924392, base + 0xA8);
sil_iowrite32(dev, 0x40094009, base + 0xAC);
sil_iowrite8 (dev, 0x72, base ? (base + 0xE1) : 0xB1);
sil_iowrite16(dev, 0x328A, base ? (base + 0xE2) : 0xB2);
sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);
if (base && pdev_is_sata(dev)) {
/* clear pending SATA errors, then enable watchdog interrupts */
writel(0xFFFF0000, ioaddr + 0x108);
writel(0xFFFF0000, ioaddr + 0x188);
writel(0x00680000, ioaddr + 0x148);
writel(0x00680000, ioaddr + 0x1C8);
}
/* report the clocking mode of the controller */
if (!pdev_is_sata(dev)) {
static const char *clk_str[] =
{ "== 100", "== 133", "== 2X PCI", "DISABLED!" };
tmp >>= 4;
printk(KERN_INFO DRV_NAME " %s: BASE CLOCK %s\n",
pci_name(dev), clk_str[tmp & 3]);
}
return 0;
}
/**
* init_mmio_iops_siimage - set up the iops for MMIO
* @hwif: interface to set up
*
* The basic setup here is fairly simple, we can use standard MMIO
* operations. However we do have to set the taskfile register offsets
* by hand as there isn't a standard defined layout for them this time.
*
* The hardware supports buffered taskfiles and also some rather nice
* extended PRD tables. For better SI3112 support use the libata driver
*/
static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
void *addr = host->host_priv;
u8 ch = hwif->channel;
struct ide_io_ports *io_ports = &hwif->io_ports;
unsigned long base;
/*
 * Fill in the basic hwif bits
 */
hwif->host_flags |= IDE_HFLAG_MMIO;
hwif->hwif_data = addr;
/*
 * Now set up the hw. We have to do this ourselves as the
 * MMIO layout isn't the same as the standard port based I/O.
 */
memset(io_ports, 0, sizeof(*io_ports));
/* channel 0 registers start at +0x80, channel 1 at +0xC0 */
base = (unsigned long)addr;
if (ch)
base += 0xC0;
else
base += 0x80;
/*
 * The buffered task file doesn't have status/control, so we
 * can't currently use it sanely since we want to use LBA48 mode.
 */
io_ports->data_addr = base;
io_ports->error_addr = base + 1;
io_ports->nsect_addr = base + 2;
io_ports->lbal_addr = base + 3;
io_ports->lbam_addr = base + 4;
io_ports->lbah_addr = base + 5;
io_ports->device_addr = base + 6;
io_ports->status_addr = base + 7;
io_ports->ctl_addr = base + 10;
if (pdev_is_sata(dev)) {
/* SATA SCR registers: +0x100 for channel 0, +0x180 for channel 1 */
base = (unsigned long)addr;
if (ch)
base += 0x80;
hwif->sata_scr[SATA_STATUS_OFFSET] = base + 0x104;
hwif->sata_scr[SATA_ERROR_OFFSET] = base + 0x108;
hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
}
hwif->irq = dev->irq;
hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
}
static int is_dev_seagate_sata(ide_drive_t *drive)
{
	const char *model = (const char *)&drive->id[ATA_ID_PROD];
	unsigned len = strnlen(model, ATA_ID_PROD_LEN);

	/* Affected drives are Seagate models "ST....AS" / "ST....ASL". */
	if (len <= 4 || memcmp(model, "ST", 2) != 0)
		return 0;

	if (memcmp(model + len - 2, "AS", 2) == 0 ||
	    memcmp(model + len - 3, "ASL", 3) == 0) {
		printk(KERN_INFO "%s: applying pessimistic Seagate "
		       "errata fix\n", drive->name);
		return 1;
	}

	return 0;
}
/**
* sil_quirkproc - post probe fixups
* @drive: drive
*
* Called after drive probe we use this to decide whether the
* Seagate fixup must be applied. This used to be in init_iops but
* that can occur before we know what drives are present.
*/
static void sil_quirkproc(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
/* Try and rise the rqsize */
/* Keep the pessimistic rqsize of 15 (set in init_iops_siimage) only
   for Seagate SATA drives hit by the errata. */
if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
hwif->rqsize = 128;
}
/**
* init_iops_siimage - set up iops
* @hwif: interface to set up
*
* Do the basic setup for the SIIMAGE hardware interface
* and then do the MMIO setup if we can. This is the first
* look in we get for setting up the hwif so that we
* can get the iops right before using them.
*/
static void __devinit init_iops_siimage(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
hwif->hwif_data = NULL;
/* Pessimal until we finish probing */
/* rqsize is raised to 128 later in sil_quirkproc() once the attached
   drive is known. */
hwif->rqsize = 15;
/* host_priv is the ioremapped BAR5; present => use MMIO register ops */
if (host->host_priv)
init_mmio_iops_siimage(hwif);
}
/**
* sil_cable_detect - cable detection
* @hwif: interface to check
*
* Check for the presence of an ATA66 capable cable on the interface.
*/
static u8 sil_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 cable_bits = sil_ioread8(dev, siimage_selreg(hwif, 0));

	/* Bit 0 set means an 80-conductor cable was sensed. */
	if (cable_bits & 0x01)
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}
/* Port operations for the PATA-only SiI680. */
static const struct ide_port_ops sil_pata_port_ops = {
.set_pio_mode = sil_set_pio_mode,
.set_dma_mode = sil_set_dma_mode,
.quirkproc = sil_quirkproc,
.test_irq = sil_test_irq,
.udma_filter = sil_pata_udma_filter,
.cable_detect = sil_cable_detect,
};
/* Port operations for the SATA SiI3112/1210SA: adds PHY reset hooks. */
static const struct ide_port_ops sil_sata_port_ops = {
.set_pio_mode = sil_set_pio_mode,
.set_dma_mode = sil_set_dma_mode,
.reset_poll = sil_sata_reset_poll,
.pre_reset = sil_sata_pre_reset,
.quirkproc = sil_quirkproc,
.test_irq = sil_test_irq,
.udma_filter = sil_sata_udma_filter,
.cable_detect = sil_cable_detect,
};
/* Generic SFF DMA ops, with only the IRQ test overridden for MMIO. */
static const struct ide_dma_ops sil_dma_ops = {
.dma_host_set = ide_dma_host_set,
.dma_setup = ide_dma_setup,
.dma_start = ide_dma_start,
.dma_end = ide_dma_end,
.dma_test_irq = siimage_dma_test_irq,
.dma_timer_expiry = ide_dma_sff_timer_expiry,
.dma_lost_irq = ide_dma_lost_irq,
.dma_sff_read_status = ide_dma_sff_read_status,
};
/* Shared chipset template; only the port ops differ between variants. */
#define DECLARE_SII_DEV(p_ops) \
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_siimage, \
.init_iops = init_iops_siimage, \
.port_ops = p_ops, \
.dma_ops = &sil_dma_ops, \
.pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \
.udma_mask = ATA_UDMA6, \
}
/* Indexed by the driver_data field of siimage_pci_tbl. */
static const struct ide_port_info siimage_chipsets[] __devinitdata = {
/* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
/* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
};
/**
* siimage_init_one - PCI layer discovery entry
* @dev: PCI device
* @id: ident table entry
*
* Called by the PCI code when it finds an SiI680 or SiI3112 controller.
* We then use the IDE PCI generic helper to do most of the work.
*/
static int __devinit siimage_init_one(struct pci_dev *dev,
const struct pci_device_id *id)
{
void __iomem *ioaddr = NULL;
resource_size_t bar5 = pci_resource_start(dev, 5);
unsigned long barsize = pci_resource_len(dev, 5);
int rc;
struct ide_port_info d;
u8 idx = id->driver_data;
u8 BA5_EN;
d = siimage_chipsets[idx];
/* idx != 0 => SATA variant: warn once about the preferred driver. */
if (idx) {
static int first = 1;
if (first) {
printk(KERN_INFO DRV_NAME ": For full SATA support you "
"should use the libata sata_sil module.\n");
first = 0;
}
d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
}
rc = pci_enable_device(dev);
if (rc)
return rc;
/* Config reg 0x8A bit 0 indicates whether BAR5 (MMIO) is enabled. */
pci_read_config_byte(dev, 0x8A, &BA5_EN);
if ((BA5_EN & 0x01) || bar5) {
/*
 * Drop back to PIO if we can't map the MMIO. Some systems
 * seem to get terminally confused in the PCI spaces.
 */
if (!request_mem_region(bar5, barsize, d.name)) {
printk(KERN_WARNING DRV_NAME " %s: MMIO ports not "
"available\n", pci_name(dev));
} else {
ioaddr = pci_ioremap_bar(dev, 5);
if (ioaddr == NULL)
release_mem_region(bar5, barsize);
}
}
/* ioaddr (possibly NULL) becomes host->host_priv downstream. */
rc = ide_pci_init_one(dev, &d, ioaddr);
if (rc) {
/* undo the mapping and enable on failure */
if (ioaddr) {
iounmap(ioaddr);
release_mem_region(bar5, barsize);
}
pci_disable_device(dev);
}
return rc;
}
static void __devexit siimage_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
void __iomem *ioaddr = host->host_priv;
/* Tear down the IDE host first, then release the MMIO mapping that
   siimage_init_one() created (if any). */
ide_pci_remove(dev);
if (ioaddr) {
resource_size_t bar5 = pci_resource_start(dev, 5);
unsigned long barsize = pci_resource_len(dev, 5);
iounmap(ioaddr);
release_mem_region(bar5, barsize);
}
pci_disable_device(dev);
}
/* driver_data indexes siimage_chipsets[]: 0 = PATA 680, 1 = SATA. */
static const struct pci_device_id siimage_pci_tbl[] = {
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), 0 },
#ifdef CONFIG_BLK_DEV_IDE_SATA
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_3112), 1 },
{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_1210SA), 1 },
#endif
{ 0, },
};
MODULE_DEVICE_TABLE(pci, siimage_pci_tbl);
static struct pci_driver siimage_pci_driver = {
.name = "SiI_IDE",
.id_table = siimage_pci_tbl,
.probe = siimage_init_one,
.remove = __devexit_p(siimage_remove),
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
/* Module entry: register the PCI driver with the IDE PCI layer. */
static int __init siimage_ide_init(void)
{
return ide_pci_register_driver(&siimage_pci_driver);
}
static void __exit siimage_ide_exit(void)
{
pci_unregister_driver(&siimage_pci_driver);
}
module_init(siimage_ide_init);
module_exit(siimage_ide_exit);
MODULE_AUTHOR("Andre Hedrick, Alan Cox");
MODULE_DESCRIPTION("PCI driver module for SiI IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
gmillz/kernel_lge_msm8974 | drivers/ide/it8213.c | 9217 | 5669 | /*
* ITE 8213 IDE driver
*
* Copyright (C) 2006 Jack Lee
* Copyright (C) 2006 Alan Cox
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#define DRV_NAME "it8213"
/**
* it8213_set_pio_mode - set host controller for PIO mode
* @hwif: port
* @drive: drive
*
* Set the interface PIO mode.
*/
static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = 0x40;
int slave_port = 0x44;
unsigned long flags;
u16 master_data;
u8 slave_data;
/* Timing registers are shared between the units; serialize updates. */
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
const u8 pio = drive->pio_mode - XFER_PIO_0;
/* { ISP, RTC } timing fields per PIO mode 0..4. */
static const u8 timings[][2] = {
{ 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
spin_lock_irqsave(&tune_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
if (pio > 1)
control |= 1; /* Programmable timing on */
if (drive->media != ide_disk)
control |= 4; /* ATAPI */
if (ide_pio_need_iordy(drive, pio))
control |= 2; /* IORDY */
if (is_slave) {
/* enable slave timing, clear old slave control bits */
master_data |= 0x4000;
master_data &= ~0x0070;
if (pio > 1)
master_data = master_data | (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data = slave_data & 0xf0;
slave_data = slave_data | (timings[pio][0] << 2) | timings[pio][1];
} else {
/* clear old master timing/control bits */
master_data &= ~0x3307;
if (pio > 1)
master_data = master_data | control;
master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
spin_unlock_irqrestore(&tune_lock, flags);
}
/**
* it8213_set_dma_mode - set host controller for DMA mode
* @hwif: port
* @drive: drive
*
* Tune the ITE chipset for the DMA mode.
*/
static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = 0x40;
/* Per-drive bit/field positions in the UDMA config registers.
   NOTE(review): register roles inferred from usage below — 0x48 UDMA
   enable, 0x4A UDMA timing, 0x54/0x55 high-speed mode flags; confirm
   against the ITE8213 datasheet. */
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int v_flag = 0x01 << drive->dn;
int w_flag = 0x10 << drive->dn;
int u_speed = 0;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
pci_read_config_byte(dev, 0x48, &reg48);
pci_read_config_word(dev, 0x4a, &reg4a);
pci_read_config_byte(dev, 0x54, &reg54);
pci_read_config_byte(dev, 0x55, &reg55);
if (speed >= XFER_UDMA_0) {
u8 udma = speed - XFER_UDMA_0;
u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
if (!(reg48 & u_flag))
pci_write_config_byte(dev, 0x48, reg48 | u_flag);
if (speed >= XFER_UDMA_5)
pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
else
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if ((reg4a & a_speed) != u_speed)
pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
if (speed > XFER_UDMA_2) {
if (!(reg54 & v_flag))
pci_write_config_byte(dev, 0x54, reg54 | v_flag);
} else
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
/* Non-UDMA: disable all UDMA bits, then program a PIO mode that
   matches the requested MWDMA/SWDMA speed. */
const u8 mwdma_to_pio[] = { 0, 3, 4 };
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
if (reg4a & a_speed)
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
if (reg54 & v_flag)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
if (reg55 & w_flag)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
drive->pio_mode =
mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
it8213_set_pio_mode(hwif, drive);
}
}
static u8 it8213_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 reg42h = 0;

	pci_read_config_byte(dev, 0x42, &reg42h);

	/* Bit 1 set in config reg 0x42 reports a 40-wire cable. */
	if (reg42h & 0x02)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}
static const struct ide_port_ops it8213_port_ops = {
.set_pio_mode = it8213_set_pio_mode,
.set_dma_mode = it8213_set_dma_mode,
.cable_detect = it8213_cable_detect,
};
/* Static description of the single-channel ITE8213 for the PCI core. */
static const struct ide_port_info it8213_chipset __devinitdata = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80} },
.port_ops = &it8213_port_ops,
.host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2_ONLY,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA6,
};
/**
* it8213_init_one - pci layer discovery entry
* @dev: PCI device
* @id: ident table entry
*
* Called by the PCI code when it finds an ITE8213 controller. As
* this device follows the standard interfaces we can use the
* standard helper functions to do almost all the work for us.
*/
static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Fully standard interface: the generic IDE PCI helper does the work. */
return ide_pci_init_one(dev, &it8213_chipset, NULL);
}
static const struct pci_device_id it8213_pci_tbl[] = {
{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
static struct pci_driver it8213_pci_driver = {
.name = "ITE8213_IDE",
.id_table = it8213_pci_tbl,
.probe = it8213_init_one,
.remove = ide_pci_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
/* Module entry: register the PCI driver with the IDE PCI layer. */
static int __init it8213_ide_init(void)
{
return ide_pci_register_driver(&it8213_pci_driver);
}
static void __exit it8213_ide_exit(void)
{
pci_unregister_driver(&it8213_pci_driver);
}
module_init(it8213_ide_init);
module_exit(it8213_ide_exit);
MODULE_AUTHOR("Jack Lee, Alan Cox");
MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Evil-Green/boeffla-kernel-jb-u10-s3 | arch/blackfin/kernel/irqchip.c | 9985 | 3849 | /*
* Copyright 2005-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_handler.h>
#include <asm/trace.h>
#include <asm/pda.h>
/* Count of spurious/unhandled interrupts, reported via /proc. */
static atomic_t irq_err_count;
/* Record and log an interrupt that arrived with no registered handler. */
void ack_bad_irq(unsigned int irq)
{
atomic_inc(&irq_err_count);
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
/* Descriptor used by asm_do_IRQ() for out-of-range IRQ numbers. */
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
.lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
/* We are not allocating a variable-sized bad_irq_desc.affinity */
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
#ifdef CONFIG_PROC_FS
/* /proc/interrupts seq_file callback: one row per active IRQ, then an
   NMI row and the spurious-interrupt error count. */
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
struct irqaction *action;
unsigned long flags;
if (i < NR_IRQS) {
struct irq_desc *desc = irq_to_desc(i);
/* Hold the descriptor lock so the action list can't change
   while we walk it. */
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, " %s", action->name);
seq_putc(p, '\n');
skip:
raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS) {
/* Summary rows after the last real IRQ. */
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
seq_printf(p, " CORE Non Maskable Interrupt\n");
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
}
return 0;
}
#endif
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static void check_stack_overflow(int irq)
{
/* Debugging check for stack overflow: is there less than STACK_WARN free? */
long sp = __get_SP() & (THREAD_SIZE - 1);
/* thread_info lives at the bottom of the stack; warn before the
   stack pointer runs into it. */
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
dump_stack();
pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
irq, sp - sizeof(struct thread_info));
}
}
#else
static inline void check_stack_overflow(int irq) { }
#endif
#ifndef CONFIG_IPIPE
/*
 * maybe_lower_to_irq14 - drop to IRQ14 priority when we are the only
 * active interrupt, so softirqs run at that level on exit.
 */
static void maybe_lower_to_irq14(void)
{
    unsigned short pending;

    /*
     * If we're the only interrupt running (ignoring IRQ15 which
     * is for syscalls), lower our priority to IRQ14 so that
     * softirqs run at that level. If there's another,
     * lower-level interrupt, irq_exit will defer softirqs to
     * that. If the interrupt pipeline is enabled, we are already
     * running at IRQ14 priority, so we don't need this code.
     */
    CSYNC();
    pending = bfin_read_IPEND() & ~0x8000;
    /* x & (x - 1) clears the lowest set bit: zero here means exactly
     * one interrupt (ours) is pending. */
    if (!(pending & (pending - 1)))
        lower_to_irq14();
}
#else
static inline void maybe_lower_to_irq14(void) { }
#endif
/*
 * asm_do_IRQ - common entry point for all hardware IRQs.
 * Decoded IRQs should not come via this function; they should provide
 * their own 'handler'.
 */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    struct pt_regs *saved_regs = set_irq_regs(regs);

    irq_enter();
    check_stack_overflow(irq);

    /*
     * Some hardware gives randomly wrong interrupts. Rather
     * than crashing, do something sensible.
     */
    if (irq < NR_IRQS)
        generic_handle_irq(irq);
    else
        handle_bad_irq(irq, &bad_irq_desc);

    maybe_lower_to_irq14();
    irq_exit();
    set_irq_regs(saved_regs);
}
/*
 * init_IRQ - boot-time interrupt initialization
 *
 * Delegates to the board/SoC specific init_arch_irq(), then (when the
 * hardware-trace expansion debug option is on) enables the trace buffer
 * now that the evt_ivhw handler has been installed.
 */
void __init init_IRQ(void)
{
    init_arch_irq();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
    /* Now that evt_ivhw is set up, turn this on */
    trace_buff_offset = 0;
    bfin_write_TBUFCTL(BFIN_TRACE_ON);
    printk(KERN_INFO "Hardware Trace expanded to %ik\n",
           1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN);
#endif
}
| gpl-2.0 |
TEAM-RAZOR-DEVICES/android_kernel_lge_v500 | fs/nilfs2/btnode.c | 12289 | 7842 | /*
* btnode.c - NILFS B-tree node cache
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* This file was originally written by Seiji Kihara <kihara@osrg.net>
* and fully revised by Ryusuke Konishi <ryusuke@osrg.net> for
* stabilization and simplification.
*
*/
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"
/*
 * nilfs_btnode_cache_clear - drop all pages from a B-tree node cache
 * @btnc: btnode address space to empty
 *
 * First invalidates what can be dropped cheaply, then truncates the
 * remainder (dirty/busy pages included) unconditionally.
 */
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
    invalidate_mapping_pages(btnc, 0, -1);
    truncate_inode_pages(btnc, 0);
}
/*
 * nilfs_btnode_create_block - allocate a fresh B-tree node buffer
 * @btnc: btnode address space
 * @blocknr: key (block number) for the new node
 *
 * Grabs a page/buffer for @blocknr, verifies it carries no prior state,
 * zero-fills it and marks it mapped + uptodate. Returns the buffer with
 * an elevated refcount, or NULL on allocation failure. The page lock and
 * page reference taken by nilfs_grab_buffer() are released before return.
 */
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
    struct inode *inode = NILFS_BTNC_I(btnc);
    struct buffer_head *new_bh;

    new_bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
    if (unlikely(!new_bh))
        return NULL;

    /* A newly created node must not already be mapped, uptodate or
     * dirty; any of those means the cache is corrupt. */
    if (unlikely(buffer_mapped(new_bh) || buffer_uptodate(new_bh) ||
                 buffer_dirty(new_bh))) {
        brelse(new_bh);
        BUG();
    }

    memset(new_bh->b_data, 0, 1 << inode->i_blkbits);
    new_bh->b_bdev = inode->i_sb->s_bdev;
    new_bh->b_blocknr = blocknr;
    set_buffer_mapped(new_bh);
    set_buffer_uptodate(new_bh);

    unlock_page(new_bh->b_page);
    page_cache_release(new_bh->b_page);
    return new_bh;
}
/*
 * nilfs_btnode_submit_block - look up a B-tree node and submit read I/O
 * @btnc: btnode address space
 * @blocknr: (virtual) block number of the node
 * @pblocknr: physical block number, or 0 to translate via the DAT
 * @mode: READ or READA
 * @pbh: out: the buffer head (only valid on 0 or -EEXIST)
 * @submit_ptr: in/out: last submitted physical block, used to restrict
 *              readahead to sequential blocks
 *
 * Returns 0 when I/O was submitted, the internal code -EEXIST when the
 * buffer was already uptodate/dirty, -EBUSY when a readahead was skipped,
 * -ENOMEM on allocation failure, or an error from DAT translation.
 */
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              sector_t pblocknr, int mode,
                              struct buffer_head **pbh, sector_t *submit_ptr)
{
    struct buffer_head *bh;
    struct inode *inode = NILFS_BTNC_I(btnc);
    struct page *page;
    int err;

    bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
    if (unlikely(!bh))
        return -ENOMEM;

    err = -EEXIST; /* internal code */
    page = bh->b_page;
    if (buffer_uptodate(bh) || buffer_dirty(bh))
        goto found;

    if (pblocknr == 0) {
        pblocknr = blocknr;
        if (inode->i_ino != NILFS_DAT_INO) {
            struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
            /* blocknr is a virtual block number */
            err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
                                      &pblocknr);
            if (unlikely(err)) {
                brelse(bh);
                goto out_locked;
            }
        }
    }

    if (mode == READA) {
        /* Readahead only for the block physically following the last
         * submission; never block on the buffer lock for readahead. */
        if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
            err = -EBUSY; /* internal code */
            brelse(bh);
            goto out_locked;
        }
    } else { /* mode == READ */
        lock_buffer(bh);
    }
    /* Re-check after acquiring the buffer lock: someone may have read
     * it in while we waited. */
    if (buffer_uptodate(bh)) {
        unlock_buffer(bh);
        err = -EEXIST; /* internal code */
        goto found;
    }
    set_buffer_mapped(bh);
    bh->b_bdev = inode->i_sb->s_bdev;
    bh->b_blocknr = pblocknr; /* set block address for read */
    bh->b_end_io = end_buffer_read_sync;
    get_bh(bh);
    submit_bh(mode, bh);
    bh->b_blocknr = blocknr; /* set back to the given block address */
    *submit_ptr = pblocknr;
    err = 0;
found:
    *pbh = bh;
out_locked:
    unlock_page(page);
    page_cache_release(page);
    return err;
}
/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the page
 * including the buffer if the page gets unbusy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
    struct address_space *mapping;
    struct page *page = bh->b_page;
    pgoff_t index = page_index(page);
    int still_dirty;

    /* Pin and lock the page so it cannot disappear while we forget
     * the buffer. */
    page_cache_get(page);
    lock_page(page);
    wait_on_page_writeback(page);

    nilfs_forget_buffer(bh);
    still_dirty = PageDirty(page);
    mapping = page->mapping;
    unlock_page(page);
    page_cache_release(page);

    /* Only try to drop the whole page when no other buffer on it is
     * still dirty. */
    if (!still_dirty && mapping)
        invalidate_inode_pages2_range(mapping, index, index);
}
/**
 * nilfs_btnode_prepare_change_key
 * prepare to move contents of the block for old key to one of new key.
 * the old buffer will not be removed, but might be reused for new buffer.
 * it might return -ENOMEM because of memory allocation errors,
 * and might return -EIO because of disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
    struct buffer_head *obh, *nbh;
    struct inode *inode = NILFS_BTNC_I(btnc);
    __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
    int err;

    if (oldkey == newkey)
        return 0;

    obh = ctxt->bh;
    ctxt->newbh = NULL;

    if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
        /* Block size equals page size: move the existing page itself
         * to the new radix-tree index instead of copying data. */
        lock_page(obh->b_page);
        /*
         * We cannot call radix_tree_preload for the kernels older
         * than 2.6.23, because it is not exported for modules.
         */
retry:
        err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (err)
            goto failed_unlock;
        /* BUG_ON(oldkey != obh->b_page->index); */
        if (unlikely(oldkey != obh->b_page->index))
            NILFS_PAGE_BUG(obh->b_page,
                           "invalid oldkey %lld (newkey=%lld)",
                           (unsigned long long)oldkey,
                           (unsigned long long)newkey);

        spin_lock_irq(&btnc->tree_lock);
        err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
        spin_unlock_irq(&btnc->tree_lock);
        /*
         * Note: page->index will not change to newkey until
         * nilfs_btnode_commit_change_key() will be called.
         * To protect the page in intermediate state, the page lock
         * is held.
         */
        radix_tree_preload_end();
        if (!err)
            return 0;
        else if (err != -EEXIST)
            goto failed_unlock;

        /* A page already occupies newkey: try to evict it, then
         * retry the insertion. */
        err = invalidate_inode_pages2_range(btnc, newkey, newkey);
        if (!err)
            goto retry;
        /* fallback to copy mode */
        unlock_page(obh->b_page);
    }

    /* Copy mode: allocate a fresh buffer under the new key; contents
     * are copied over at commit time. */
    nbh = nilfs_btnode_create_block(btnc, newkey);
    if (!nbh)
        return -ENOMEM;

    BUG_ON(nbh == obh);
    ctxt->newbh = nbh;
    return 0;

failed_unlock:
    unlock_page(obh->b_page);
    return err;
}
/**
 * nilfs_btnode_commit_change_key
 * commit the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
    struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
    __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
    struct page *opage;

    if (oldkey == newkey)
        return;

    if (nbh == NULL) {	/* blocksize == pagesize */
        /* Page-move mode: prepare already inserted the page at newkey;
         * remove the oldkey slot and re-tag dirty under the new index. */
        opage = obh->b_page;
        if (unlikely(oldkey != opage->index))
            NILFS_PAGE_BUG(opage,
                           "invalid oldkey %lld (newkey=%lld)",
                           (unsigned long long)oldkey,
                           (unsigned long long)newkey);
        mark_buffer_dirty(obh);

        spin_lock_irq(&btnc->tree_lock);
        radix_tree_delete(&btnc->page_tree, oldkey);
        radix_tree_tag_set(&btnc->page_tree, newkey,
                           PAGECACHE_TAG_DIRTY);
        spin_unlock_irq(&btnc->tree_lock);

        /* Page lock was held since prepare; release it now that the
         * index is consistent again. */
        opage->index = obh->b_blocknr = newkey;
        unlock_page(opage);
    } else {
        /* Copy mode: move contents into the new buffer, hand it back
         * via the context and drop the old one. */
        nilfs_copy_buffer(nbh, obh);
        mark_buffer_dirty(nbh);

        nbh->b_blocknr = newkey;
        ctxt->bh = nbh;
        nilfs_btnode_delete(obh); /* will decrement bh->b_count */
    }
}
/**
 * nilfs_btnode_abort_change_key
 * abort the change_key operation prepared by prepare_change_key():
 * either remove the speculative radix-tree entry (page-move mode) or
 * release the spare buffer (copy mode).
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
                                   struct nilfs_btnode_chkey_ctxt *ctxt)
{
    struct buffer_head *new_bh = ctxt->newbh;

    /* Nothing was prepared if the key is unchanged. */
    if (ctxt->oldkey == ctxt->newkey)
        return;

    if (new_bh != NULL) {
        /* Copy mode: just drop the unused replacement buffer. */
        brelse(new_bh);
        return;
    }

    /* blocksize == pagesize: undo the insertion made by prepare and
     * release the page lock held across the operation. */
    spin_lock_irq(&btnc->tree_lock);
    radix_tree_delete(&btnc->page_tree, ctxt->newkey);
    spin_unlock_irq(&btnc->tree_lock);
    unlock_page(ctxt->bh->b_page);
}
| gpl-2.0 |
drower/wireshark-1.10.0 | wiretap/radcom.c | 2 | 12296 | /* radcom.c
*
* $Id: radcom.c 46803 2012-12-27 12:19:25Z guy $
*
* Wiretap Library
* Copyright (c) 1998 by Gilbert Ramirez <gram@alumni.rice.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <errno.h>
#include <string.h>
#include "wtap-int.h"
#include "file_wrappers.h"
#include "buffer.h"
#include "radcom.h"
/* Capture timestamp in natively-aligned form, filled from the on-disk
 * little-endian layout below. */
struct frame_date {
    guint16 year;
    guint8 month;
    guint8 day;
    guint32 sec; /* seconds since midnight */
    guint32 usec;
};

/* Raw on-disk byte layout of the same date (unaligned, little-endian);
 * decoded with pletohs()/pletohl(). */
struct unaligned_frame_date {
    char year[2];
    char month;
    char day;
    char sec[4]; /* seconds since midnight */
    char usec[4];
};
/* Found at the beginning of the file. Bytes 2 and 3 (D2:00) seem to be
 * different in some captures */
static const guint8 radcom_magic[8] = {
    0x42, 0xD2, 0x00, 0x34, 0x12, 0x66, 0x22, 0x88
};

/* Marker scanned for in radcom_open() to locate the encapsulation name. */
static const guint8 encap_magic[4] = {
    0x00, 0x42, 0x43, 0x09
};

/* The ASCII string "Active Time"; the capture start date is located
 * 32 bytes before the beginning of this string (see radcom_open()). */
static const guint8 active_time_magic[11] = {
    0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65
};
/* RADCOM record header - followed by frame data (perhaps including FCS).
   "data_length" appears to be the length of packet data following
   the record header. It's 0 in the last record.
   "length" appears to be the amount of captured packet data, and
   "real_length" might be the actual length of the frame on the wire -
   in some captures, it's the same as "length", and, in others,
   it's greater than "length". In the last record, however, those
   may have bogus values (or is that some kind of trailer record?).
   "xxx" appears to be all-zero in all but the last record in one
   capture; if so, perhaps this indicates that the last record is,
   in fact, a trailer of some sort, and some field in the header
   is a record type. */
struct radcomrec_hdr {
    char xxx[4]; /* unknown */
    char data_length[2]; /* packet length? */
    char xxy[5]; /* unknown */
    struct unaligned_frame_date date; /* date/time stamp of packet */
    char real_length[2]; /* actual length of packet */
    char length[2]; /* captured length of packet */
    char xxz[2]; /* unknown */
    char dce; /* DCE/DTE flag (and other flags?) */
    char xxw[9]; /* unknown */
};
/* File-local forward declarations for the wiretap subtype callbacks and
 * record I/O helpers defined below. */
static gboolean radcom_read(wtap *wth, int *err, gchar **err_info,
    gint64 *data_offset);
static gboolean radcom_seek_read(wtap *wth, gint64 seek_off,
    struct wtap_pkthdr *pkhdr, guint8 *pd, int length,
    int *err, gchar **err_info);
static int radcom_read_rec_header(FILE_T fh, struct radcomrec_hdr *hdr,
    int *err, gchar **err_info);
static gboolean radcom_read_rec_data(FILE_T fh, guint8 *pd, int length,
    int *err, gchar **err_info);
/*
 * radcom_open - probe and open a RADCOM capture file
 *
 * Returns 1 if the file is recognized as RADCOM (wth fields set up),
 * 0 if it is not a RADCOM file, -1 on hard I/O error (*err set).
 * Note: during the probe phase, short reads mean "not our format"
 * and return 0, not an error.
 */
int radcom_open(wtap *wth, int *err, gchar **err_info)
{
    int bytes_read;
    guint8 r_magic[8], t_magic[11], search_encap[7];
    struct frame_date start_date;
#if 0
    guint32 sec;
    struct tm tm;
#endif

    /* Read in the string that should be at the start of a RADCOM file */
    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(r_magic, 8, wth->fh);
    if (bytes_read != 8) {
        *err = file_error(wth->fh, err_info);
        if (*err != 0 && *err != WTAP_ERR_SHORT_READ)
            return -1;
        return 0;
    }

    /* XXX: bytes 2 and 3 of the "magic" header seem to be different in some
     * captures. We force them to our standard value so that the test
     * succeeds (until we find if they have a special meaning, perhaps a
     * version number ?) */
    r_magic[1] = 0xD2;
    r_magic[2] = 0x00;
    if (memcmp(r_magic, radcom_magic, 8) != 0) {
        return 0;
    }

    /* Look for the "Active Time" string. The "frame_date" structure should
     * be located 32 bytes before the beginning of this string */
    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(t_magic, 11, wth->fh);
    if (bytes_read != 11) {
        *err = file_error(wth->fh, err_info);
        if (*err != 0 && *err != WTAP_ERR_SHORT_READ)
            return -1;
        return 0;
    }
    /* Slide an 11-byte window forward one byte at a time until it
     * matches "Active Time". */
    while (memcmp(t_magic, active_time_magic, 11) != 0)
    {
        if (file_seek(wth->fh, -10, SEEK_CUR, err) == -1)
            return -1;
        errno = WTAP_ERR_CANT_READ;
        bytes_read = file_read(t_magic, 11, wth->fh);
        if (bytes_read != 11) {
            *err = file_error(wth->fh, err_info);
            if (*err != 0 && *err != WTAP_ERR_SHORT_READ)
                return -1;
            return 0;
        }
    }
    /* Back up to the frame_date that precedes "Active Time"
     * (32 bytes before the string, which we just read past). */
    if (file_seek(wth->fh, -43, SEEK_CUR, err) == -1) return -1;

    /* Get capture start time */
    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(&start_date, sizeof(struct frame_date),
                           wth->fh);
    if (bytes_read != sizeof(struct frame_date)) {
        *err = file_error(wth->fh, err_info);
        if (*err != 0 && *err != WTAP_ERR_SHORT_READ)
            return -1;
        return 0;
    }

    /* This is a radcom file */
    wth->file_type = WTAP_FILE_RADCOM;
    wth->subtype_read = radcom_read;
    wth->subtype_seek_read = radcom_seek_read;
    wth->snapshot_length = 0; /* not available in header, only in frame */
    wth->tsprecision = WTAP_FILE_TSPREC_USEC;

#if 0
    tm.tm_year = pletohs(&start_date.year)-1900;
    tm.tm_mon = start_date.month-1;
    tm.tm_mday = start_date.day;
    sec = pletohl(&start_date.sec);
    tm.tm_hour = sec/3600;
    tm.tm_min = (sec%3600)/60;
    tm.tm_sec = sec%60;
    tm.tm_isdst = -1;
#endif

    /* Skip the second frame_date copy, then scan byte-by-byte for the
     * encapsulation marker. From here on, short reads go through
     * read_error (we already know this is a RADCOM file). */
    if (file_seek(wth->fh, sizeof(struct frame_date), SEEK_CUR, err) == -1)
        return -1;
    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(search_encap, 4, wth->fh);
    if (bytes_read != 4) {
        goto read_error;
    }
    while (memcmp(encap_magic, search_encap, 4)) {
        if (file_seek(wth->fh, -3, SEEK_CUR, err) == -1)
            return -1;
        errno = WTAP_ERR_CANT_READ;
        bytes_read = file_read(search_encap, 4, wth->fh);
        if (bytes_read != 4) {
            goto read_error;
        }
    }
    if (file_seek(wth->fh, 12, SEEK_CUR, err) == -1)
        return -1;
    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(search_encap, 4, wth->fh);
    if (bytes_read != 4) {
        goto read_error;
    }
    /* Map the 4-character network-type name to a wiretap encapsulation. */
    if (memcmp(search_encap, "LAPB", 4) == 0)
        wth->file_encap = WTAP_ENCAP_LAPB;
    else if (memcmp(search_encap, "Ethe", 4) == 0)
        wth->file_encap = WTAP_ENCAP_ETHERNET;
    else if (memcmp(search_encap, "ATM/", 4) == 0)
        wth->file_encap = WTAP_ENCAP_ATM_RFC1483;
    else {
        *err = WTAP_ERR_UNSUPPORTED_ENCAP;
        *err_info = g_strdup_printf("radcom: network type \"%.4s\" unknown", search_encap);
        return -1;
    }

#if 0
    bytes_read = file_read(&next_date, sizeof(struct frame_date), wth->fh);
    errno = WTAP_ERR_CANT_READ;
    if (bytes_read != sizeof(struct frame_date)) {
        goto read_error;
    }

    while (memcmp(&start_date, &next_date, 4)) {
        if (file_seek(wth->fh, 1-sizeof(struct frame_date), SEEK_CUR, err) == -1)
            return -1;
        errno = WTAP_ERR_CANT_READ;
        bytes_read = file_read(&next_date, sizeof(struct frame_date),
                               wth->fh);
        if (bytes_read != sizeof(struct frame_date)) {
            goto read_error;
        }
    }
#endif

    /* Skip the encapsulation-specific remainder of the file header so
     * the stream is positioned at the first record. */
    if (wth->file_encap == WTAP_ENCAP_ETHERNET) {
        if (file_seek(wth->fh, 294, SEEK_CUR, err) == -1)
            return -1;
    } else if (wth->file_encap == WTAP_ENCAP_LAPB) {
        if (file_seek(wth->fh, 297, SEEK_CUR, err) == -1)
            return -1;
    } else if (wth->file_encap == WTAP_ENCAP_ATM_RFC1483) {
        if (file_seek(wth->fh, 504, SEEK_CUR, err) == -1)
            return -1;
    }

    return 1;

read_error:
    *err = file_error(wth->fh, err_info);
    if (*err != 0)
        return -1;
    return 0;
}
/* Read the next packet; wiretap sequential-read callback.
 * Fills wth->phdr (timestamp, lengths, pseudo-header) and the frame
 * buffer. Returns FALSE at EOF (with *err == 0) or on error. */
static gboolean radcom_read(wtap *wth, int *err, gchar **err_info,
                            gint64 *data_offset)
{
    int ret;
    struct radcomrec_hdr hdr;
    guint16 data_length, real_length, length;
    guint32 sec;
    int bytes_read;
    struct tm tm;
    guint8 phdr[8];
    char fcs[2];

    /* Read record header. */
    *data_offset = file_tell(wth->fh);
    ret = radcom_read_rec_header(wth->fh, &hdr, err, err_info);
    if (ret <= 0) {
        /* Read error or EOF */
        return FALSE;
    }
    data_length = pletohs(&hdr.data_length);
    if (data_length == 0) {
        /*
         * The last record appears to have 0 in its "data_length"
         * field, but non-zero values in other fields, so we
         * check for that and treat it as an EOF indication.
         */
        *err = 0;
        return FALSE;
    }

    length = pletohs(&hdr.length);
    real_length = pletohs(&hdr.real_length);
    /* For LAPB the captured data carries a 2-byte FCS which is not
     * part of the packet handed to the caller. */
    if (wth->file_encap == WTAP_ENCAP_LAPB) {
        length -= 2; /* FCS */
        real_length -= 2;
    }

    wth->phdr.presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN;
    wth->phdr.len = real_length;
    wth->phdr.caplen = length;

    /* Decode the little-endian date fields into a local-time struct tm;
     * "sec" is seconds since midnight. */
    tm.tm_year = pletohs(&hdr.date.year)-1900;
    tm.tm_mon = (hdr.date.month&0x0f)-1;
    tm.tm_mday = hdr.date.day;
    sec = pletohl(&hdr.date.sec);
    tm.tm_hour = sec/3600;
    tm.tm_min = (sec%3600)/60;
    tm.tm_sec = sec%60;
    tm.tm_isdst = -1;
    wth->phdr.ts.secs = mktime(&tm);
    wth->phdr.ts.nsecs = pletohl(&hdr.date.usec) * 1000;

    switch (wth->file_encap) {
    case WTAP_ENCAP_ETHERNET:
        /* XXX - is there an FCS? */
        wth->phdr.pseudo_header.eth.fcs_len = -1;
        break;
    case WTAP_ENCAP_LAPB:
        wth->phdr.pseudo_header.x25.flags = (hdr.dce & 0x1) ?
            0x00 : FROM_DCE;
        break;
    case WTAP_ENCAP_ATM_RFC1483:
        /*
         * XXX - is this stuff a pseudo-header?
         * The direction appears to be in the "hdr.dce" field.
         */
        if (!radcom_read_rec_data(wth->fh, phdr, sizeof phdr, err,
            err_info))
            return FALSE; /* Read error */
        length -= 8;
        wth->phdr.len -= 8;
        wth->phdr.caplen -= 8;
        break;
    }

    /*
     * Read the packet data.
     */
    buffer_assure_space(wth->frame_buffer, length);
    if (!radcom_read_rec_data(wth->fh,
        buffer_start_ptr(wth->frame_buffer), length, err, err_info))
        return FALSE; /* Read error */

    if (wth->file_encap == WTAP_ENCAP_LAPB) {
        /* Read the FCS.
           XXX - should we have some way of indicating the
           presence and size of an FCS to our caller?
           That'd let us handle other file types as well. */
        errno = WTAP_ERR_CANT_READ;
        bytes_read = file_read(&fcs, sizeof fcs, wth->fh);
        if (bytes_read != sizeof fcs) {
            *err = file_error(wth->fh, err_info);
            if (*err == 0)
                *err = WTAP_ERR_SHORT_READ;
            return FALSE;
        }
    }

    return TRUE;
}
/* Random-access read callback: re-read the record at seek_off into pd.
 * Unlike radcom_read(), EOF here is a short-read error since the record
 * was seen before. */
static gboolean
radcom_seek_read(wtap *wth, gint64 seek_off,
                 struct wtap_pkthdr *pkhdr, guint8 *pd, int length,
                 int *err, gchar **err_info)
{
    union wtap_pseudo_header *pseudo_header = &pkhdr->pseudo_header;
    int ret;
    struct radcomrec_hdr hdr;
    guint8 phdr[8];

    if (file_seek(wth->random_fh, seek_off, SEEK_SET, err) == -1)
        return FALSE;

    /* Read record header. */
    ret = radcom_read_rec_header(wth->random_fh, &hdr, err, err_info);
    if (ret <= 0) {
        /* Read error or EOF */
        if (ret == 0) {
            /* EOF means "short read" in random-access mode */
            *err = WTAP_ERR_SHORT_READ;
        }
        return FALSE;
    }

    switch (wth->file_encap) {
    case WTAP_ENCAP_ETHERNET:
        /* XXX - is there an FCS? */
        pseudo_header->eth.fcs_len = -1;
        break;
    case WTAP_ENCAP_LAPB:
        pseudo_header->x25.flags = (hdr.dce & 0x1) ? 0x00 : FROM_DCE;
        break;
    case WTAP_ENCAP_ATM_RFC1483:
        /*
         * XXX - is this stuff a pseudo-header?
         * The direction appears to be in the "hdr.dce" field.
         */
        if (!radcom_read_rec_data(wth->random_fh, phdr, sizeof phdr,
            err, err_info))
            return FALSE; /* Read error */
        break;
    }

    /*
     * Read the packet data.
     */
    return radcom_read_rec_data(wth->random_fh, pd, length, err, err_info);
}
/*
 * Read one fixed-size record header from fh into *hdr.
 * Returns 1 on success, 0 on clean EOF (nothing read, no error),
 * -1 on I/O error or a partial header (*err set).
 */
static int
radcom_read_rec_header(FILE_T fh, struct radcomrec_hdr *hdr, int *err,
                       gchar **err_info)
{
    int bytes_read;

    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(hdr, sizeof *hdr, fh);
    if (bytes_read == sizeof *hdr)
        return 1;

    /* Partial or failed read: distinguish hard error, truncated
     * header, and clean EOF. */
    *err = file_error(fh, err_info);
    if (*err == 0 && bytes_read != 0)
        *err = WTAP_ERR_SHORT_READ;
    return (*err != 0) ? -1 : 0;
}
/*
 * Read exactly `length` bytes of record payload into pd.
 * Returns TRUE on success; FALSE with *err set on error or short read.
 */
static gboolean
radcom_read_rec_data(FILE_T fh, guint8 *pd, int length, int *err,
                     gchar **err_info)
{
    int bytes_read;

    errno = WTAP_ERR_CANT_READ;
    bytes_read = file_read(pd, length, fh);
    if (bytes_read == length)
        return TRUE;

    /* A short read with no underlying error is still a failure here. */
    *err = file_error(fh, err_info);
    if (*err == 0)
        *err = WTAP_ERR_SHORT_READ;
    return FALSE;
}
| gpl-2.0 |
MattCrystal/tripping-hipster | arch/arm/mach-msm/sensors_adsp.c | 2 | 32015 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/workqueue.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ctype.h>
#include <linux/of_device.h>
#include <linux/msm_dsps.h>
#include <linux/uaccess.h>
#include <asm/mach-types.h>
#include <asm/arch_timer.h>
#include <mach/subsystem_restart.h>
#include <mach/ocmem.h>
#include <mach/msm_smd.h>
#include <mach/sensors_adsp.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
/* Driver identity and fixed sizes. */
#define DRV_NAME "sensors"
#define DRV_VERSION "1.00"
#define SNS_OCMEM_SMD_CHANNEL "SENSOR"
#define SNS_OCMEM_CLIENT_ID OCMEM_SENSORS
#define SNS_OCMEM_SIZE SZ_256K
#define SMD_BUF_SIZE 1024
#define SNS_TIMEOUT_MS 1000
/* Bit flags for sns_adsp_control_s.sns_ocmem_status: OCMEM driver
 * notifications... */
#define SNS_OCMEM_ALLOC_GROW 0x00000001
#define SNS_OCMEM_ALLOC_SHRINK 0x00000002
#define SNS_OCMEM_MAP_DONE 0x00000004
#define SNS_OCMEM_MAP_FAIL 0x00000008
#define SNS_OCMEM_UNMAP_DONE 0x00000010
#define SNS_OCMEM_UNMAP_FAIL 0x00000020
/* ...and ADSP-side state reported over SMD. */
#define DSPS_HAS_CLIENT 0x00000100
#define DSPS_HAS_NO_CLIENT 0x00000200
#define DSPS_BW_VOTE_ON 0x00000400
#define DSPS_BW_VOTE_OFF 0x00000800
#define DSPS_PHYS_ADDR_SET 0x00001000
/*
 * Structure contains all state used by the sensors driver
 */
struct sns_adsp_control_s {
    wait_queue_head_t sns_wait;        /* woken on any status change */
    spinlock_t sns_lock;               /* guards sns_ocmem_status */
    struct workqueue_struct *sns_workqueue;
    struct work_struct sns_work;
    struct workqueue_struct *smd_wq;   /* defers SMD packet draining */
    struct work_struct smd_read_work;
    smd_channel_t *smd_ch;             /* SMD channel to the ADSP */
    uint32_t sns_ocmem_status;         /* SNS_OCMEM_*/DSPS_* flag word */
    uint32_t mem_segments_size;        /* valid entries in mem_segments */
    struct sns_mem_segment_s_v01 mem_segments[SNS_OCMEM_MAX_NUM_SEG_V01];
    struct ocmem_buf *buf;
    struct ocmem_map_list map_list;
    struct ocmem_notifier *ocmem_handle;
    bool ocmem_enabled;
    struct notifier_block ocmem_nb;    /* registered OCMEM callback */
    uint32_t sns_ocmem_bus_client;     /* msm_bus scaling client handle */
    struct platform_device *pdev;
    void *pil;                         /* handle from subsystem_get("adsp") */
    struct class *dev_class;
    dev_t dev_num;
    struct device *dev;
    struct cdev *cdev;
};
/* Single driver-wide instance. */
static struct sns_adsp_control_s sns_ctl;
/*
 * All asynchronous responses from the OCMEM driver are received
 * by this function.
 *
 * @self: our notifier block (unused)
 * @action: OCMEM_* notifier action from the OCMEM driver
 * @dev: notifier payload (unused)
 *
 * Translates the action into the SNS_OCMEM_* status flags, clearing
 * the mutually-exclusive flags of the same group, then wakes any
 * waiter in sns_ocmem_wait(). Always returns 0.
 *
 * Fix: the clear masks previously used the OCMEM notifier action
 * constant OCMEM_MAP_FAIL instead of the status flag
 * SNS_OCMEM_MAP_FAIL, so the wrong status bits were cleared and a
 * stale MAP_FAIL flag could survive a later MAP_DONE.
 */
int sns_ocmem_drv_cb(struct notifier_block *self,
                     unsigned long action,
                     void *dev)
{
    unsigned long flags;

    spin_lock_irqsave(&sns_ctl.sns_lock, flags);
    pr_debug("%s: Received OCMEM callback: action=%li\n",
             __func__, action);

    switch (action) {
    case OCMEM_MAP_DONE:
        sns_ctl.sns_ocmem_status |= SNS_OCMEM_MAP_DONE;
        sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_MAP_FAIL &
                                     ~SNS_OCMEM_UNMAP_DONE &
                                     ~SNS_OCMEM_UNMAP_FAIL);
        break;
    case OCMEM_MAP_FAIL:
        sns_ctl.sns_ocmem_status |= SNS_OCMEM_MAP_FAIL;
        sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_MAP_DONE &
                                     ~SNS_OCMEM_UNMAP_DONE &
                                     ~SNS_OCMEM_UNMAP_FAIL);
        break;
    case OCMEM_UNMAP_DONE:
        sns_ctl.sns_ocmem_status |= SNS_OCMEM_UNMAP_DONE;
        sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_UNMAP_FAIL &
                                     ~SNS_OCMEM_MAP_DONE &
                                     ~SNS_OCMEM_MAP_FAIL);
        break;
    case OCMEM_UNMAP_FAIL:
        sns_ctl.sns_ocmem_status |= SNS_OCMEM_UNMAP_FAIL;
        sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_UNMAP_DONE &
                                     ~SNS_OCMEM_MAP_DONE &
                                     ~SNS_OCMEM_MAP_FAIL);
        break;
    case OCMEM_ALLOC_GROW:
        sns_ctl.sns_ocmem_status |= SNS_OCMEM_ALLOC_GROW;
        sns_ctl.sns_ocmem_status &= ~SNS_OCMEM_ALLOC_SHRINK;
        break;
    case OCMEM_ALLOC_SHRINK:
        sns_ctl.sns_ocmem_status |= SNS_OCMEM_ALLOC_SHRINK;
        sns_ctl.sns_ocmem_status &= ~SNS_OCMEM_ALLOC_GROW;
        break;
    default:
        pr_err("%s: Unknown action received in OCMEM callback %lu\n",
               __func__, action);
        break;
    }
    spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
    wake_up(&sns_ctl.sns_wait);

    return 0;
}
/*
* Processes messages received through SMD from the ADSP
*
* @param hdr The message header
* @param msg Message pointer
*
*/
void sns_ocmem_smd_process(struct sns_ocmem_hdr_s *hdr, void *msg)
{
unsigned long flags;
spin_lock_irqsave(&sns_ctl.sns_lock, flags);
pr_debug("%s: Received message from ADSP; id: %i type: %i (%08x)\n",
__func__, hdr->msg_id, hdr->msg_type,
sns_ctl.sns_ocmem_status);
if (hdr->msg_id == SNS_OCMEM_PHYS_ADDR_RESP_V01 &&
hdr->msg_type == SNS_OCMEM_MSG_TYPE_RESP) {
struct sns_ocmem_phys_addr_resp_msg_v01 *msg_ptr =
(struct sns_ocmem_phys_addr_resp_msg_v01 *)msg;
pr_debug("%s: Received SNS_OCMEM_PHYS_ADDR_RESP_V01\n",
__func__);
pr_debug("%s: segments_valid=%d, segments_len=%d\n", __func__,
msg_ptr->segments_valid, msg_ptr->segments_len);
if (msg_ptr->segments_valid) {
sns_ctl.mem_segments_size = msg_ptr->segments_len;
memcpy(sns_ctl.mem_segments, msg_ptr->segments,
sizeof(struct sns_mem_segment_s_v01) *
msg_ptr->segments_len);
sns_ctl.sns_ocmem_status |= DSPS_PHYS_ADDR_SET;
} else {
pr_err("%s: Received invalid segment list\n", __func__);
}
} else if (hdr->msg_id == SNS_OCMEM_HAS_CLIENT_IND_V01 &&
hdr->msg_type == SNS_OCMEM_MSG_TYPE_IND) {
struct sns_ocmem_has_client_ind_msg_v01 *msg_ptr =
(struct sns_ocmem_has_client_ind_msg_v01 *)msg;
pr_debug("%s: Received SNS_OCMEM_HAS_CLIENT_IND_V01\n",
__func__);
pr_debug("%s: ADSP has %i client(s)\n", __func__,
msg_ptr->num_clients);
if (msg_ptr->num_clients > 0) {
sns_ctl.sns_ocmem_status |= DSPS_HAS_CLIENT;
sns_ctl.sns_ocmem_status &= ~DSPS_HAS_NO_CLIENT;
} else {
sns_ctl.sns_ocmem_status |= DSPS_HAS_NO_CLIENT;
sns_ctl.sns_ocmem_status &= ~DSPS_HAS_CLIENT;
}
} else if (hdr->msg_id == SNS_OCMEM_BW_VOTE_RESP_V01 &&
hdr->msg_type == SNS_OCMEM_MSG_TYPE_RESP) {
/* no need to handle this response msg, just return */
pr_debug("%s: Received SNS_OCMEM_BW_VOTE_RESP_V01\n", __func__);
spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
return;
} else if (hdr->msg_id == SNS_OCMEM_BW_VOTE_IND_V01 &&
hdr->msg_type == SNS_OCMEM_MSG_TYPE_IND) {
struct sns_ocmem_bw_vote_ind_msg_v01 *msg_ptr =
(struct sns_ocmem_bw_vote_ind_msg_v01 *)msg;
pr_debug("%s: Received BW_VOTE_IND_V01, is_vote_on=%d\n",
__func__, msg_ptr->is_vote_on);
if (msg_ptr->is_vote_on) {
sns_ctl.sns_ocmem_status |= DSPS_BW_VOTE_ON;
sns_ctl.sns_ocmem_status &= ~DSPS_BW_VOTE_OFF;
} else {
sns_ctl.sns_ocmem_status |= DSPS_BW_VOTE_OFF;
sns_ctl.sns_ocmem_status &= ~DSPS_BW_VOTE_ON;
}
} else {
pr_err("%s: Unknown message type received. id: %i; type: %i\n",
__func__, hdr->msg_id, hdr->msg_type);
}
spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
wake_up(&sns_ctl.sns_wait);
}
/*
 * Workqueue handler: drain every complete packet currently available
 * on the SMD channel and feed each one to sns_ocmem_smd_process().
 */
static void sns_ocmem_smd_read(struct work_struct *ws)
{
    struct smd_channel *ch = sns_ctl.smd_ch;
    unsigned char *pkt;
    int pkt_sz, avail;

    while (1) {
        pkt_sz = smd_cur_packet_size(ch);
        BUG_ON(pkt_sz > SMD_BUF_SIZE);
        avail = smd_read_avail(ch);
        pr_debug("%s: sz=%d, len=%d\n", __func__, pkt_sz, avail);
        /* Stop once the next packet has not fully arrived yet. */
        if (avail == 0 || avail < pkt_sz)
            break;

        pkt = kzalloc(SMD_BUF_SIZE, GFP_KERNEL);
        if (pkt == NULL) {
            pr_err("%s: malloc failed", __func__);
            break;
        }

        if (smd_read(ch, pkt, pkt_sz) != pkt_sz) {
            pr_err("%s: not enough data?!\n", __func__);
            kfree(pkt);
            continue;
        }

        /* The payload follows the fixed header inside the packet. */
        sns_ocmem_smd_process((struct sns_ocmem_hdr_s *)pkt,
                              (void *)((char *)pkt +
                                       sizeof(struct sns_ocmem_hdr_s)));
        kfree(pkt);
    }
}
/*
 * All SMD notifications and messages from Sensors on ADSP are
 * received by this function. Data events defer the actual packet
 * draining to the smd_wq workqueue.
 */
void sns_ocmem_smd_notify_data(void *data, unsigned int event)
{
    switch (event) {
    case SMD_EVENT_DATA: {
        int sz;
        pr_debug("%s: Received SMD event Data\n", __func__);
        /* Only queue work once a full packet is readable. */
        sz = smd_cur_packet_size(sns_ctl.smd_ch);
        if (sz > 0 && sz <= smd_read_avail(sns_ctl.smd_ch))
            queue_work(sns_ctl.smd_wq, &sns_ctl.smd_read_work);
        break;
    }
    case SMD_EVENT_OPEN:
        pr_debug("%s: Received SMD event Open\n", __func__);
        break;
    case SMD_EVENT_CLOSE:
        pr_debug("%s: Received SMD event Close\n", __func__);
        break;
    }
}
/*
 * Return true when any of the given flag bits is currently set in
 * sns_ctl.sns_ocmem_status; the read is done under sns_lock.
 */
static bool sns_ocmem_is_status_set(uint32_t sns_ocmem_status)
{
    unsigned long flags;
    bool matched;

    spin_lock_irqsave(&sns_ctl.sns_lock, flags);
    matched = (sns_ctl.sns_ocmem_status & sns_ocmem_status) != 0;
    spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
    return matched;
}
/*
 * Wait for a response from the ADSP or the OCMEM driver.
 *
 * @param sns_ocmem_status Status flags to wait for (any-of).
 * @param timeout_ms Milliseconds before giving up; 0 waits forever.
 *
 * Returns the wait_event_interruptible(_timeout) result: >0 flag seen
 * (or remaining jiffies), 0 on timeout, <0 when interrupted.
 */
static int sns_ocmem_wait(uint32_t sns_ocmem_status,
                          uint32_t timeout_ms)
{
    int err;

    if (!timeout_ms) {
        /* No timeout requested: block until a flag appears or a
         * signal interrupts us. */
        err = wait_event_interruptible(sns_ctl.sns_wait,
                sns_ocmem_is_status_set(sns_ocmem_status));
        if (err < 0)
            pr_err("%s: wait_event_interruptible failed err=%i\n",
                   __func__, err);
        return err;
    }

    err = wait_event_interruptible_timeout(sns_ctl.sns_wait,
            sns_ocmem_is_status_set(sns_ocmem_status),
            msecs_to_jiffies(timeout_ms));
    if (err == 0)
        pr_err("%s: interruptible_timeout timeout err=%i\n",
               __func__, err);
    else if (err < 0)
        pr_err("%s: interruptible_timeout failed err=%i\n",
               __func__, err);
    return err;
}
/*
 * Sends a message to the ADSP via SMD.
 *
 * @param hdr Specifies message type and other meta data
 * @param msg_ptr Pointer to the message contents
 *
 * @return 0 upon success; -EINVAL for a NULL header or unopened SMD
 *         channel; -ENOMEM on allocation failure; -ECOMM if the SMD
 *         write fails.
 *
 * Fixes: the NULL check of @hdr previously ran only after @hdr had
 * already been dereferenced (size computation, memcpy, pr_debug), and
 * a NULL smd_ch set the return code but still fell through into
 * smd_write(NULL, ...). Both checks now precede any use.
 */
static int
sns_ocmem_send_msg(struct sns_ocmem_hdr_s *hdr, void const *msg_ptr)
{
    int rv = 0;
    int err;
    void *temp;
    int size;

    if (hdr == NULL) {
        pr_err("%s: NULL message header\n", __func__);
        return -EINVAL;
    }
    if (sns_ctl.smd_ch == NULL) {
        pr_err("%s: null smd_ch\n", __func__);
        return -EINVAL;
    }

    /* One contiguous buffer: header immediately followed by payload. */
    size = sizeof(struct sns_ocmem_hdr_s) + hdr->msg_size;
    temp = kzalloc(size, GFP_KERNEL);
    if (temp == NULL) {
        pr_err("%s: allocation failure\n", __func__);
        return -ENOMEM;
    }

    hdr->dst_module = SNS_OCMEM_MODULE_ADSP;
    hdr->src_module = SNS_OCMEM_MODULE_KERNEL;

    memcpy(temp, hdr, sizeof(struct sns_ocmem_hdr_s));
    memcpy((char *)temp + sizeof(struct sns_ocmem_hdr_s),
           msg_ptr, hdr->msg_size);

    pr_debug("%s: send msg type: %i size: %i id: %i dst: %i src: %i\n",
             __func__, hdr->msg_type, hdr->msg_size,
             hdr->msg_id, hdr->dst_module, hdr->src_module);

    err = smd_write(sns_ctl.smd_ch, temp, size);
    if (err < 0) {
        pr_err("%s: smd_write failed %i\n", __func__, err);
        rv = -ECOMM;
    } else {
        pr_debug("%s smd_write successful ret=%d\n",
                 __func__, err);
    }

    kfree(temp);
    return rv;
}
/*
 * Load ADSP Firmware.
 *
 * Requests the "adsp" subsystem image via the PIL interface; the
 * returned handle (or ERR_PTR on failure) is stored in sns_ctl.pil.
 * Returns 0 on success, -ENODEV if the image could not be loaded.
 */
static int sns_load_adsp(void)
{
    sns_ctl.pil = subsystem_get("adsp");
    if (IS_ERR(sns_ctl.pil)) {
        pr_err("%s: fail to load ADSP firmware\n", __func__);
        return -ENODEV;
    }

    pr_debug("%s: Q6/ADSP image is loaded\n", __func__);
    return 0;
}
/*
 * Read the sensors-OCMEM bus-scaling parameters from the device tree
 * and attach the resulting msm_bus_scale_pdata to @pdev via drvdata.
 *
 * @pdev: the sensors platform device (must have an of_node)
 *
 * Returns 0 on success; -ENODEV when device tree data is missing,
 * -ENOMEM on allocation failure, or the of_property_read_u32() error
 * for a missing DT property.
 *
 * Fix: the kzalloc failure paths for ocmem_sns_bus_paths and
 * sns_ocmem_bus_scale_pdata jumped to the cleanup labels with ret
 * still 0 (left over from the last successful property read), so the
 * function reported success on allocation failure. They now set
 * ret = -ENOMEM first.
 */
static int sns_ocmem_platform_data_populate(struct platform_device *pdev)
{
    int ret;
    struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;
    struct msm_bus_vectors *sns_ocmem_bus_vectors = NULL;
    struct msm_bus_paths *ocmem_sns_bus_paths = NULL;
    u32 val;

    if (!pdev->dev.of_node) {
        pr_err("%s: device tree information missing\n", __func__);
        return -ENODEV;
    }

    sns_ocmem_bus_vectors = kzalloc(sizeof(struct msm_bus_vectors),
                                    GFP_KERNEL);
    if (!sns_ocmem_bus_vectors) {
        dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
        return -ENOMEM;
    }

    /* Mandatory DT properties describing the bus vote. */
    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,src-id", &val);
    if (ret) {
        dev_err(&pdev->dev, "%s: qcom,src-id missing in DT node\n",
                __func__);
        goto fail1;
    }
    sns_ocmem_bus_vectors->src = val;
    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,dst-id", &val);
    if (ret) {
        dev_err(&pdev->dev, "%s: qcom,dst-id missing in DT node\n",
                __func__);
        goto fail1;
    }
    sns_ocmem_bus_vectors->dst = val;
    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,ab", &val);
    if (ret) {
        dev_err(&pdev->dev, "%s: qcom,ab missing in DT node\n",
                __func__);
        goto fail1;
    }
    sns_ocmem_bus_vectors->ab = val;
    ret = of_property_read_u32(pdev->dev.of_node,
                               "qcom,ib", &val);
    if (ret) {
        dev_err(&pdev->dev, "%s: qcom,ib missing in DT node\n",
                __func__);
        goto fail1;
    }
    sns_ocmem_bus_vectors->ib = val;

    ocmem_sns_bus_paths = kzalloc(sizeof(struct msm_bus_paths),
                                  GFP_KERNEL);
    if (!ocmem_sns_bus_paths) {
        dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
        ret = -ENOMEM;  /* previously returned 0 (stale ret) here */
        goto fail1;
    }
    ocmem_sns_bus_paths->num_paths = 1;
    ocmem_sns_bus_paths->vectors = sns_ocmem_bus_vectors;

    sns_ocmem_bus_scale_pdata =
        kzalloc(sizeof(struct msm_bus_scale_pdata), GFP_KERNEL);
    if (!sns_ocmem_bus_scale_pdata) {
        dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
        ret = -ENOMEM;  /* previously returned 0 (stale ret) here */
        goto fail2;
    }

    sns_ocmem_bus_scale_pdata->usecase = ocmem_sns_bus_paths;
    sns_ocmem_bus_scale_pdata->num_usecases = 1;
    sns_ocmem_bus_scale_pdata->name = "sensors-ocmem";

    dev_set_drvdata(&pdev->dev, sns_ocmem_bus_scale_pdata);
    return ret;

fail2:
    kfree(ocmem_sns_bus_paths);
fail1:
    kfree(sns_ocmem_bus_vectors);
    return ret;
}
/*
 * Initialize all sensors ocmem driver data fields and register with the
 * ocmem driver.
 *
 * Sequence: register the OCMEM notifier, build and register the bus-scale
 * client, boot the ADSP, open the SMD channel, then exchange a physical
 * address request with the ADSP and record the resulting memory segments
 * in the map list.
 *
 * @return 0 upon success; < 0 upon error
 */
static int sns_ocmem_init(void)
{
	int i, err, ret;
	struct sns_ocmem_hdr_s addr_req_hdr;
	struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;

	/* register the OCMEM notifier callback */
	sns_ctl.ocmem_handle =
		ocmem_notifier_register(SNS_OCMEM_CLIENT_ID,
					&sns_ctl.ocmem_nb);
	if (sns_ctl.ocmem_handle == NULL) {
		pr_err("OCMEM notifier registration failed\n");
		return -EFAULT;
	}

	/* populate platform data from the device tree */
	ret = sns_ocmem_platform_data_populate(sns_ctl.pdev);
	if (ret) {
		dev_err(&sns_ctl.pdev->dev,
			"%s: failed to populate platform data, rc = %d\n",
			__func__, ret);
		return -ENODEV;
	}

	sns_ocmem_bus_scale_pdata = dev_get_drvdata(&sns_ctl.pdev->dev);
	sns_ctl.sns_ocmem_bus_client =
		msm_bus_scale_register_client(sns_ocmem_bus_scale_pdata);
	if (!sns_ctl.sns_ocmem_bus_client) {
		pr_err("%s: msm_bus_scale_register_client() failed\n",
			__func__);
		return -EFAULT;
	}

	/* load ADSP first */
	if (sns_load_adsp() != 0) {
		pr_err("%s: sns_load_adsp failed\n", __func__);
		return -EFAULT;
	}

	/*
	 * wait before open SMD channel from kernel to ensure
	 * channel has been openned already from ADSP side
	 */
	msleep(1000);

	err = smd_named_open_on_edge(SNS_OCMEM_SMD_CHANNEL,
				SMD_APPS_QDSP,
				&sns_ctl.smd_ch,
				NULL,
				sns_ocmem_smd_notify_data);
	if (err != 0) {
		pr_err("%s: smd_named_open_on_edge failed %i\n", __func__, err);
		return -EFAULT;
	}
	pr_debug("%s: SMD channel openned successfuly!\n", __func__);

	/* wait for the channel ready before writing data */
	msleep(1000);

	/* Ask the ADSP for the DDR segments it wants mapped into OCMEM;
	 * the reply handler fills sns_ctl.mem_segments and sets
	 * DSPS_PHYS_ADDR_SET. */
	addr_req_hdr.msg_id = SNS_OCMEM_PHYS_ADDR_REQ_V01;
	addr_req_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
	addr_req_hdr.msg_size = 0;

	err = sns_ocmem_send_msg(&addr_req_hdr, NULL);
	if (err != 0) {
		pr_err("%s: sns_ocmem_send_msg failed %i\n", __func__, err);
		return -ECOMM;
	}

	err = sns_ocmem_wait(DSPS_PHYS_ADDR_SET, 0);
	if (err != 0) {
		pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
		return -EFAULT;
	}

	/* Translate the reported segments into the ocmem map list used by
	 * ocmem_map()/ocmem_unmap(). */
	sns_ctl.map_list.num_chunks = sns_ctl.mem_segments_size;
	for (i = 0; i < sns_ctl.mem_segments_size; i++) {
		sns_ctl.map_list.chunks[i].ro =
			sns_ctl.mem_segments[i].type == 1 ? true : false;
		sns_ctl.map_list.chunks[i].ddr_paddr =
			sns_ctl.mem_segments[i].start_address;
		sns_ctl.map_list.chunks[i].size =
			sns_ctl.mem_segments[i].size;
		pr_debug("%s: chunks[%d]: ro=%d, ddr_paddr=0x%lx, size=%li",
			__func__, i,
			sns_ctl.map_list.chunks[i].ro,
			sns_ctl.map_list.chunks[i].ddr_paddr,
			sns_ctl.map_list.chunks[i].size);
	}

	return 0;
}
/*
 * Unmaps memory in ocmem back to DDR, indicates to the ADSP its completion,
 * and waits for it to finish removing its bandwidth vote.
 */
static void sns_ocmem_unmap(void)
{
	unsigned long flags;
	int err = 0;

	/* OCMEM must be powered while the unmap transfer is in flight. */
	ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
				sns_ctl.buf, OCMEM_ON);

	/* Clear stale completion/failure flags before starting so the wait
	 * below only sees the outcome of this unmap. */
	spin_lock_irqsave(&sns_ctl.sns_lock, flags);
	sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_UNMAP_FAIL &
				~SNS_OCMEM_UNMAP_DONE);
	spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);

	err = ocmem_unmap(SNS_OCMEM_CLIENT_ID,
			sns_ctl.buf,
			&sns_ctl.map_list);
	if (err != 0) {
		pr_err("ocmem_unmap failed %i\n", err);
	} else {
		/* Block until the notifier marks the unmap DONE or FAIL. */
		err = sns_ocmem_wait(SNS_OCMEM_UNMAP_DONE |
					SNS_OCMEM_UNMAP_FAIL, 0);
		if (err == 0) {
			if (sns_ocmem_is_status_set(SNS_OCMEM_UNMAP_DONE))
				pr_debug("%s: OCMEM_UNMAP_DONE\n", __func__);
			else if (sns_ocmem_is_status_set(
					SNS_OCMEM_UNMAP_FAIL)) {
				pr_err("%s: OCMEM_UNMAP_FAIL\n", __func__);
				/* A failed unmap leaves mapping state
				 * inconsistent; treated as fatal. */
				BUG_ON(true);
			} else
				pr_err("%s: status flag not set\n", __func__);
		} else {
			pr_err("%s: sns_ocmem_wait failed %i\n",
				__func__, err);
		}
	}

	ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
				sns_ctl.buf, OCMEM_OFF);
}
/*
 * Waits for allocation to succeed. This may take considerable time if the
 * device is presently in a high-power use case.
 *
 * @return 0 on success; -EPIPE if the ADSP lost its client meanwhile;
 *         -EFAULT on wait failure
 */
static int sns_ocmem_wait_for_alloc(void)
{
	int err = 0;

	/* ALLOC_GROW signals the allocation completed; DSPS_HAS_NO_CLIENT
	 * means the ADSP dropped its sensor client while we waited. */
	err = sns_ocmem_wait(SNS_OCMEM_ALLOC_GROW |
				DSPS_HAS_NO_CLIENT, 0);
	if (err == 0) {
		if (sns_ocmem_is_status_set(DSPS_HAS_NO_CLIENT)) {
			pr_debug("%s: Lost client while waiting for GROW\n",
				__func__);
			/* The buffer is no longer needed; release it. */
			ocmem_free(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
			sns_ctl.buf = NULL;
			return -EPIPE;
		}
	} else {
		pr_err("sns_ocmem_wait failed %i\n", err);
		return -EFAULT;
	}

	return 0;
}
/*
 * Kicks-off the mapping of memory from DDR to ocmem. Waits for the process
 * to complete, then indicates so to the ADSP.
 *
 * Outcomes while waiting: MAP_DONE (success), lost client (-EPIPE, buffer
 * freed), SHRINK request (mapping undone, -EFAULT), or MAP_FAIL (buffer
 * freed, err left as set by the wait).
 *
 * @return 0: Success; < 0: Other error
 */
static int sns_ocmem_map(void)
{
	int err = 0;
	unsigned long flags;

	/* Clear stale map flags so the wait below reflects this attempt. */
	spin_lock_irqsave(&sns_ctl.sns_lock, flags);
	sns_ctl.sns_ocmem_status &=
		(~SNS_OCMEM_MAP_FAIL & ~SNS_OCMEM_MAP_DONE);
	spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);

	/* vote for ocmem bus bandwidth */
	err = msm_bus_scale_client_update_request(
			sns_ctl.sns_ocmem_bus_client,
			0);
	if (err)
		pr_err("%s: failed to vote for bus bandwidth\n", __func__);

	err = ocmem_map(SNS_OCMEM_CLIENT_ID,
			sns_ctl.buf,
			&sns_ctl.map_list);
	if (err != 0) {
		pr_debug("ocmem_map failed %i\n", err);
		/* Mapping could not even start: power off and release. */
		ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
					sns_ctl.buf, OCMEM_OFF);
		ocmem_free(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
		sns_ctl.buf = NULL;
	} else {
		err = sns_ocmem_wait(SNS_OCMEM_ALLOC_SHRINK |
					DSPS_HAS_NO_CLIENT |
					SNS_OCMEM_MAP_DONE |
					SNS_OCMEM_MAP_FAIL, 0);
		if (err == 0) {
			if (sns_ocmem_is_status_set(SNS_OCMEM_MAP_DONE))
				pr_debug("%s: OCMEM mapping DONE\n", __func__);
			else if (sns_ocmem_is_status_set(DSPS_HAS_NO_CLIENT)) {
				pr_debug("%s: Lost client while waiting for MAP\n",
					__func__);
				/* Undo the partial map and release the
				 * buffer; caller sees -EPIPE. */
				sns_ocmem_unmap();
				ocmem_free(SNS_OCMEM_CLIENT_ID,
						sns_ctl.buf);
				sns_ctl.buf = NULL;
				err = -EPIPE;
			} else if (sns_ocmem_is_status_set(
					SNS_OCMEM_ALLOC_SHRINK)) {
				pr_debug("%s: SHRINK while wait for MAP\n",
					__func__);
				/* OCMEM asked us to give the memory back. */
				sns_ocmem_unmap();
				err = ocmem_shrink(SNS_OCMEM_CLIENT_ID,
							sns_ctl.buf, 0);
				BUG_ON(err != 0);
				err = -EFAULT;
			} else if (sns_ocmem_is_status_set(
					SNS_OCMEM_MAP_FAIL)) {
				pr_err("%s: OCMEM mapping fails\n", __func__);
				ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
							sns_ctl.buf,
							OCMEM_OFF);
				ocmem_free(SNS_OCMEM_CLIENT_ID,
						sns_ctl.buf);
				sns_ctl.buf = NULL;
			} else
				pr_err("%s: status flag not set\n", __func__);
		} else {
			pr_err("sns_ocmem_wait failed %i\n", err);
		}
	}

	return err;
}
/*
 * Allocates memory in ocmem and maps to it from DDR.
 *
 * @return 0 upon success; < 0 upon failure
 */
static int sns_ocmem_alloc(void)
{
	int err = 0;
	unsigned long flags;

	if (sns_ctl.buf == NULL) {
		/* Clear stale grow/shrink flags before the new allocation. */
		spin_lock_irqsave(&sns_ctl.sns_lock, flags);
		sns_ctl.sns_ocmem_status &= ~SNS_OCMEM_ALLOC_GROW &
					~SNS_OCMEM_ALLOC_SHRINK;
		spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);

		sns_ctl.buf = ocmem_allocate_nb(SNS_OCMEM_CLIENT_ID,
						SNS_OCMEM_SIZE);
		if (sns_ctl.buf == NULL) {
			pr_err("ocmem_allocate_nb returned NULL\n");
			sns_ctl.ocmem_enabled = false;
			/*
			 * Return immediately: the pr_debug below would
			 * dereference the NULL buffer (previous code fell
			 * through and did exactly that).
			 */
			return -EFAULT;
		}
		/* A non-zero length smaller than requested is invalid. */
		if (sns_ctl.buf->len != 0 &&
		    SNS_OCMEM_SIZE > sns_ctl.buf->len) {
			pr_err("ocmem_allocate_nb: invalid len %li, Req: %i)\n",
				sns_ctl.buf->len, SNS_OCMEM_SIZE);
			sns_ctl.ocmem_enabled = false;
			err = -EFAULT;
		}
	}

	pr_debug("%s OCMEM buf=%lx, buffer len=%li\n", __func__,
		sns_ctl.buf->addr, sns_ctl.buf->len);

	/* Retry the wait/map cycle until a terminal condition breaks out;
	 * a zero-length buffer means the allocation is still pending. */
	while (sns_ctl.ocmem_enabled) {
		if (sns_ctl.buf->len == 0) {
			pr_debug("%s: Waiting for memory allocation\n",
				__func__);
			err = sns_ocmem_wait_for_alloc();
			if (err == -EPIPE) {
				pr_debug("%s:Lost client while wait for alloc\n",
					__func__);
				break;
			} else if (err != 0) {
				pr_err("sns_ocmem_wait_for_alloc failed %i\n",
					err);
				break;
			}
		}

		ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
					sns_ctl.buf,
					OCMEM_ON);

		err = sns_ocmem_map();
		if (err == -EPIPE) {
			pr_debug("%s: Lost client while waiting for mapping\n",
				__func__);
			break;
		} else if (err < 0) {
			pr_debug("%s: Mapping failed, will try again\n",
				__func__);
			break;
		} else if (err == 0) {
			pr_debug("%s: Mapping finished\n", __func__);
			break;
		}
	}

	return err;
}
/*
 * Indicate to the ADSP that unmapping has completed, and wait for the response
 * that its bandwidth vote has been removed.
 *
 * @return 0 Upon success; < 0 upon error
 */
static int sns_ocmem_unmap_send(void)
{
	int err;
	struct sns_ocmem_hdr_s msg_hdr;
	struct sns_ocmem_bw_vote_req_msg_v01 msg;

	memset(&msg, 0, sizeof(struct sns_ocmem_bw_vote_req_msg_v01));

	msg_hdr.msg_id = SNS_OCMEM_BW_VOTE_REQ_V01;
	msg_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
	msg_hdr.msg_size = sizeof(struct sns_ocmem_bw_vote_req_msg_v01);

	/* is_map == 0 with no vectors asks the ADSP to drop its vote. */
	msg.is_map = 0;
	msg.vectors_valid = 0;
	msg.vectors_len = 0;

	pr_debug("%s: send bw_vote OFF\n", __func__);

	err = sns_ocmem_send_msg(&msg_hdr, &msg);
	if (err != 0) {
		pr_err("%s: sns_ocmem_send_msg failed %i\n",
			__func__, err);
	} else {
		/* Wait for confirmation that the vote is off. */
		err = sns_ocmem_wait(DSPS_BW_VOTE_OFF, 0);
		if (err != 0)
			pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
	}

	return err;
}
/*
 * Indicate to the ADSP that mapping has completed, and wait for the response
 * that its bandwidth vote has been made.
 *
 * @return 0 Upon success; < 0 upon error
 */
static int sns_ocmem_map_send(void)
{
	int err;
	struct sns_ocmem_hdr_s msg_hdr;
	struct sns_ocmem_bw_vote_req_msg_v01 msg;
	struct ocmem_vectors *vectors;

	memset(&msg, 0, sizeof(struct sns_ocmem_bw_vote_req_msg_v01));

	msg_hdr.msg_id = SNS_OCMEM_BW_VOTE_REQ_V01;
	msg_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
	msg_hdr.msg_size = sizeof(struct sns_ocmem_bw_vote_req_msg_v01);

	/* is_map == 1 asks the ADSP to place its bandwidth vote. */
	msg.is_map = 1;

	/* Forward the current bandwidth vectors, when available, so the
	 * ADSP can size its vote. */
	vectors = ocmem_get_vectors(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
	if (vectors != NULL) {
		memcpy(&msg.vectors, vectors, sizeof(*vectors));
		/* TODO: set vectors_len */
		msg.vectors_valid = true;
		msg.vectors_len = 0;
	}

	pr_debug("%s: send bw_vote ON\n", __func__);

	err = sns_ocmem_send_msg(&msg_hdr, &msg);
	if (err != 0) {
		pr_err("%s: sns_ocmem_send_msg failed %i\n", __func__, err);
	} else {
		/* SHRINK may arrive instead of the vote confirmation. */
		err = sns_ocmem_wait(DSPS_BW_VOTE_ON |
					SNS_OCMEM_ALLOC_SHRINK, 0);
		if (err != 0)
			pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
	}

	return err;
}
/*
 * Perform the necessary operations to clean-up OCMEM after being notified that
 * there is no longer a client; if sensors was evicted; or if some error
 * has occurred.
 *
 * @param[i] do_free Whether the memory should be freed (true) or if shrink
 *                   should be called instead (false).
 */
static void sns_ocmem_evicted(bool do_free)
{
	int err = 0;

	/* Always undo the DDR-to-OCMEM mapping first. */
	sns_ocmem_unmap();

	if (do_free) {
		ocmem_free(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
		sns_ctl.buf = NULL;
	} else {
		/* Shrinking to 0 must succeed; anything else is fatal. */
		err = ocmem_shrink(SNS_OCMEM_CLIENT_ID, sns_ctl.buf, 0);
		BUG_ON(err != 0);
	}

	/* Tell the ADSP to drop its bandwidth vote. */
	err = sns_ocmem_unmap_send();
	if (err != 0)
		pr_err("sns_ocmem_unmap_send failed %i\n", err);
}
/*
 * After mapping has completed and the ADSP has reacted appropriately, wait
 * for a shrink command or word from the ADSP that it no longer has a client.
 *
 * @return 0 If no clients; < 0 upon error (-EFAULT after handling a SHRINK)
 */
static int sns_ocmem_map_done(void)
{
	int err = 0;
	unsigned long flags;

	err = sns_ocmem_map_send();
	if (err != 0) {
		pr_err("sns_ocmem_map_send failed %i\n", err);
		/* The ADSP never acknowledged; tear the mapping down. */
		sns_ocmem_evicted(true);
	} else {
		/* OCMEM can be powered down while idle and mapped. */
		ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
					sns_ctl.buf, OCMEM_OFF);

		pr_debug("%s: Waiting for shrink or 'no client' updates\n",
			__func__);
		err = sns_ocmem_wait(DSPS_HAS_NO_CLIENT |
					SNS_OCMEM_ALLOC_SHRINK, 0);
		if (err == 0) {
			if (sns_ocmem_is_status_set(DSPS_HAS_NO_CLIENT)) {
				pr_debug("%s: No longer have a client\n",
					__func__);
				sns_ocmem_evicted(true);
			} else if (sns_ocmem_is_status_set(
					SNS_OCMEM_ALLOC_SHRINK)) {
				pr_debug("%s: Received SHRINK\n", __func__);
				sns_ocmem_evicted(false);
				/* Acknowledge the shrink by clearing the
				 * flag; -EFAULT makes the caller retry. */
				spin_lock_irqsave(&sns_ctl.sns_lock, flags);
				sns_ctl.sns_ocmem_status &=
					~SNS_OCMEM_ALLOC_SHRINK;
				spin_unlock_irqrestore(&sns_ctl.sns_lock,
							flags);
				err = -EFAULT;
			}
		} else {
			pr_err("sns_ocmem_wait failed %i\n", err);
		}
	}

	return err;
}
/*
 * Main function (work-queue entry point).
 * Initializes sensors ocmem feature, and waits for an ADSP client; each time
 * a client appears it allocates/maps OCMEM and services it until eviction.
 */
static void sns_ocmem_main(struct work_struct *work)
{
	int err = 0;

	pr_debug("%s\n", __func__);

	err = sns_ocmem_init();
	if (err != 0) {
		pr_err("%s: sns_ocmem_init failed %i\n", __func__, err);
		return;
	}

	while (true) {
		pr_debug("%s: Waiting for sensor client\n", __func__);
		if (sns_ocmem_is_status_set(DSPS_HAS_CLIENT) ||
		    !sns_ocmem_wait(DSPS_HAS_CLIENT, 0)) {
			pr_debug("%s: DSPS_HAS_CLIENT\n", __func__);

			err = sns_ocmem_alloc();
			if (err != 0) {
				pr_err("sns_ocmem_alloc failed %i\n", err);
				return;
			} else {
				/* -EFAULT from map_done means a SHRINK was
				 * handled... any non-zero ends the worker. */
				err = sns_ocmem_map_done();
				if (err != 0) {
					pr_err("sns_ocmem_map_done failed %i",
						err);
					return;
				}
			}
		}
	}

	/* NOTE(review): unreachable — the loop above only exits via the
	 * return statements, so this unregister never runs; the early
	 * error returns above skip it as well. Verify intended cleanup. */
	ocmem_notifier_unregister(sns_ctl.ocmem_handle,
				&sns_ctl.ocmem_nb);
}
/* open(): no per-client state to initialize, so opening always succeeds. */
static int sensors_adsp_open(struct inode *ip, struct file *fp)
{
	return 0;
}
/* release(): no per-client state to tear down. */
static int sensors_adsp_release(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * Read QTimer clock ticks and scale down to 32KHz clock as used
 * in DSPS.
 *
 * @return current QTimer count expressed in 32768 Hz ticks
 */
static u32 sns_read_qtimer(void)
{
	u64 val;
	val = arch_counter_get_cntpct();
	/*
	 * To convert ticks from 19.2 Mhz clock to 32768 Hz clock:
	 * x = (value * 32768) / 19200000
	 * This is same as first left shift the value by 4 bits, i.e. mutiply
	 * by 16, and then divide by 9375. The latter is preferable since
	 * QTimer tick (value) is 56-bit, so (value * 32768) could overflow,
	 * while (value * 16) will never do
	 */
	val <<= 4;
	/* do_div() divides val in place (quotient stays in val). */
	do_div(val, 9375);

	return (u32)val;
}
/*
 * IO Control - handle commands from client.
 *
 * Supported command: DSPS_IOCTL_READ_SLOW_TIMER, which copies the current
 * QTimer count (scaled to 32 KHz ticks) to the user-supplied u32 pointer.
 */
static long sensors_adsp_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	u32 val = 0;

	switch (cmd) {
	case DSPS_IOCTL_READ_SLOW_TIMER:
		val = sns_read_qtimer();
		/* put_user() returns -EFAULT on a bad user pointer. */
		ret = put_user(val, (u32 __user *) arg);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * platform driver
 */
/* Character-device file operations exposed to userspace. */
const struct file_operations sensors_adsp_fops = {
	.owner = THIS_MODULE,
	.open = sensors_adsp_open,
	.release = sensors_adsp_release,
	.unlocked_ioctl = sensors_adsp_ioctl,
};
/*
 * Probe: create the character device node, the two work queues, initialize
 * the shared control structure, and kick off the main OCMEM worker.
 *
 * @return 0 upon success; -ENODEV upon any failure (all partially created
 *         resources are unwound)
 */
static int sensors_adsp_probe(struct platform_device *pdev)
{
	int ret = 0;

	/* class_create() returns an ERR_PTR() on failure, never NULL, so
	 * the previous NULL check could never fire. */
	sns_ctl.dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(sns_ctl.dev_class)) {
		pr_err("%s: class_create fail.\n", __func__);
		goto res_err;
	}

	ret = alloc_chrdev_region(&sns_ctl.dev_num, 0, 1, DRV_NAME);
	if (ret) {
		pr_err("%s: alloc_chrdev_region fail.\n", __func__);
		goto alloc_chrdev_region_err;
	}

	sns_ctl.dev = device_create(sns_ctl.dev_class, NULL,
				     sns_ctl.dev_num,
				     &sns_ctl, DRV_NAME);
	if (IS_ERR(sns_ctl.dev)) {
		pr_err("%s: device_create fail.\n", __func__);
		goto device_create_err;
	}

	sns_ctl.cdev = cdev_alloc();
	if (sns_ctl.cdev == NULL) {
		pr_err("%s: cdev_alloc fail.\n", __func__);
		goto cdev_alloc_err;
	}
	cdev_init(sns_ctl.cdev, &sensors_adsp_fops);
	sns_ctl.cdev->owner = THIS_MODULE;

	ret = cdev_add(sns_ctl.cdev, sns_ctl.dev_num, 1);
	if (ret) {
		pr_err("%s: cdev_add fail.\n", __func__);
		goto cdev_add_err;
	}

	sns_ctl.sns_workqueue =
		alloc_workqueue("sns_ocmem", WQ_NON_REENTRANT, 0);
	if (!sns_ctl.sns_workqueue) {
		pr_err("%s: Failed to create work queue\n",
			__func__);
		goto sns_workqueue_err;
	}

	sns_ctl.smd_wq =
		alloc_workqueue("smd_wq", WQ_NON_REENTRANT, 0);
	if (!sns_ctl.smd_wq) {
		pr_err("%s: Failed to create work queue\n",
			__func__);
		goto smd_wq_err;
	}

	init_waitqueue_head(&sns_ctl.sns_wait);
	spin_lock_init(&sns_ctl.sns_lock);

	sns_ctl.ocmem_handle = NULL;
	sns_ctl.buf = NULL;
	sns_ctl.sns_ocmem_status = 0;
	sns_ctl.ocmem_enabled = true;
	sns_ctl.ocmem_nb.notifier_call = sns_ocmem_drv_cb;
	sns_ctl.smd_ch = NULL;
	sns_ctl.pdev = pdev;

	INIT_WORK(&sns_ctl.sns_work, sns_ocmem_main);
	INIT_WORK(&sns_ctl.smd_read_work, sns_ocmem_smd_read);

	queue_work(sns_ctl.sns_workqueue, &sns_ctl.sns_work);

	return 0;

smd_wq_err:
	destroy_workqueue(sns_ctl.sns_workqueue);
sns_workqueue_err:
	/* cdev_add() succeeded by this point: cdev_del() both removes the
	 * device and releases the cdev_alloc()'d object via its kobject,
	 * so no kfree() here. */
	cdev_del(sns_ctl.cdev);
	goto cdev_alloc_err;
cdev_add_err:
	kfree(sns_ctl.cdev);
cdev_alloc_err:
	device_destroy(sns_ctl.dev_class, sns_ctl.dev_num);
device_create_err:
	unregister_chrdev_region(sns_ctl.dev_num, 1);
alloc_chrdev_region_err:
	class_destroy(sns_ctl.dev_class);
res_err:
	return -ENODEV;
}
/* Remove: free the bus-scale pdata built during probe, unregister the OCMEM
 * notifier, and tear down the work queues and character device. */
static int sensors_adsp_remove(struct platform_device *pdev)
{
	struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;

	/* Free the structures allocated by
	 * sns_ocmem_platform_data_populate(): vectors, paths, pdata. */
	sns_ocmem_bus_scale_pdata = (struct msm_bus_scale_pdata *)
		dev_get_drvdata(&pdev->dev);

	kfree(sns_ocmem_bus_scale_pdata->usecase->vectors);
	kfree(sns_ocmem_bus_scale_pdata->usecase);
	kfree(sns_ocmem_bus_scale_pdata);

	ocmem_notifier_unregister(sns_ctl.ocmem_handle,
				&sns_ctl.ocmem_nb);
	destroy_workqueue(sns_ctl.sns_workqueue);
	destroy_workqueue(sns_ctl.smd_wq);

	/* NOTE(review): the cdev came from cdev_alloc(); cdev_del() already
	 * releases it through its kobject, so the kfree() below looks like
	 * a double free — verify against the cdev API. */
	cdev_del(sns_ctl.cdev);
	kfree(sns_ctl.cdev);
	sns_ctl.cdev = NULL;
	device_destroy(sns_ctl.dev_class, sns_ctl.dev_num);
	unregister_chrdev_region(sns_ctl.dev_num, 1);
	class_destroy(sns_ctl.dev_class);

	return 0;
}
/* Device-tree match table: binds this driver to "qcom,msm-adsp-sensors". */
static const struct of_device_id msm_adsp_sensors_dt_match[] = {
	{.compatible = "qcom,msm-adsp-sensors"},
	{}
};
MODULE_DEVICE_TABLE(of, msm_adsp_sensors_dt_match);
/* Platform driver registration data. */
static struct platform_driver sensors_adsp_driver = {
	.driver = {
		.name = "sensors-adsp",
		.owner = THIS_MODULE,
		.of_match_table = msm_adsp_sensors_dt_match,
	},
	.probe = sensors_adsp_probe,
	.remove = sensors_adsp_remove,
};
/*
 * Module Init.
 *
 * Registers the platform driver; probe() runs once a matching device
 * tree node is found.
 */
static int sensors_adsp_init(void)
{
	int rc;

	pr_debug("%s driver version %s.\n", DRV_NAME, DRV_VERSION);

	rc = platform_driver_register(&sensors_adsp_driver);
	if (rc)
		pr_err("%s: Failed to register sensors adsp driver\n",
			__func__);

	return rc;
}
/*
 * Module Exit.
 */
static void sensors_adsp_exit(void)
{
	/* Unregistering invokes remove() for any bound device. */
	platform_driver_unregister(&sensors_adsp_driver);
}

module_init(sensors_adsp_init);
module_exit(sensors_adsp_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Sensors ADSP driver");
| gpl-2.0 |
timeraider4u/mkvtoolnix | src/info/info_cli_parser.cpp | 2 | 3498 | /** \brief command line parsing
mkvinfo -- info tracks from Matroska files into other files
Distributed under the GPL v2
see the file COPYING for details
or visit http://www.gnu.org/copyleft/gpl.html
\file
\author Written by Moritz Bunkus <moritz@bunkus.org>.
*/
#include "common/common_pch.h"
#include "common/ebml.h"
#include "common/strings/formatting.h"
#include "common/strings/parsing.h"
#include "common/translation.h"
#include "info/info_cli_parser.h"
#include "info/options.h"
// Construct the parser over the raw argument list and reset the global
// verbosity level to a known state before parsing.
info_cli_parser_c::info_cli_parser_c(const std::vector<std::string> &args)
  : mtx::cli::parser_c{args}
{
  verbose = 0;
}
// Local shorthand: register an option whose handler is a member function of
// this parser instance.
#define OPT(spec, func, description) add_option(spec, std::bind(&info_cli_parser_c::func, this), description)

// Registers all supported command line options, the common options, and the
// hook that treats any unknown argument as the source file name.
void
info_cli_parser_c::init_parser() {
  add_information(YT("mkvinfo [options] <inname>"));

  add_section_header(YT("Options"));

#if defined(HAVE_QT)
  // GUI toggles are only available when built against Qt.
  OPT("G|no-gui", set_no_gui, YT("Do not start the GUI."));
  OPT("g|gui", set_gui, YT("Start the GUI (and open inname if it was given)."));
#endif
  OPT("c|checksum", set_checksum, YT("Calculate and display checksums of frame contents."));
  OPT("C|check-mode", set_check_mode, YT("Calculate and display checksums and use verbosity level 4."));
  OPT("s|summary", set_summary, YT("Only show summaries of the contents, not each element."));
  OPT("t|track-info", set_track_info, YT("Show statistics for each track in verbose mode."));
  OPT("x|hexdump", set_hexdump, YT("Show the first 16 bytes of each frame as a hex dump."));
  OPT("X|full-hexdump", set_full_hexdump, YT("Show all bytes of each frame as a hex dump."));
  OPT("p|hex-positions", set_hex_positions, YT("Show positions in hexadecimal."));
  OPT("z|size", set_size, YT("Show the size of each element including its header."));

  add_common_options();

  // Any argument not matching an option is taken as the input file name.
  add_hook(mtx::cli::parser_c::ht_unknown_option, std::bind(&info_cli_parser_c::set_file_name, this));
}
#undef OPT
// --gui: request the graphical interface; aborts if this build has no GUI.
void
info_cli_parser_c::set_gui() {
  if (!ui_graphical_available())
    mxerror("mkvinfo was compiled without GUI support.\n");
  m_options.m_use_gui = true;
}
// --no-gui: force console output; aborts if this build has no GUI at all
// (the option is only registered for GUI-capable builds).
void
info_cli_parser_c::set_no_gui() {
  if (!ui_graphical_available())
    mxerror("mkvinfo was compiled without GUI support.\n");
  m_options.m_use_gui = false;
}
// --checksum: enable checksum calculation for frame contents.
void
info_cli_parser_c::set_checksum() {
  m_options.m_calc_checksums = true;
}
// --check-mode: checksums plus maximum verbosity (level 4).
void
info_cli_parser_c::set_check_mode() {
  verbose                    = 4;
  m_options.m_calc_checksums = true;
}
// --summary: summary-only output; implies checksum calculation.
void
info_cli_parser_c::set_summary() {
  m_options.m_show_summary   = true;
  m_options.m_calc_checksums = true;
}
// --hexdump: show the first 16 bytes of each frame.
void
info_cli_parser_c::set_hexdump() {
  m_options.m_show_hexdump = true;
}
// --full-hexdump: dump every byte of each frame (no size cap).
void
info_cli_parser_c::set_full_hexdump() {
  m_options.m_hexdump_max_size = INT_MAX;
  m_options.m_show_hexdump     = true;
}
// --size: include each element's size (header included) in the output.
void
info_cli_parser_c::set_size() {
  m_options.m_show_size = true;
}
// --track-info: per-track statistics; requires at least verbosity level 1,
// so bump it only when it is still at the default 0.
void
info_cli_parser_c::set_track_info() {
  m_options.m_show_track_info = true;
  if (0 == verbose)
    verbose = 1;
}
// Unknown-option hook: the first non-option argument becomes the input file;
// a second one is an error.
void
info_cli_parser_c::set_file_name() {
  if (!m_options.m_file_name.empty())
    mxerror(Y("Only one source file is allowed.\n"));

  m_options.m_file_name = m_current_arg;
}
// --hex-positions: print element positions in hexadecimal.
void
info_cli_parser_c::set_hex_positions() {
  m_options.m_hex_positions = true;
}
// Parses the command line and returns the collected options. The global
// verbosity is captured into the options and then reset.
options_c
info_cli_parser_c::run() {
  init_parser();

  parse_args();

  m_options.m_verbose = verbose;
  verbose = 0;

  return m_options;
}
| gpl-2.0 |
citra-emu/citra | src/core/hle/kernel/shared_page.cpp | 2 | 5622 | // Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <chrono>
#include <cstring>
#include "common/archives.h"
#include "common/assert.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/shared_page.h"
#include "core/hle/service/ptm/ptm.h"
#include "core/movie.h"
#include "core/settings.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
SERIALIZE_EXPORT_IMPL(SharedPage::Handler)
namespace boost::serialization {
// In-place construction during deserialization: Handler has no default
// constructor, so the global CoreTiming instance is injected here before
// the archive loads the object's state.
template <class Archive>
void load_construct_data(Archive& ar, SharedPage::Handler* t, const unsigned int) {
    ::new (t) SharedPage::Handler(Core::System::GetInstance().CoreTiming());
}
template void load_construct_data<iarchive>(iarchive& ar, SharedPage::Handler* t,
                                            const unsigned int);
} // namespace boost::serialization
namespace SharedPage {
// Determines the console clock's initial value as seconds since the Unix
// epoch. A movie recording overrides it; otherwise it follows the configured
// init-clock mode (host system time plus an offset, or a fixed time).
static std::chrono::seconds GetInitTime() {
    const u64 override_init_time = Core::Movie::GetInstance().GetOverrideInitTime();
    if (override_init_time != 0) {
        // Override the clock init time with the one in the movie
        return std::chrono::seconds(override_init_time);
    }
    switch (Settings::values.init_clock) {
    case Settings::InitClock::SystemTime: {
        auto now = std::chrono::system_clock::now();
        // If the system time is in daylight saving, we give an additional hour to console time
        std::time_t now_time_t = std::chrono::system_clock::to_time_t(now);
        std::tm* now_tm = std::localtime(&now_time_t);
        if (now_tm && now_tm->tm_isdst > 0)
            now = now + std::chrono::hours(1);
        // Apply the configured offset. It is decomposed into a whole-day
        // part (sign preserved) plus the remaining seconds, which are always
        // added with a positive sign. For negative offsets this means
        // "N days back plus a positive time of day" rather than a plain
        // subtraction.
        // NOTE(review): confirm this asymmetric handling of negative offsets
        // matches what the settings UI encodes.
        s64 init_time_offset = Settings::values.init_time_offset;
        long long days_offset = init_time_offset / 86400;
        long long days_offset_in_seconds = days_offset * 86400; // h/m/s truncated
        unsigned long long seconds_offset =
            std::abs(init_time_offset) - std::abs(days_offset_in_seconds);
        now = now + std::chrono::seconds(seconds_offset);
        now = now + std::chrono::seconds(days_offset_in_seconds);
        return std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch());
    }
    case Settings::InitClock::FixedTime:
        return std::chrono::seconds(Settings::values.init_time);
    default:
        // Aborts; all enum values are handled above.
        UNREACHABLE_MSG("Invalid InitClock value ({})", Settings::values.init_clock);
    }
}
// Initializes the shared page to its power-on defaults (full battery, 3D
// slider from settings) and schedules the recurring time-update event.
Handler::Handler(Core::Timing& timing) : timing(timing) {
    std::memset(&shared_page, 0, sizeof(shared_page));

    shared_page.running_hw = 0x1; // product

    // Some games wait until this value becomes 0x1, before asking running_hw
    shared_page.unknown_value = 0x1;

    // Set to a completely full battery
    shared_page.battery_state.charge_level.Assign(
        static_cast<u8>(Service::PTM::ChargeLevels::CompletelyFull));
    shared_page.battery_state.is_adapter_connected.Assign(1);
    shared_page.battery_state.is_charging.Assign(1);

    init_time = GetInitTime();

    using namespace std::placeholders;
    // UpdateTimeCallback reschedules itself hourly; fire once immediately.
    update_time_event = timing.RegisterEvent("SharedPage::UpdateTimeCallback",
                                             std::bind(&Handler::UpdateTimeCallback, this, _1, _2));
    timing.ScheduleEvent(0, update_time_event, 0, 0);

    float slidestate = Settings::values.factor_3d / 100.0f;
    shared_page.sliderstate_3d = static_cast<float_le>(slidestate);
}
/// Gets system time in 3DS format. The epoch is Jan 1900, and the unit is millisecond.
u64 Handler::GetSystemTime() const {
    // Wall-clock "now" = configured init time + elapsed emulated time.
    std::chrono::milliseconds now =
        init_time + std::chrono::duration_cast<std::chrono::milliseconds>(timing.GetGlobalTimeUs());

    // 3DS system does't allow user to set a time before Jan 1 2000,
    // so we use it as an auxiliary epoch to calculate the console time.
    std::tm epoch_tm;
    epoch_tm.tm_sec = 0;
    epoch_tm.tm_min = 0;
    epoch_tm.tm_hour = 0;
    epoch_tm.tm_mday = 1;
    epoch_tm.tm_mon = 0;
    epoch_tm.tm_year = 100; // tm_year counts from 1900, so 100 == year 2000
    epoch_tm.tm_isdst = 0;
    s64 epoch = std::mktime(&epoch_tm) * 1000;

    // 3DS console time uses Jan 1 1900 as internal epoch,
    // so we use the milliseconds between 1900 and 2000 as base console time
    u64 console_time = 3155673600000ULL;

    // Only when system time is after 2000, we set it as 3DS system time
    if (now.count() > epoch) {
        console_time += (now.count() - epoch);
    }
    return console_time;
}
// Periodic (hourly) event: writes the current console time into the inactive
// date_time slot, then bumps the counter so readers pick up the new slot
// (double-buffering via date_time_counter parity).
void Handler::UpdateTimeCallback(std::uintptr_t user_data, int cycles_late) {
    DateTime& date_time =
        shared_page.date_time_counter % 2 ? shared_page.date_time_0 : shared_page.date_time_1;

    date_time.date_time = GetSystemTime();
    date_time.update_tick = timing.GetTicks();
    date_time.tick_to_second_coefficient = BASE_CLOCK_RATE_ARM11;
    date_time.tick_offset = 0;

    ++shared_page.date_time_counter;

    // system time is updated hourly; compensate for how late this event ran
    timing.ScheduleEvent(msToCycles(60 * 60 * 1000) - cycles_late, update_time_event);
}
// Copies the given MAC address into the shared page's wifi_macaddr field.
void Handler::SetMacAddress(const MacAddress& addr) {
    std::memcpy(shared_page.wifi_macaddr, addr.data(), sizeof(MacAddress));
}
// Publishes the wifi signal strength to the shared page.
void Handler::SetWifiLinkLevel(WifiLinkLevel level) {
    shared_page.wifi_link_level = static_cast<u8>(level);
}
// Publishes the 3D LED state to the shared page.
void Handler::Set3DLed(u8 state) {
    shared_page.ledstate_3d = state;
}
// Publishes the 3D slider position (0.0 - 1.0) to the shared page.
void Handler::Set3DSlider(float slidestate) {
    shared_page.sliderstate_3d = static_cast<float_le>(slidestate);
}
// Exposes the raw shared page structure (mapped into guest memory).
SharedPageDef& Handler::GetSharedPage() {
    return shared_page;
}
} // namespace SharedPage
| gpl-2.0 |
alyubomirov/Amlogic_s905-kernel | kernel/sched/core.c | 2 | 195804 | /*
* kernel/sched/core.c
*
* Kernel scheduler and related syscalls
*
* Copyright (C) 1991-2002 Linus Torvalds
*
* 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
* make semaphores SMP safe
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
* 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Cleanups and useful suggestions
* by Davide Libenzi, preemptible kernel bits by Robert Love.
* 2003-09-03 Interactivity tuning by Con Kolivas.
* 2004-04-02 Scheduler domains code by Nick Piggin
* 2007-04-15 Work begun on replacing all interactivity tuning with a
* fair scheduling design by Con Kolivas.
* 2007-05-05 Load balancing (smp-nice) and other improvements
* by Peter Williams
* 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
* Thomas Gleixner, Mike Kravetz
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
/*
 * Arm a bandwidth-period hrtimer if it is not already queued, advancing it
 * past "now" by whole periods. The loop retries in case the timer expires
 * while it is being started.
 */
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		/* Already queued: nothing to do. */
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		/* Re-arm over the timer's [soft, hard] expiry range. */
		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static void update_rq_clock_task(struct rq *rq, s64 delta);
/*
 * Advance the runqueue clock to the current sched_clock reading, unless
 * clock updates are currently being skipped (batched) for this rq.
 */
void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	/* Propagate the delta to the task clock (irq/steal accounting). */
	update_rq_clock_task(rq, delta);
}
/*
* Debugging: various feature bits
*/
#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
0;
#undef SCHED_FEAT
#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled) \
#name ,
static const char * const sched_feat_names[] = {
#include "features.h"
};
#undef SCHED_FEAT
/* seq_file show: print every feature name on one line, prefixing the
 * currently disabled ones with "NO_". */
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
#ifdef HAVE_JUMP_LABEL
#define jump_label_key__true STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE
#define SCHED_FEAT(name, enabled) \
jump_label_key__##enabled ,
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
#undef SCHED_FEAT
/* Drop the static-branch key for feature @i if it is currently enabled. */
static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}
/* Raise the static-branch key for feature @i if it is currently disabled. */
static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
int i;
int neg = 0;
if (strncmp(cmp, "NO_", 3) == 0) {
neg = 1;
cmp += 3;
}
for (i = 0; i < __SCHED_FEAT_NR; i++) {
if (strcmp(cmp, sched_feat_names[i]) == 0) {
if (neg) {
sysctl_sched_features &= ~(1UL << i);
sched_feat_disable(i);
} else {
sysctl_sched_features |= (1UL << i);
sched_feat_enable(i);
}
break;
}
}
return i;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp;
int i;
if (cnt > 63)
cnt = 63;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
i = sched_feat_set(cmp);
if (i == __SCHED_FEAT_NR)
return -EINVAL;
*ppos += cnt;
return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
.open = sched_feat_open,
.write = sched_feat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static __init int sched_init_debug(void)
{
debugfs_create_file("sched_features", 0644, NULL, NULL,
&sched_feat_fops);
return 0;
}
late_initcall(sched_init_debug);
#endif /* CONFIG_SCHED_DEBUG */
/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

/* Set non-zero once the scheduler is fully initialized. */
__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;
/*
 * __task_rq_lock - lock the rq @p resides on.
 *
 * Caller must already hold p->pi_lock (asserted below); that blocks
 * concurrent wakeup-time migration. The retry loop handles @p having
 * moved between reading task_rq() and acquiring that rq's lock.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/* Re-check under the lock: @p may have migrated meanwhile. */
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 *
 * Holding both locks pins @p to its runqueue; see the locking rules
 * documented in set_task_cpu().
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/* Re-check under the lock: @p may have migrated meanwhile. */
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

/* Release the rq lock taken by __task_rq_lock(). */
static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

/* Release both locks taken by task_rq_lock(), in reverse order. */
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	/* IRQs are off, so we cannot be migrated: this_rq() stays ours. */
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

/* Cancel any pending hrtick on @rq. */
static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	/* The timer is pinned, so it must fire on the rq's own CPU. */
	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	/* queued=1: a timer-driven tick, as opposed to the periodic one. */
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

/* Re-arm the hrtick at its already-programmed (soft) expiry time. */
static int __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = hrtimer_get_softexpires(timer);

	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		/* Pinned timer: must be armed on the target CPU, via IPI. */
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

/* CPU-hotplug callback: cancel the hrtick of a CPU that is going away. */
static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/* UP: no cross-CPU concerns, arm the timer directly. */
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

/* Per-rq hrtick setup: the IPI call-single data and the timer itself. */
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */
/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 *
 * Caller must hold the task's rq lock (asserted below).
 */
void resched_task(struct task_struct *p)
{
	int cpu;

	lockdep_assert_held(&task_rq(p)->lock);

	/* Already marked; nothing more to do. */
	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id()) {
		/* Local task: fold into the preempt count, no IPI needed. */
		set_preempt_need_resched();
		return;
	}

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	/* A polling idle CPU will see the flag itself; otherwise kick it. */
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
/*
 * resched_cpu - resched whatever is currently running on @cpu.
 *
 * Best-effort: if the rq lock cannot be taken without spinning the
 * reschedule is skipped.
 */
void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 *
 * Returns the current cpu if no busy cpu is found in any of its
 * scheduler domains.
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	/* Walk domains inside-out so the topologically-nearest busy cpu wins. */
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/* The local CPU is obviously not in the inner idle loop. */
	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}
/*
 * Kick a full-nohz CPU so it re-evaluates its tick state.
 *
 * Returns true when @cpu is a nohz_full CPU (and was dealt with here),
 * false when the caller should fall back to the idle-CPU wakeup path.
 */
static bool wake_up_full_nohz_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return false;

	/* Remote CPUs always get an IPI; the local one only if tickless. */
	if (cpu != smp_processor_id() || tick_nohz_tick_stopped())
		smp_send_reschedule(cpu);

	return true;
}
/*
 * Wake a nohz CPU: try the full-nohz path first and only fall back to
 * the idle-CPU wakeup when @cpu is not a nohz_full CPU.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (wake_up_full_nohz_cpu(cpu))
		return;

	wake_up_idle_cpu(cpu);
}
/*
 * True when this CPU was kicked (via NOHZ_BALANCE_KICK) to run the
 * nohz idle load balancer and is still in a position to do so.
 */
static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
/*
 * sched_can_stop_tick - may the periodic tick be stopped on this CPU?
 *
 * With at most one runnable task there is nothing to preempt, so the
 * tick can be stopped; more than one runnable task needs the tick.
 */
bool sched_can_stop_tick(void)
{
	struct rq *rq;

	rq = this_rq();

	/* Make sure rq->nr_running update is visible after the IPI */
	smp_rmb();

	/* More than one running task need preemption */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
/*
 * Decay rq->rt_avg: halve it once per sched_avg_period(), catching up
 * over however many whole periods have elapsed since rq->age_stamp.
 */
void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 *
 * Implemented as an iterative pre/post-order walk: the goto into the
 * list_for_each_entry_rcu() loop body resumes the parent's child
 * iteration after returning from a subtree, so no explicit stack is
 * needed. A non-zero return from @down or @up aborts the walk and is
 * propagated to the caller.
 */
int walk_tg_tree_from(struct task_group *from,
			tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	/* Subtree done: pop back to the parent and resume its iteration. */
	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

/* No-op visitor, for walk_tg_tree_from() callers needing only one hook. */
int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
/*
 * Refresh @p's load weight from its static priority.
 *
 * SCHED_IDLE tasks are special-cased to the minimal weight; everything
 * else is looked up in the prio_to_weight/prio_to_wmult tables.
 */
static void set_load_weight(struct task_struct *p)
{
	struct load_weight *load = &p->se.load;
	int idx = p->static_prio - MAX_RT_PRIO;

	if (p->policy == SCHED_IDLE) {
		/* SCHED_IDLE tasks get minimal weight. */
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[idx]);
	load->inv_weight = prio_to_wmult[idx];
}
/* Enqueue @p on @rq via its scheduling class, with a fresh rq clock. */
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

/* Dequeue @p from @rq via its scheduling class, with a fresh rq clock. */
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

/* Make @p runnable on @rq, adjusting the uninterruptible count. */
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

/* Take @p off @rq, adjusting the uninterruptible count. */
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}
/*
 * update_rq_clock_task - advance rq->clock_task by @delta, minus time
 * spent in hard/soft irqs and (paravirt) stolen time.
 *
 * clock_task is the "task time" clock: it only advances while a task
 * could actually have been running.
 */
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compile should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	/* was: static_key_false((&para... — fixed a mangled '&para' HTML entity */
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 st;

		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		/* Like irq_delta above: clamp so clock_task stays monotonic. */
		if (unlikely(steal > delta))
			steal = delta;

		st = steal_ticks(steal);
		steal = st * TICK_NSEC;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	/* Account the non-task time against the rq's RT average. */
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}
/*
 * sched_set_stop_task - install (or clear) @cpu's stop task.
 * @cpu: the cpu whose ->stop task to replace
 * @stop: the new stop task, or NULL to clear it
 *
 * The new stop task is dressed up as SCHED_FIFO for userspace's
 * benefit, then switched to the stop scheduling class; the previous
 * stop task (if any) is demoted back to the RT class so it can exit.
 */
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, its something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		/* was: SCHED_FIFO, ¶m — fixed a mangled '&para' HTML entity */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}
/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}
/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	/* Deadline tasks sit above all RT priorities. */
	if (task_has_dl_policy(p))
		return MAX_DL_PRIO-1;

	/* RT priority is inverted: higher rt_priority -> lower prio value. */
	if (task_has_rt_policy(p))
		return MAX_RT_PRIO-1 - p->rt_priority;

	return __normal_prio(p);
}
/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (rt_prio(p->prio))
		return p->prio;

	return p->normal_prio;
}
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	/* Lockless: the answer can be stale by the time the caller uses it. */
	return cpu_curr(task_cpu(p)) == p;
}
/*
 * Notify the scheduling classes that @p changed class or priority:
 * fires switched_from()/switched_to() on a class change, or
 * prio_changed() when only the priority moved (or @p is deadline).
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);
		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

/*
 * Should the newly-runnable @p preempt rq->curr?
 *
 * Same class: delegate the decision to the class. Different classes:
 * preempt iff @p's class outranks rq->curr's (classes are ordered;
 * for_each_class() walks highest first).
 */
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			/* curr's class came first: it outranks @p, no preempt. */
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}
#ifdef CONFIG_SMP
/*
 * Record that @p now belongs to @new_cpu's runqueue, notifying the
 * class, tracing and perf on an actual migration.
 */
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!(task_preempt_count(p) & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
	}

	__set_task_cpu(p, new_cpu);
}
/*
 * Move @p to @cpu as one half of a task swap; caller holds both rqs'
 * locks and both tasks' pi_locks (see migrate_swap_stop()).
 */
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (p->on_rq) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}
/* Arguments for migrate_swap_stop(), run via stop_two_cpus(). */
struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

/*
 * stop-machine callback: with both rq locks and both tasks' pi_locks
 * held, re-validate the lockless checks made in migrate_swap() and then
 * exchange the two tasks' CPUs. Returns -EAGAIN if anything changed
 * under us.
 */
static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	/* Lock in a fixed order to avoid ABBA deadlock. */
	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);
	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}
/*
 * Cross migrate two tasks
 *
 * Returns 0 on success, -EINVAL if the swap is obviously impossible,
 * or the error from stop_two_cpus() (e.g. -EAGAIN on a lost race).
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
/* Arguments for migration_cpu_stop(): move @task to @dest_cpu. */
struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data);
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		on_rq = p->on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	/* Disable preemption so smp_processor_id() stays valid below. */
	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 *
 * Pick a usable CPU for @p when @cpu is no longer suitable. Tries, in
 * order: an allowed online CPU on @cpu's node, any allowed online CPU,
 * then progressively widens @p's affinity (cpuset fallback, then all
 * possible CPUs) before giving up with BUG().
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* Nothing found: widen the affinity and retry. */
		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}
/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 *
 * Asks @p's scheduling class for a target CPU, then sanitizes the
 * answer: an offline or disallowed CPU is replaced via
 * select_fallback_rq().
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}
/*
 * Fold @sample into the running average *avg as an exponentially
 * weighted moving average with weight 1/8:
 *
 *	avg += (sample - avg) / 8
 */
static void update_avg(u64 *avg, u64 sample)
{
	*avg += ((s64)(sample - *avg)) >> 3;
}
/*
 * select_cpu_for_hotplug - pick a runqueue for @p during CPU hotplug.
 *
 * The placement policy is exactly that of select_task_rq(): ask the
 * class, then fall back to select_fallback_rq() for an offline or
 * disallowed answer. Delegate instead of duplicating that logic, so
 * the fallback policy is maintained in one place.
 */
int select_cpu_for_hotplug(struct task_struct *p, int cpu,
			   int sd_flags, int wake_flags)
{
	return select_task_rq(p, cpu, sd_flags, wake_flags);
}
#else
/* UP: there is only CPU 0 to choose from. */
int select_cpu_for_hotplug(struct task_struct *p,
			   int cpu, int sd_flags, int wake_flags)
{
	return 0;
}
#endif
EXPORT_SYMBOL(select_cpu_for_hotplug);
/*
 * Account schedstats for a wakeup of @p onto @cpu: local vs remote,
 * cross-domain, migrated and sync counters. Compiles away entirely
 * without CONFIG_SCHEDSTATS.
 */
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		/* Find the smallest domain spanning both CPUs. */
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}
/* Put @p back on @rq's queues and mark it on_rq; rq->lock held. */
static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = 1;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	trace_sched_wakeup(p, true);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

	/* A wakeup ends this rq's idle period: update the idle average. */
	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

/* Full wakeup on @rq: adjust load accounting, enqueue, then wake. */
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}
/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 *
 * Returns 1 if the light wakeup was performed, 0 if @p had already
 * left its runqueue and the caller must do the full wakeup.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
/*
 * Drain this CPU's wake_list: activate every task that remote CPUs
 * queued for wakeup here (see ttwu_queue_remote()).
 */
static void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;

	raw_spin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}
/*
 * Reschedule-IPI handler: folds the remote NEED_RESCHED request and,
 * when there is real work (queued remote wakeups, nohz idle-balance
 * kick, HMP idle pull), enters irq context to process it.
 */
void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();

	/* Fast path: a bare resched IPI with no extra work attached. */
	if (llist_empty(&this_rq()->wake_list)
			&& !tick_nohz_full_cpu(smp_processor_id())
			&& !got_nohz_idle_kick()
#ifdef CONFIG_SCHED_HMP
			&& !this_rq()->wake_for_idle_pull
#endif
			)
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	tick_nohz_full_check();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
#ifdef CONFIG_SCHED_HMP
	else if (unlikely(this_rq()->wake_for_idle_pull))
		raise_softirq_irqoff(SCHED_SOFTIRQ);
#endif
	irq_exit();
}
/*
 * Queue @p on @cpu's wake_list and IPI it; the first enqueue onto an
 * empty list (llist_add() returning true) is what sends the IPI.
 */
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
		smp_send_reschedule(cpu);
}

/* True when the two CPUs share a last-level cache. */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */
/*
 * Final wakeup step: activate @p on @cpu's rq, either directly (locking
 * the remote rq) or, for cache-cold remote CPUs with TTWU_QUEUE, by
 * punting the work to that CPU via its wake_list + IPI.
 */
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}
/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Return: %true if @p was woken up, %false if it was already running.
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	/* Still on its runqueue: the cheap on-rq wakeup suffices. */
	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until its done referencing the task.
	 */
	while (p->on_cpu)
		cpu_relax();
	/*
	 * Pairs with the smp_wmb() in finish_lock_switch().
	 */
	smp_rmb();

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	/* Choose a destination CPU, possibly migrating the task. */
	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}
/**
* try_to_wake_up_local - try to wake up a local task with rq lock held
* @p: the thread to be awakened
*
* Put @p on the run-queue if it's not already there. The caller must
* ensure that this_rq() is locked, @p is bound to this_rq() and not
* the current task.
*/
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	/*
	 * pi_lock nests outside rq->lock; if the trylock fails, drop
	 * rq->lock and reacquire both in the correct order.
	 */
	if (!raw_spin_trylock(&p->pi_lock)) {
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	/* Enqueue only if not already on the runqueue. */
	if (!p->on_rq)
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}
/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
* processes.
*
* Return: 1 if the process was woken up, 0 if it was already running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
int wake_up_process(struct task_struct *p)
{
	/* Waking a stopped/traced task this way would bypass ptrace/job control. */
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

/* Like wake_up_process() but only wakes tasks matching @state. */
int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*
* __sched_fork() is basic setup used by init_idle() too:
*/
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	/* Fresh task: not on any runqueue, no accumulated runtime. */
	p->on_rq			= 0;

	p->se.on_rq			= 0;
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	p->se.vruntime			= 0;
	INIT_LIST_HEAD(&p->se.group_node);

/*
 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
 * removed when useful for applications beyond shares distribution (e.g.
 * load-balance).
 */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
	p->se.avg.runnable_avg_period = 0;
	p->se.avg.runnable_avg_sum = 0;
#ifdef CONFIG_SCHED_HMP
	/* keep LOAD_AVG_MAX in sync with fair.c if load avg series is change */
#define LOAD_AVG_MAX 47742
	p->se.avg.hmp_last_up_migration = 0;
	p->se.avg.hmp_last_down_migration = 0;
	/* Forkboost: start eligible children with a saturated load average. */
	if (hmp_task_should_forkboost(p)) {
		p->se.avg.load_avg_ratio = 1023;
		p->se.avg.load_avg_contrib =
				(1023 * scale_load_down(p->se.load.weight));
		p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
		p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
		p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
	}
#endif
#endif
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	/* Deadline-class state starts empty; timer armed lazily later. */
	RB_CLEAR_NODE(&p->dl.rb_node);
	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	p->dl.dl_runtime = p->dl.runtime = 0;
	p->dl.dl_deadline = p->dl.deadline = 0;
	p->dl.dl_period = 0;
	p->dl.flags = 0;

	INIT_LIST_HEAD(&p->rt.run_list);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	/* First user of this mm: schedule the initial NUMA scan. */
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		p->mm->numa_scan_seq = 0;
	}

	/* Threads (CLONE_VM) inherit the parent's preferred node. */
	if (clone_flags & CLONE_VM)
		p->numa_preferred_nid = current->numa_preferred_nid;
	else
		p->numa_preferred_nid = -1;

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
	p->numa_faults_buffer = NULL;

	INIT_LIST_HEAD(&p->numa_entry);
	p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
}
#ifdef CONFIG_NUMA_BALANCING
#ifdef CONFIG_SCHED_DEBUG
/*
 * With SCHED_DEBUG, NUMA balancing is toggled through the NUMA sched
 * feature bit so it shows up in /sys/kernel/debug/sched_features.
 */
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sched_feat_set("NUMA");
	else
		sched_feat_set("NO_NUMA");
}
#else
/* Without SCHED_DEBUG, a plain boolean gates NUMA balancing. */
__read_mostly bool numabalancing_enabled;

void set_numabalancing_state(bool enabled)
{
	numabalancing_enabled = enabled;
}
#endif /* CONFIG_SCHED_DEBUG */

#ifdef CONFIG_PROC_SYSCTL
/*
 * sysctl handler for kernel.numa_balancing: reads report the current
 * state; privileged writes (CAP_SYS_ADMIN) toggle it via
 * set_numabalancing_state(). Works on a local copy so a rejected
 * write never clobbers the live setting.
 */
int sysctl_numa_balancing(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = numabalancing_enabled;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_numabalancing_state(state);
	return err;
}
#endif
#endif
/*
* fork()/clone()-time setup:
*/
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	/*
	 * Deadline tasks cannot fork children that inherit their bandwidth
	 * (it is not reclaimable here), so refuse outright with -EAGAIN.
	 */
	if (dl_prio(p->prio)) {
		put_cpu();
		return -EAGAIN;
	} else if (rt_prio(p->prio)) {
		p->sched_class = &rt_sched_class;
	} else {
		p->sched_class = &fair_sched_class;
	}

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child due to cgroup_fork()
	 * is ran before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	set_task_cpu(p, cpu);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
	init_task_preempt_count(p);
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif

	put_cpu();
	return 0;
}
/*
 * to_ratio - express runtime/period as a 20-bit fixed point fraction.
 *
 * An infinite runtime maps to the maximum ratio (1.0 in Q20), and a
 * zero period maps to 0 so that callers need no special-casing of
 * degenerate parameters.
 */
unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	if (!period)
		return 0;

	return div64_u64(runtime << 20, period);
}
#ifdef CONFIG_SMP
/*
 * On SMP the deadline bandwidth accounting lives in the root domain,
 * shared by all CPUs of that domain; callers must hold sched RCU.
 */
inline struct dl_bw *dl_bw_of(int i)
{
	rcu_lockdep_assert(rcu_read_lock_sched_held(),
			   "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

/* Number of active CPUs in @i's root domain (bandwidth is scaled by this). */
static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	rcu_lockdep_assert(rcu_read_lock_sched_held(),
			   "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
/* UP: bandwidth is tracked per-runqueue and there is exactly one CPU. */
inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif
/* Release @tsk_bw of deadline bandwidth back to @dl_b. */
static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

/* Account @tsk_bw of deadline bandwidth against @dl_b. */
static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

/*
 * Would swapping @old_bw for @new_bw exceed the admission limit?
 * A bw of -1 means "no limit", so overflow is never reported then.
 */
static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
/*
* We must be sure that accepting a new task (or allowing changing the
* parameters of an existing one) is consistent with the bandwidth
* constraints. If yes, this function also accordingly updates the currently
* allocated bandwidth to reflect the new situation.
*
* This function is called while holding p's rq->lock.
*/
static int dl_overflow(struct task_struct *p, int policy,
		       const struct sched_attr *attr)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1;

	/* Unchanged bandwidth: nothing to admit or release. */
	if (new_bw == p->dl.dl_bw)
		return 0;

	/*
	 * Either if a task, enters, leave, or stays -deadline but changes
	 * its parameters, we may need to update accordingly the total
	 * allocated bandwidth of the container.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(task_cpu(p));
	/* Task entering -deadline: admit only if the new bw fits. */
	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
		__dl_add(dl_b, new_bw);
		err = 0;
	/* Already -deadline, new parameters: swap old bw for new. */
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
		__dl_clear(dl_b, p->dl.dl_bw);
		__dl_add(dl_b, new_bw);
		err = 0;
	/* Leaving -deadline: always succeeds, just give the bw back. */
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		__dl_clear(dl_b, p->dl.dl_bw);
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}
extern void init_dl_bw(struct dl_bw *dl_b);
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
void wake_up_new_task(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif

	/* Initialize new task's runnable average */
	init_task_runnable_average(p);
	rq = __task_rq_lock(p);
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);
	/* The new task may preempt whatever currently runs on its rq. */
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, p, &flags);
}
#ifdef CONFIG_PREEMPT_NOTIFIERS
/**
* preempt_notifier_register - tell me when current is being preempted & rescheduled
* @notifier: notifier struct to register
*/
void preempt_notifier_register(struct preempt_notifier *notifier)
{
hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);
/**
* preempt_notifier_unregister - no longer interested in preemption notifications
* @notifier: notifier struct to unregister
*
* This is safe to call from within a preemption notifier.
*/
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
hlist_del(¬ifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
/* Invoke every registered sched_in notifier for @curr on this CPU. */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

/* Invoke every sched_out notifier for @curr, telling it who runs next. */
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}
#else /* !CONFIG_PREEMPT_NOTIFIERS */
/* Preempt notifiers compiled out: these are no-op stubs. */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}
#endif /* CONFIG_PREEMPT_NOTIFIERS */
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
* @prev: the current task that is being switched out
* @next: the task we are going to switch to.
*
* This is called with the rq lock held and interrupts off. It must
* be paired with a subsequent finish_task_switch after the context
* switch.
*
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	/* Tracing/accounting first, then lock hand-over, then arch hooks. */
	trace_sched_switch(prev, next);
	sched_info_switch(rq, prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}
/**
* finish_task_switch - clean up after a task-switch
* @rq: runqueue associated with task-switch
* @prev: the thread we just switched away from.
*
* finish_task_switch must be called after the context switch, paired
* with a prepare_task_switch call before the context switch.
* finish_task_switch will reconcile locking set up by prepare_task_switch,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
* so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	vtime_task_switch(prev);
	finish_arch_switch(prev);
	perf_event_task_sched_in(prev, current);
	/* Drops rq->lock (on most configs) and re-enables preemption. */
	finish_lock_switch(rq, prev);
	finish_arch_post_lock_switch();

	fire_sched_in_preempt_notifiers(current);
	/* Deferred mmdrop from context_switch(); safe now, lock is released. */
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		task_numa_free(prev);

		if (prev->sched_class->task_dead)
			prev->sched_class->task_dead(prev);

		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}

	tick_nohz_task_switch(current);
}
#ifdef CONFIG_SMP
/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	/* Class-specific balancing hook before picking the next task. */
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
	/* rq->post_schedule is a flag set while picking, consumed here. */
	if (rq->post_schedule) {
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}
#else
/* UP build: no balancing hooks needed, so these are no-ops. */
static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}

static inline void post_schedule(struct rq *rq)
{
}
#endif
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	/* Complete the half-finished switch the new thread was born into. */
	finish_task_switch(rq, prev);

	/*
	 * FIXME: do we need to worry about rq being invalidated by the
	 * task_switch?
	 */
	post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	/* CLONE_CHILD_SETTID: report our pid to the requested user address. */
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
* context_switch - switch to the new MM and the new
* thread's register state.
*/
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	/* Kernel thread (no mm): borrow the previous task's address space. */
	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	/* prev was a kernel thread: defer the mmdrop to finish_task_switch. */
	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	context_tracking_task_switch(prev, next);
	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
/*
* nr_running and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
* threads, total number of context switches performed since bootup.
*/
/* Sum of runnable tasks over all online CPUs (racy snapshot, no locking). */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

/* Total context switches since boot, over all possible CPUs. */
unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

/* Tasks currently blocked in I/O wait, summed over all possible CPUs. */
unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}

/* I/O-waiting task count for one specific CPU's runqueue. */
unsigned long nr_iowait_cpu(int cpu)
{
	struct rq *this = cpu_rq(cpu);
	return atomic_read(&this->nr_iowait);
}
#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
	/* Already on the best CPU: nothing to migrate. */
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		/* Drop the lock before blocking in the stopper. */
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
#endif
DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
/*
* Return any ns on the sched_clock that have not yet been accounted in
* @p in case that task is currently running.
*
* Called with task_rq_lock() held on @rq.
*/
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	/* Only a currently-running task can have unaccounted runtime. */
	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq_clock_task(rq) - p->se.exec_start;
		/* Clock warps can make the delta negative; clamp to zero. */
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}

/* Locked wrapper: unaccounted runtime of @p, in nanoseconds. */
unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}
/*
* Return accounted runtime for the task.
* In case the task is currently running, return the runtime plus current's
* pending runtime that have not been accounted yet.
*/
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	/*
	 * 64-bit doesn't need locks to atomically read a 64bit value.
	 * So we have a optimization chance when the task's delta_exec is 0.
	 * Reading ->on_cpu is racy, but this is ok.
	 *
	 * If we race with it leaving cpu, we'll take a lock. So we're correct.
	 * If we race with it entering cpu, unaccounted time is 0. This is
	 * indistinguishable from the read occurring a few cycles earlier.
	 */
	if (!p->on_cpu)
		return p->se.sum_exec_runtime;
#endif

	/* Slow path: accumulated runtime plus the still-unaccounted slice. */
	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
*/
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	/* Let the current task's class account its slice / set need_resched. */
	curr->sched_class->task_tick(rq, curr, 0);
	update_cpu_load_active(rq);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq, cpu);
#endif
	rq_last_tick_reset(rq);
}
#ifdef CONFIG_NO_HZ_FULL
/**
* scheduler_tick_max_deferment
*
* Keep at least one tick per second when a single
* active task is running because the scheduler doesn't
* yet completely support full dynticks environment.
*
* This makes sure that uptime, CFS vruntime, load
* balancing, etc... continue to move forward, even
* with a very low granularity.
*
* Return: Maximum deferment in nanoseconds.
*/
u64 scheduler_tick_max_deferment(void)
{
	struct rq *rq = this_rq();
	unsigned long next, now = ACCESS_ONCE(jiffies);

	/* Allow deferring at most one second past the last scheduler tick. */
	next = rq->last_sched_tick + HZ;

	if (time_before_eq(next, now))
		return 0;

	return jiffies_to_nsecs(next - now);
}
#endif
/*
 * Return a caller address for tracing, skipping up to two levels of
 * locking-primitive frames so the trace points at the real caller.
 */
notrace unsigned long get_parent_ip(unsigned long addr)
{
	if (in_lock_functions(addr)) {
		addr = CALLER_ADDR2;
		if (in_lock_functions(addr))
			addr = CALLER_ADDR3;
	}
	return addr;
}
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
void __kprobes preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	__preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	/* Count went from 0 to val: preemption just got disabled, trace it. */
	if (preempt_count() == val)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(preempt_count_add);

void __kprobes preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	/* About to drop to 0: preemption is being re-enabled, trace first. */
	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
#endif
/*
* Print scheduling while atomic bug:
*/
/* Report a "scheduling while atomic" bug with as much context as possible. */
static noinline void __schedule_bug(struct task_struct *prev)
{
	/* Don't pile diagnostics on top of an oops already in flight. */
	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
/*
* Various schedule()-time debugging checks and statistics:
*/
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path. Otherwise whine
	 * if we are scheduling when we should not.
	 */
	if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
		__schedule_bug(prev);
	rcu_sleep_check();

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}

/* Hand @prev back to its class; refresh the rq clock first if needed. */
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	if (prev->on_rq || rq->skip_clock_update < 0)
		update_rq_clock(rq);
	prev->sched_class->put_prev_task(rq, prev);
}
/*
* Pick up the highest-prio task:
*/
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	/* Otherwise walk the classes in priority order. */
	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG(); /* the idle class will always have a runnable task */
}
/*
* __schedule() is the main scheduler function.
*
* The main means of driving the scheduler and thus entering this function are:
*
* 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
*
* 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
* paths. For example, see arch/x86/entry_64.S.
*
* To drive preemption between tasks, the scheduler sets the flag in timer
* interrupt handler scheduler_tick().
*
* 3. Wakeups don't really cause entry into schedule(). They add a
* task to the run-queue and that's it.
*
* Now, if the new task added to the run-queue preempts the current
* task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
* called on the nearest possible occasion:
*
* - If the kernel is preemptible (CONFIG_PREEMPT=y):
*
* - in syscall or exception context, at the next outmost
* preempt_enable(). (this might be as soon as the wake_up()'s
* spin_unlock()!)
*
* - in IRQ context, return from interrupt-handler to
* preemptible context
*
* - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
* then at the next:
*
* - cond_resched() call
* - explicit schedule() call
* - return from syscall or exception to user-space
* - return from interrupt-handler to user-space
*/
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	/* Voluntary sleep (non-running state, not a kernel preemption)? */
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			/* A pending signal cancels the sleep. */
			prev->state = TASK_RUNNING;
		} else {
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch have flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	sched_preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
static inline void sched_submit_work(struct task_struct *tsk)
{
	/* Running tasks and PI-blocked tasks are not actually going to sleep. */
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

/* Main scheduler entry point: flush plugged I/O, then switch tasks. */
asmlinkage void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 */
	user_exit();
	schedule();
	/* Re-enter the user/RCU-idle context tracking state on return. */
	user_enter();
}
#endif
/**
* schedule_preempt_disabled - called with preemption disabled
*
* Returns with preemption disabled. Note: preempt_count must be 1
*/
void __sched schedule_preempt_disabled(void)
{
	/* Drop the single preempt count, schedule, then restore it. */
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched notrace preempt_schedule(void)
{
	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(!preemptible()))
		return;

	do {
		/* PREEMPT_ACTIVE marks this as an involuntary preemption. */
		__preempt_count_add(PREEMPT_ACTIVE);
		__schedule();
		__preempt_count_sub(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
#endif /* CONFIG_PREEMPT */
/*
* this is the entry point to schedule() from kernel preemption
* off of irq context.
* Note, that this is called and return with irqs disabled. This will
* protect us against recursive calling from irq.
*/
asmlinkage void __sched preempt_schedule_irq(void)
{
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(preempt_count() || !irqs_disabled());

	prev_state = exception_enter();

	do {
		__preempt_count_add(PREEMPT_ACTIVE);
		/* __schedule() must run with irqs enabled; restore on return. */
		local_irq_enable();
		__schedule();
		local_irq_disable();
		__preempt_count_sub(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());

	exception_exit(prev_state);
}

/* Default wait-queue callback: wake the task stored in the wait entry. */
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
EXPORT_SYMBOL(default_wake_function);
/*
 * Shared implementation for the (legacy) sleep_on family: enqueue on @q,
 * sleep in @state for up to @timeout, then dequeue. Returns the remaining
 * timeout from schedule_timeout().
 */
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	/* State must be set before queueing to avoid missing a wakeup. */
	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
#ifdef CONFIG_RT_MUTEXES
/*
* rt_mutex_setprio - set the current priority of a task
* @p: task
* @prio: prio value (kernel-internal form)
*
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
* Used by the rt_mutex code to implement priority inheritance logic.
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	int oldprio, on_rq, running, enqueue_flag = 0;
	struct rq *rq;
	const struct sched_class *prev_class;

	BUG_ON(prio > MAX_PRIO);

	rq = __task_rq_lock(p);

	/*
	 * Idle task boosting is a nono in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, prio);
	p->pi_top_task = rt_mutex_get_top_task(p);
	oldprio = p->prio;
	prev_class = p->sched_class;
	on_rq = p->on_rq;
	running = task_current(rq, p);
	/* Take the task off the rq / out of "current" before re-classing it. */
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	/*
	 * Boosting condition are:
	 * 1. -rt task is running and holds mutex A
	 *      --> -dl task blocks on mutex A
	 *
	 * 2. -dl task is running and holds mutex A
	 *      --> -dl task blocks on mutex A and could preempt the
	 *          running task
	 */
	if (dl_prio(prio)) {
		if (!dl_prio(p->normal_prio) || (p->pi_top_task &&
			dl_entity_preempt(&p->pi_top_task->dl, &p->dl))) {
			p->dl.dl_boosted = 1;
			p->dl.dl_throttled = 0;
			enqueue_flag = ENQUEUE_REPLENISH;
		} else
			p->dl.dl_boosted = 0;
		p->sched_class = &dl_sched_class;
	} else if (rt_prio(prio)) {
		if (dl_prio(oldprio))
			p->dl.dl_boosted = 0;
		/* Boosted (numerically lower) prio goes to the queue head. */
		if (oldprio < prio)
			enqueue_flag = ENQUEUE_HEAD;
		p->sched_class = &rt_sched_class;
	} else {
		if (dl_prio(oldprio))
			p->dl.dl_boosted = 0;
		p->sched_class = &fair_sched_class;
	}

	p->prio = prio;

	/* Put the task back with its new class/priority in effect. */
	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, enqueue_flag);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	__task_rq_unlock(rq);
}
#endif
/*
 * set_user_nice - change the nice value of task @p to @nice.
 * @nice must be in [-20, 19]; out-of-range values and no-op changes
 * return silently.
 */
void set_user_nice(struct task_struct *p, long nice)
{
	int old_prio, delta, on_rq;
	unsigned long flags;
	struct rq *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling while the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		/* Record the nice value so it applies if the policy changes later. */
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);
	old_prio = p->prio;
	p->prio = effective_prio(p);
	delta = p->prio - old_prio;

	if (on_rq) {
		enqueue_task(rq, p, 0);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);
/*
 * can_nice - check if a task is allowed to assume nice value @nice
 * @p: task
 * @nice: requested nice value
 *
 * Permitted if either RLIMIT_NICE covers the requested level or the
 * caller holds CAP_SYS_NICE.
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]. */
	const int nice_rlim = 20 - nice;

	if (nice_rlim <= task_rlimit(p, RLIMIT_NICE))
		return 1;

	return capable(CAP_SYS_NICE);
}
#ifdef __ARCH_WANT_SYS_NICE
/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < -40)
		increment = -40;
	else if (increment > 40)
		increment = 40;

	/* Clamp the resulting nice level to the valid [-20, 19] range. */
	nice = TASK_NICE(current) + increment;
	if (nice < -20)
		nice = -20;
	else if (nice > 19)
		nice = 19;

	/* Raising priority (more negative nice) needs privilege or rlimit. */
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}
#endif
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
	/* Shift the kernel-internal prio so normal tasks center on 0. */
	return p->prio - MAX_RT_PRIO;
}
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
int task_nice(const struct task_struct *p)
{
	return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);
/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/* Not idle if anything other than the idle task is current. */
	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	/* Pending remote wakeups also count as not idle. */
	if (!llist_empty(&rq->wake_list))
		return 0;
#endif

	return 1;
}
/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the cpu @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}
/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question; a pid of 0 means the calling task.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	if (!pid)
		return current;

	return find_task_by_vpid(pid);
}
/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
static void
__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	init_dl_task_timer(dl_se);
	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	/* A zero period means the period defaults to the relative deadline. */
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags;
	/* Precompute the bandwidth ratio used by admission control. */
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_throttled = 0;
	/* Mark "new" so the first enqueue sets up runtime/deadline. */
	dl_se->dl_new = 1;
}
/*
 * Actually do the priority/policy change on @p: must hold pi & rq lock.
 * A policy of -1 means "setparam": keep the task's current policy.
 */
static void __setscheduler(struct rq *rq, struct task_struct *p,
			   const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == -1) /* setparam */
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;

	p->normal_prio = normal_prio(p);
	/* Effective prio may still be boosted by a held rt_mutex. */
	p->prio = rt_mutex_getprio(p);

	if (dl_prio(p->prio))
		p->sched_class = &dl_sched_class;
	else if (rt_prio(p->prio)) {
		p->sched_class = &rt_sched_class;
#ifdef CONFIG_SCHED_HMP
		/*
		 * HMP: confine unrestricted rt tasks to the slow cluster.
		 * NOTE(review): only applied when the affinity mask equals
		 * cpu_all_mask — tasks with an explicit mask keep it.
		 */
		if (!cpumask_empty(&hmp_slow_cpu_mask))
			if (cpumask_equal(&p->cpus_allowed, cpu_all_mask)) {
				p->nr_cpus_allowed =
					cpumask_weight(&hmp_slow_cpu_mask);
				do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
			}
#endif
	}
	else
		p->sched_class = &fair_sched_class;

	set_load_weight(p);
}
/*
 * Fill @attr with the static SCHED_DEADLINE parameters of task @p,
 * the inverse of __setparam_dl().
 */
static void
__getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
	attr->sched_priority = p->rt_priority;
	attr->sched_runtime  = p->dl.dl_runtime;
	attr->sched_deadline = p->dl.dl_deadline;
	attr->sched_period   = p->dl.dl_period;
	attr->sched_flags    = p->dl.flags;
}
/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline not being zero, and greater or equal
 * than the runtime, as well as the period being zero or
 * greater than the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
static bool
__checkparam_dl(const struct sched_attr *attr)
{
	/* deadline != 0 */
	if (attr->sched_deadline == 0)
		return false;

	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
	if (attr->sched_runtime < (1ULL << DL_SCALE))
		return false;

	/*
	 * The MSB is reserved for wrap-around and sign handling, so
	 * neither value may have it set (period may be zero, which is fine).
	 */
	if ((attr->sched_deadline | attr->sched_period) & (1ULL << 63))
		return false;

	/* deadline <= period whenever a period was given ... */
	if (attr->sched_period && attr->sched_period < attr->sched_deadline)
		return false;

	/* ... and runtime must fit within the deadline. */
	return attr->sched_runtime <= attr->sched_deadline;
}
/*
 * check_same_owner - does the target task belong to the current user?
 *
 * True when the caller's effective uid matches either the effective or
 * the real uid of @p.
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *my_cred = current_cred();
	const struct cred *their_cred;
	bool same;

	rcu_read_lock();
	their_cred = __task_cred(p);
	same = uid_eq(my_cred->euid, their_cred->euid) ||
	       uid_eq(my_cred->euid, their_cred->uid);
	rcu_read_unlock();

	return same;
}
/*
 * __sched_setscheduler - change the policy and/or parameters of task @p.
 * @p:    target task
 * @attr: new policy, priority, nice and deadline parameters
 * @user: true when the request originates from userspace, in which case
 *        permission and security checks are applied.
 *
 * Returns 0 on success or a negative errno. May loop via "recheck" if the
 * task's policy changes concurrently before the rq lock is taken.
 */
static int __sched_setscheduler(struct task_struct *p,
				const struct sched_attr *attr,
				bool user)
{
	int retval, oldprio, oldpolicy = -1, on_rq, running;
	int policy = attr->sched_policy;
	unsigned long flags;
	const struct sched_class *prev_class;
	struct rq *rq;
	int reset_on_fork;

	/* may grab non-irq protected spin_locks */
	BUG_ON(in_interrupt());
recheck:
	/* double check policy once rq lock held */
	if (policy < 0) {
		/* setparam: keep the current policy and reset_on_fork flag. */
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (policy != SCHED_DEADLINE &&
		    policy != SCHED_FIFO && policy != SCHED_RR &&
		    policy != SCHED_NORMAL && policy != SCHED_BATCH &&
		    policy != SCHED_IDLE)
			return -EINVAL;
	}

	/* Reject any flag we do not know about. */
	if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
		return -EINVAL;
	/* rt policies require a nonzero priority, all others require zero. */
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
	if (user && !capable(CAP_SYS_NICE)) {
		if (fair_policy(policy)) {
			if (attr->sched_nice < TASK_NICE(p) &&
			    !can_nice(p, attr->sched_nice))
				return -EPERM;
		}

		if (rt_policy(policy)) {
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);

			/* can't set/change the rt policy */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* can't increase priority */
			if (attr->sched_priority > p->rt_priority &&
			    attr->sched_priority > rlim_rtprio)
				return -EPERM;
		}

		/*
		 * Can't set/change SCHED_DEADLINE policy at all for now
		 * (safest behavior); in the future we would like to allow
		 * unprivileged DL tasks to increase their relative deadline
		 * or reduce their runtime (both ways reducing utilization)
		 */
		if (dl_policy(policy))
			return -EPERM;

		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
			if (!can_nice(p, TASK_NICE(p)))
				return -EPERM;
		}

		/* can't change other user's priorities */
		if (!check_same_owner(p))
			return -EPERM;

		/* Normal users shall not reset the sched_reset_on_fork flag */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
	}

	if (user) {
		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &flags);

	/*
	 * Changing the policy of the stop threads is a very bad idea
	 */
	if (p == rq->stop) {
		task_rq_unlock(rq, p, &flags);
		return -EINVAL;
	}

	/*
	 * If not changing anything there's no need to proceed further:
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy))
			goto change;

		task_rq_unlock(rq, p, &flags);
		return 0;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			task_rq_unlock(rq, p, &flags);
			return -EPERM;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, &p->cpus_allowed) ||
			    rq->rd->dl_bw.bw == 0) {
				task_rq_unlock(rq, p, &flags);
				return -EPERM;
			}
		}
#endif
	}

	/* recheck policy now with rq lock held */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &flags);
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
		task_rq_unlock(rq, p, &flags);
		return -EBUSY;
	}

	/* Take the task off the queue/cpu, apply the change, then restore. */
	on_rq = p->on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	p->sched_reset_on_fork = reset_on_fork;

	oldprio = p->prio;
	prev_class = p->sched_class;
	__setscheduler(rq, p, attr);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, 0);

	check_class_changed(rq, p, prev_class, oldprio);
	task_rq_unlock(rq, p, &flags);

	/* Re-evaluate PI boosting now that the base priority changed. */
	rt_mutex_adjust_pi(p);

	return 0;
}
/*
 * Legacy sched_param entry point: convert @param into a sched_attr and
 * forward to __sched_setscheduler(). @check selects whether permission
 * checks are applied.
 */
static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		/* Preserve the current nice level for a fair-policy switch. */
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	/*
	 * Fixup the legacy SCHED_RESET_ON_FORK hack, except if
	 * the policy=-1 was passed by sched_setparam().
	 */
	if ((policy != -1) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	/* true: apply permission/security checks (caller is accountable). */
	return _sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
/*
 * sched_setattr - extended-attribute variant of sched_setscheduler(),
 * with permission checks applied.
 */
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true);
}
EXPORT_SYMBOL_GPL(sched_setattr);
/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}
/*
 * Common backend for sys_sched_setscheduler() and sys_sched_setparam():
 * copy the sched_param from userspace, look up the task and apply the
 * change. A policy of -1 means "keep the current policy" (setparam).
 */
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();

	return retval;
}
/*
 * Copy a sched_attr from userspace, tolerating both older (smaller) and
 * newer (larger) struct layouts. Mimics kernel/events/core.c
 * perf_copy_attr(). Returns 0 on success, -EFAULT on a faulting copy, or
 * -E2BIG when @uattr is larger than we know of and carries nonzero
 * trailing bytes (in which case the size we do understand is written back
 * to uattr->size).
 */
static int sched_copy_attr(struct sched_attr __user *uattr,
			   struct sched_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = SCHED_ATTR_SIZE_VER0;

	if (size < SCHED_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		/* Every trailing byte must be zero or the ABI check fails. */
		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * XXX: do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, -20, 19);

out:
	return ret;

err_size:
	/* Tell userspace the largest layout this kernel understands. */
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
		struct sched_param __user *, param)
{
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}
/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	/* policy -1 means: keep the task's current policy. */
	return do_sched_setscheduler(pid, -1, param);
}
/**
 * sys_sched_setattr - like sys_sched_setscheduler, but with an extended
 * sched_attr allowing nice and SCHED_DEADLINE parameters.
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: reserved, must be zero.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	/* Unlike sched_setparam(), a negative policy is invalid here. */
	if ((int)attr.sched_policy < 0)
		return -EINVAL;

	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setattr(p, &attr);
	rcu_read_unlock();

	return retval;
}
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			/* Report the reset-on-fork flag OR'd into the policy. */
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
	}
	rcu_read_unlock();
	return retval;
}
/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/* Non-rt tasks report priority 0 (lp stays at its initializer). */
	if (task_has_rt_policy(p))
		lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
/*
 * Copy @attr back to userspace, honouring the caller's declared buffer
 * size @usize. Returns 0 on success, -EFAULT on a faulting copy, or
 * -E2BIG when the user buffer is smaller than the kernel struct and the
 * bytes that would be truncated are nonzero.
 */
static int sched_read_attr(struct sched_attr __user *uattr,
			   struct sched_attr *attr,
			   unsigned int usize)
{
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, usize))
		return -EFAULT;

	/*
	 * If we're handed a smaller struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. old
	 * user-space does not get incomplete information.
	 */
	if (usize < sizeof(*attr)) {
		unsigned char *addr;
		unsigned char *end;

		/* The trailing kernel-only bytes must all be zero to truncate. */
		addr = (void *)attr + usize;
		end  = (void *)attr + sizeof(*attr);

		for (; addr < end; addr++) {
			if (*addr)
				goto err_size;
		}

		attr->size = usize;
	}

	ret = copy_to_user(uattr, attr, attr->size);
	if (ret)
		return -EFAULT;

out:
	return ret;

err_size:
	ret = -E2BIG;
	goto out;
}
/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @size: sizeof(attr) for fwd/bwd comp.
 * @flags: reserved, must be zero.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, size, unsigned int, flags)
{
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
	};
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || size > PAGE_SIZE ||
	    size < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	attr.sched_policy = p->policy;
	if (p->sched_reset_on_fork)
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
	/* Fill the parameter that is meaningful for the task's class. */
	if (task_has_dl_policy(p))
		__getparam_dl(p, &attr);
	else if (task_has_rt_policy(p))
		attr.sched_priority = p->rt_priority;
	else
		attr.sched_nice = TASK_NICE(p);

	rcu_read_unlock();

	/* May fault: must be outside the RCU read-side section. */
	retval = sched_read_attr(uattr, &attr, size);
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
/*
 * sched_setaffinity - set the cpu affinity of task @pid to @in_mask.
 *
 * The effective mask is @in_mask restricted to the task's cpuset. If a
 * concurrent cpuset update races with us, we retry with the updated
 * cpuset mask. Returns 0 on success or a negative errno.
 */
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	/* Need same owner or CAP_SYS_NICE in the target's user namespace. */
	retval = -EPERM;
	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			goto out_unlock;
		}
		rcu_read_unlock();
	}

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
#ifdef CONFIG_SMP
	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
		rcu_read_lock();
		if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
			retval = -EBUSY;
			rcu_read_unlock();
			goto out_unlock;
		}
		rcu_read_unlock();
	}
#endif
again:
	retval = set_cpus_allowed_ptr(p, new_mask);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	return retval;
}
/*
 * Copy a user-supplied cpu bitmask of @len bytes into @new_mask.
 * A longer buffer is truncated to cpumask_size(); a shorter one leaves
 * the tail of @new_mask cleared. Returns 0 or -EFAULT.
 */
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len > cpumask_size())
		len = cpumask_size();
	else if (len < cpumask_size())
		cpumask_clear(new_mask);

	if (copy_from_user(new_mask, user_mask_ptr, len))
		return -EFAULT;

	return 0;
}
/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}
/*
 * sched_getaffinity - fill @mask with the affinity of task @pid,
 * restricted to currently active CPUs. Returns 0 or a negative errno.
 */
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	unsigned long flags;
	int retval;

	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/* pi_lock stabilizes cpus_allowed against concurrent setaffinity. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

out_unlock:
	rcu_read_unlock();

	return retval;
}
/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 *
 * Return: size in bytes copied on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	/* Buffer must cover all possible CPUs ... */
	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	/* ... and be a whole number of longs. */
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	struct rq *rq = this_rq_lock();

	schedstat_inc(rq, yld_count);
	current->sched_class->yield_task(rq);

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
	/* Open-coded spin_unlock() that keeps preemption disabled. */
	__release(rq->lock);
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
	do_raw_spin_unlock(&rq->lock);
	sched_preempt_enable_no_resched();

	schedule();

	return 0;
}
/*
 * Voluntarily reschedule. PREEMPT_ACTIVE marks this as a preemption so
 * __schedule() keeps the (possibly non-running-state) task on the runqueue.
 */
static void __cond_resched(void)
{
	__preempt_count_add(PREEMPT_ACTIVE);
	__schedule();
	__preempt_count_sub(PREEMPT_ACTIVE);
}
/*
 * _cond_resched - reschedule if a reschedule is pending.
 *
 * Returns 1 if we rescheduled, 0 otherwise.
 */
int __sched _cond_resched(void)
{
	if (!should_resched())
		return 0;

	__cond_resched();
	return 1;
}
EXPORT_SYMBOL(_cond_resched);
/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 *
 * Returns 1 if the lock was dropped and reacquired, 0 otherwise.
 */
int __cond_resched_lock(spinlock_t *lock)
{
	/* Sample once: the decision and the action must agree. */
	int resched = should_resched();
	int ret = 0;

	lockdep_assert_held(lock);

	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			__cond_resched();
		else
			/* Only lock contention: give the waiter a chance. */
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
/*
 * Like _cond_resched(), but callable from softirq-disabled context:
 * re-enables bottom halves around the reschedule. Returns 1 if we
 * rescheduled, 0 otherwise.
 */
int __sched __cond_resched_softirq(void)
{
	BUG_ON(!in_softirq());

	if (should_resched()) {
		local_bh_enable();
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	/* Stay runnable: we only give up the CPU, we do not sleep. */
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);
/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
	int yielded = 0;

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

	double_rq_lock(rq, p_rq);
	/* @p may have migrated while we took the locks: retry on its new rq. */
	if (task_rq(p) != p_rq) {
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
		goto out_unlock;

	if (curr->sched_class != p->sched_class)
		goto out_unlock;

	/* Can't boost a task that is already running or not runnable. */
	if (task_running(p_rq, p) || p->state)
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
	if (yielded) {
		schedstat_inc(rq, yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_task(p_rq->curr);
	}

out_unlock:
	double_rq_unlock(rq, p_rq);
out_irq:
	local_irq_restore(flags);

	/* Only actually give up the CPU when the boost succeeded. */
	if (yielded > 0)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	/* Flush queued block requests so the IO we wait for is submitted. */
	blk_flush_plug(current);
	current->in_iowait = 1;
	schedule();
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
/*
 * Like io_schedule(), but sleeps for at most @timeout jiffies.
 * Returns the remaining timeout as reported by schedule_timeout().
 */
long __sched io_schedule_timeout(long timeout)
{
	struct rq *rq = raw_rq();
	long ret;

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	/* Flush queued block requests so the IO we wait for is submitted. */
	blk_flush_plug(current);
	current->in_iowait = 1;
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
	return ret;
}
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		return MAX_USER_RT_PRIO-1;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		/* Non-rt classes have a single, fixed priority of 0. */
		return 0;
	}

	return -EINVAL;
}
/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		return 1;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		/* Non-rt classes have a single, fixed priority of 0. */
		return 0;
	}

	return -EINVAL;
}
/**
* sys_sched_rr_get_interval - return the default timeslice of a process.
* @pid: pid of the process.
* @interval: userspace pointer to the timeslice value.
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
*
* Return: On success, 0 and the timeslice is in @interval. Otherwise,
* an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct timespec __user *, interval)
{
	struct task_struct *p;
	unsigned int time_slice;
	unsigned long flags;
	struct rq *rq;
	int retval;
	struct timespec t;

	if (pid < 0)
		return -EINVAL;

	retval = -ESRCH;
	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	/*
	 * Pin the task's rq so the class callback sees a stable task/rq;
	 * classes without get_rr_interval report 0 (== infinite timeslice).
	 */
	rq = task_rq_lock(p, &flags);
	time_slice = 0;
	if (p->sched_class->get_rr_interval)
		time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, p, &flags);

	rcu_read_unlock();
	jiffies_to_timespec(time_slice, &t);
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}
/* One state character per task-state bit; indexed via __ffs(state) + 1 */
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

/*
 * Dump a one-line summary of @p (comm, state char, saved PC, free stack,
 * pid, parent pid, thread flags) plus its stack trace to the kernel log.
 */
void sched_show_task(struct task_struct *p)
{
	unsigned long free = 0;
	int ppid;
	unsigned state;

	/* 0 == TASK_RUNNING; otherwise index of the lowest set state bit + 1 */
	state = p->state ? __ffs(p->state) + 1 : 0;
	printk(KERN_INFO "%-15.15s %c", p->comm,
		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running ");
	else
		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
	if (state == TASK_RUNNING)
		printk(KERN_CONT " running task ");
	else
		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
	free = stack_not_used(p);
#endif
	/* real_parent is RCU-protected; sample the ppid under rcu_read_lock */
	rcu_read_lock();
	ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
		task_pid_nr(p), ppid,
		(unsigned long)task_thread_info(p)->flags);

	print_worker_info(KERN_INFO, p);
	show_stack(p, NULL);
}
/*
 * Dump every thread whose state matches @state_filter via sched_show_task();
 * a zero filter dumps all tasks (and then also all held locks).
 */
void show_state_filter(unsigned long state_filter)
{
	struct task_struct *g, *p;

#if BITS_PER_LONG == 32
	printk(KERN_INFO
		" task PC stack pid father\n");
#else
	printk(KERN_INFO
		" task PC stack pid father\n");
#endif
	rcu_read_lock();
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		if (!state_filter || (p->state & state_filter))
			sched_show_task(p);
	} while_each_thread(g, p);

	touch_all_softlockup_watchdogs();

#ifdef CONFIG_SCHED_DEBUG
	sysrq_sched_debug_show();
#endif
	rcu_read_unlock();
	/*
	 * Only show locks if all tasks are dumped:
	 */
	if (!state_filter)
		debug_show_all_locks();
}
/* Switch the boot idle task over to the idle scheduling class. */
void init_idle_bootup_task(struct task_struct *idle)
{
	idle->sched_class = &idle_sched_class;
}
/**
* init_idle - set up an idle thread for a given CPU
* @idle: task in question
* @cpu: cpu the idle task belongs to
*
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
void init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	__sched_fork(0, idle);
	idle->state = TASK_RUNNING;
	idle->se.exec_start = sched_clock();

	/* The idle task may only ever run on its own CPU */
	do_set_cpus_allowed(idle, cpumask_of(cpu));
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the cpu isn't yet set to this cpu so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
	__set_task_cpu(idle, cpu);
	rcu_read_unlock();

	rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP)
	idle->on_cpu = 1;
#endif
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	/* Set the preempt count _outside_ the spinlocks! */
	init_idle_preempt_count(idle, cpu);

	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_graph_init_idle_task(idle, cpu);
	vtime_init_idle(idle, cpu);
#if defined(CONFIG_SMP)
	/* Name the task e.g. "swapper/3" so it's identifiable per CPU */
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
}
#ifdef CONFIG_SMP
/*
 * Update @p's affinity mask and cached weight; lets the task's scheduling
 * class react first via its set_cpus_allowed() hook if it has one.
 * Caller is expected to hold the appropriate locks (see set_cpus_allowed_ptr()).
 */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}
/*
* This is how migration works:
*
* 1) we invoke migration_cpu_stop() on the target CPU using
* stop_one_cpu().
* 2) stopper starts to run (implicitly forcing the migrated thread
* off the CPU)
* 3) it checks whether the migrated task is still in the wrong runqueue.
* 4) if it's in the wrong runqueue then the migration thread removes
* it and puts it into the right queue.
* 5) stopper completes and stop_one_cpu() returns and the migration
* is done.
*/
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/* Nothing to do if the mask is unchanged */
	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	/* Refuse a mask with no active CPU in it */
	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (p->on_rq) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	}
	/* Not queued: next wakeup will place it on an allowed CPU */
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
* attempting to rebalance this task on exec (sched_exec).
*
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*
* Returns non-zero if task was successfully migrated.
*/
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
	struct rq *rq_dest, *rq_src;
	int ret = 0;

	/* Destination went offline: caller's wakeup path will re-place @p */
	if (unlikely(!cpu_active(dest_cpu)))
		return ret;

	rq_src = cpu_rq(src_cpu);
	rq_dest = cpu_rq(dest_cpu);

	/* pi_lock + both rq locks serialize against wakeups and other movers */
	raw_spin_lock(&p->pi_lock);
	double_rq_lock(rq_src, rq_dest);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
		goto done;
	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		goto fail;

	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
	if (p->on_rq) {
		dequeue_task(rq_src, p, 0);
		set_task_cpu(p, dest_cpu);
		enqueue_task(rq_dest, p, 0);
		check_preempt_curr(rq_dest, p, 0);
	}
done:
	ret = 1;
fail:
	double_rq_unlock(rq_src, rq_dest);
	raw_spin_unlock(&p->pi_lock);
	return ret;
}
#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
	struct migration_arg arg = { p, target_cpu };
	int curr_cpu = task_cpu(p);

	if (curr_cpu == target_cpu)
		return 0;

	if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
		return -EINVAL;

	/* TODO: This is not properly updating schedstats */

	trace_sched_move_numa(p, curr_cpu, target_cpu);
	/* The stopper thread on curr_cpu performs the actual move */
	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}
/*
* Requeue a task on a given node and accurately track the number of NUMA
* tasks on the runqueues
*/
/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
void sched_setnuma(struct task_struct *p, int nid)
{
	struct rq *rq;
	unsigned long flags;
	bool on_rq, running;

	rq = task_rq_lock(p, &flags);
	on_rq = p->on_rq;
	running = task_current(rq, p);

	/* Dequeue/put so the class accounting sees the nid change on requeue */
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	p->numa_preferred_nid = nid;

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, 0);
	task_rq_unlock(rq, p, &flags);
}
#endif
/*
* migration_cpu_stop - this will be executed by a highprio stopper thread
* and performs thread migration by bumping thread off CPU then
* 'pushing' onto another runqueue.
*/
/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
	local_irq_enable();
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
*/
/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
void idle_task_exit(void)
{
	struct mm_struct *mm = current->active_mm;

	/* Must only run while this CPU is already marked offline */
	BUG_ON(cpu_online(smp_processor_id()));

	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
	mmdrop(mm);
}
/*
* Since this CPU is going 'away' for a while, fold any nr_active delta
* we might have. Assumes we're called after migrate_tasks() so that the
* nr_active count is stable.
*
* Also see the comment "Global load-average calculations".
*/
/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have. Assumes we're called after migrate_tasks() so that the
 * nr_active count is stable.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}
/*
* Migrate all tasks from the rq, sleeping tasks will be migrated by
* try_to_wake_up()->select_task_rq().
*
 * Called with rq->lock held even though we're in stop_machine() and
* there's no concurrency possible, we hold the required locks anyway
* because of lock validation efforts.
*/
static void migrate_tasks(unsigned int dead_cpu)
{
	struct rq *rq = cpu_rq(dead_cpu);
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;

	/*
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the rq is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
	 */
	rq->stop = NULL;

	/*
	 * put_prev_task() and pick_next_task() sched
	 * class method both need to have an up-to-date
	 * value of rq->clock[_task]
	 */
	update_rq_clock(rq);

	for ( ; ; ) {
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
		if (rq->nr_running == 1)
			break;

		next = pick_next_task(rq);
		BUG_ON(!next);
		next->sched_class->put_prev_task(rq, next);

		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		/* __migrate_task() takes both rq locks; drop ours around it */
		raw_spin_unlock(&rq->lock);

		__migrate_task(next, dead_cpu, dest_cpu);

		raw_spin_lock(&rq->lock);
	}

	/* Restore the stashed stop task */
	rq->stop = stop;
}
#endif /* CONFIG_HOTPLUG_CPU */
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
/* /proc/sys/kernel/sched_domain directory; children attached at runtime */
static struct ctl_table sd_ctl_dir[] = {
	{
		.procname = "sched_domain",
		.mode = 0555,
	},
	{}
};

/* Root table anchoring sd_ctl_dir under /proc/sys/kernel */
static struct ctl_table sd_ctl_root[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = sd_ctl_dir,
	},
	{}
};
/*
 * Allocate a zero-filled array of @n ctl_table entries; callers rely on
 * the trailing zeroed slot acting as the table terminator.
 * Returns NULL on allocation failure.
 */
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	return kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
}
/* Recursively free a sysctl table tree built by sd_alloc_ctl_cpu_table(). */
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		/* procname was kstrdup()'d only for directory entries */
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}
/* Valid range enforced for the *_idx sysctl entries below */
static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

/*
 * Fill in one sysctl table entry; when @load_idx is true, clamp the value
 * to [min_load_idx, max_load_idx] via extra1/extra2.
 */
static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}
/*
 * Build the per-domain sysctl table (min_interval, busy_idx, ... name).
 * 13 slots: 12 entries + the zeroed terminator from sd_alloc_ctl_entry().
 * Returns NULL on allocation failure.
 */
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(13);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[12] is terminator */

	return table;
}
/*
 * Build the "domainN" directory table for one CPU, one child table per
 * sched domain in its hierarchy. Returns NULL on allocation failure.
 * NOTE(review): kstrdup()/sd_alloc_ctl_domain_table() failures are not
 * checked here; sd_free_ctl_entry() tolerates the resulting NULLs.
 */
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	/* First pass: count domains so we can size the table (+ terminator) */
	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
/* Handle for the registered table; needed to unregister it later */
static struct ctl_table_header *sd_sysctl_header;

/*
 * Build and register /proc/sys/kernel/sched_domain/cpuN/domainM/...
 * for every possible CPU. Silently bails on allocation failure.
 */
static void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	/* Must not still be populated from a previous registration */
	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
	/* Unregister first, then free: no readers may remain during the free */
	if (sd_sysctl_header)
		unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
/* No-op stubs when SCHED_DEBUG/SYSCTL support is compiled out */
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif
/*
 * Mark @rq online in its root domain and notify every scheduling class
 * via its rq_online() hook. Idempotent: does nothing if already online.
 */
static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}
/*
 * Inverse of set_rq_online(): notify the classes first, then clear the
 * CPU from the root domain's online mask. Idempotent.
 */
static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}
/*
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
*/
static int
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	unsigned long flags;
	struct rq *rq = cpu_rq(cpu);

	/* Handle frozen (suspend) variants the same as the plain events */
	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
		rq->calc_load_update = calc_load_update;
		break;

	case CPU_ONLINE:
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

			set_rq_online(rq);
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
		/* Flush wakeups queued for this CPU before it goes away */
		sched_ttwu_pending();
		/* Update our root-domain */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd) {
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			set_rq_offline(rq);
		}
		migrate_tasks(cpu);
		BUG_ON(rq->nr_running != 1); /* the migration thread */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;

	case CPU_DEAD:
		calc_load_migrate(rq);
		break;
#endif
	}

	update_max_interval();

	return NOTIFY_OK;
}
/*
* Register at high priority so that task migration (migrate_all_tasks)
* happens before everything else. This has to be lower priority than
* the notifier in the perf_event subsystem, though.
*/
/* Hotplug notifier for migration_call(); priority set so it runs early */
static struct notifier_block migration_notifier = {
	.notifier_call = migration_call,
	.priority = CPU_PRI_MIGRATION,
};
/*
 * Hotplug notifier: re-mark a CPU active when its planned shutdown
 * failed; every other event is left to other notifiers.
 */
static int sched_cpu_active(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_DOWN_FAILED) {
		set_cpu_active((long)hcpu, true);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
/*
 * Hotplug notifier: on CPU_DOWN_PREPARE, mark the CPU inactive and — for
 * a real hot-unplug (not suspend) — veto the offline with -EBUSY if the
 * remaining CPUs cannot absorb the SCHED_DEADLINE bandwidth.
 */
static int sched_cpu_inactive(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned long flags;
	long cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		set_cpu_active(cpu, false);

		/* explicitly allow suspend */
		if (!(action & CPU_TASKS_FROZEN)) {
			struct dl_bw *dl_b = dl_bw_of(cpu);
			bool overflow;
			int cpus;

			raw_spin_lock_irqsave(&dl_b->lock, flags);
			cpus = dl_bw_cpus(cpu);
			overflow = __dl_overflow(dl_b, cpus, 0, 0);
			raw_spin_unlock_irqrestore(&dl_b->lock, flags);

			if (overflow)
				return notifier_from_errno(-EBUSY);
		}
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
/*
 * Boot-time setup: run UP_PREPARE/ONLINE for the boot CPU by hand (it
 * came up before notifiers existed), then register the hotplug notifiers.
 */
static int __init migration_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	/* Initialize migration for the boot CPU */
	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
	BUG_ON(err == NOTIFY_BAD);
	migration_call(&migration_notifier, CPU_ONLINE, cpu);
	register_cpu_notifier(&migration_notifier);

	/* Register cpu active notifiers */
	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);

	return 0;
}
early_initcall(migration_init);
#endif
#ifdef CONFIG_SMP
static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
#ifdef CONFIG_SCHED_DEBUG
/* Set via the "sched_debug" boot parameter; gates sched_domain_debug() */
static __read_mostly int sched_debug_enabled;

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_debug_setup);
early_param("sched_debug", sched_debug_setup);
/* True when "sched_debug" was given on the kernel command line */
static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}
/*
 * Print one level of a CPU's sched-domain hierarchy and sanity-check it:
 * span membership, group chain integrity, group power, CPU overlap and
 * span coverage. Returns -1 to stop the walk (non-balancing domain),
 * 0 to continue to the parent. @groupmask is caller-provided scratch.
 */
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	char str[256];

	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
		return -1;
	}

	printk(KERN_CONT "span %s level %s\n", str, sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	/* Walk the circular group list, validating each group */
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		/*
		 * Even though we initialize ->power to something semi-sane,
		 * we leave power_orig unset. This allows us to detect if
		 * domain iteration is still funny without causing /0 traps.
		 */
		if (!group->sgp->power_orig) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: domain->cpu_power not "
					"set\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		/* Overlapping domains legitimately repeat CPUs across groups */
		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

		printk(KERN_CONT " %s", str);
		if (group->sgp->power != SCHED_POWER_SCALE) {
			printk(KERN_CONT " (cpu_power = %d)",
				group->sgp->power);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
	return 0;
}
/*
 * Walk @cpu's sched-domain hierarchy from the base up and dump/validate
 * each level via sched_domain_debug_one(). No-op unless the
 * "sched_debug" boot parameter enabled debugging.
 */
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int depth = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	while (sd) {
		/* A non-zero return means: stop walking upwards */
		if (sched_domain_debug_one(sd, cpu, depth, sched_domains_tmpmask))
			break;
		depth++;
		sd = sd->parent;
	}
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
/* Debugging compiled out: never report sched_debug as enabled */
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */
/*
 * A domain is degenerate (useless for balancing) when it spans a single
 * CPU, or when none of its flags can do anything with its group layout.
 * Returns 1 if the domain can be collapsed, 0 otherwise.
 */
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* These flags are only meaningful with at least two groups */
	if ((sd->flags & (SD_LOAD_BALANCE |
			  SD_BALANCE_NEWIDLE |
			  SD_BALANCE_FORK |
			  SD_BALANCE_EXEC |
			  SD_SHARE_CPUPOWER |
			  SD_SHARE_PKG_RESOURCES)) &&
	    sd->groups != sd->groups->next)
		return 0;

	/* This flag is useful regardless of the group layout */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}
/*
 * Decide whether @parent adds nothing over @sd and can be removed from
 * the hierarchy: same span, and no flags that need its (single) group.
 * Returns 1 if @parent is redundant, 0 otherwise.
 */
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_SHARE_CPUPOWER |
				SD_SHARE_PKG_RESOURCES |
				SD_PREFER_SIBLING);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	/* Parent is redundant only if the child already has all its flags */
	if (~cflags & pflags)
		return 0;

	return 1;
}
/*
 * RCU callback releasing a root_domain: tear down cpupri/cpudl state and
 * free the masks in reverse order of init_rootdomain()'s allocation.
 */
static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}
/*
 * Move @rq from its current root domain (if any) to @rd, keeping the
 * online/span masks and refcounts consistent. The old root domain is
 * freed via RCU once its last reference drops.
 */
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we dont want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
/*
 * Initialize a root_domain: allocate its cpumasks and set up the
 * deadline (cpudl) and RT-priority (cpupri) search structures.
 * Returns 0 on success, -ENOMEM on failure with everything that was
 * allocated so far released again.
 */
static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	/* Was "goto free_dlo_mask", which leaked rto_mask */
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	/* Was "goto free_rto_mask", which leaked the cpudl state */
	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}
/*
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/
/* The boot-time root domain covering all CPUs until topology is built */
struct root_domain def_root_domain;

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	/* Permanent reference: the default root domain is never freed */
	atomic_set(&def_root_domain.refcount, 1);
}
/*
 * Allocate and initialize a fresh root_domain.
 * Returns NULL if either the allocation or the initialization fails.
 */
static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd = kmalloc(sizeof(*rd), GFP_KERNEL);

	if (rd && init_rootdomain(rd) != 0) {
		kfree(rd);
		rd = NULL;
	}

	return rd;
}
/*
 * Free a circular list of sched_groups starting at @sg. When @free_sgp
 * is set, also drop each group's power structure once its refcount hits
 * zero (groups can share sgp between domains).
 */
static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		/* Save ->next before freeing the node we're standing on */
		tmp = sg->next;

		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
			kfree(sg->sgp);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}
/* RCU callback freeing a sched_domain and (when last owner) its groups. */
static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If its an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgp);
		kfree(sd->groups);
	}
	kfree(sd);
}
/* Defer freeing @sd until after an RCU grace period (@cpu is unused here). */
static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}
/*
 * Queue @sd and all of its ancestors for RCU destruction, walking the
 * ->parent chain from the base domain upwards.
 */
static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	while (sd) {
		struct sched_domain *parent = sd->parent;

		destroy_sched_domain(sd, cpu);
		sd = parent;
	}
}
/*
* Keep a special pointer to the highest sched_domain that has
* SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
 * allows us to avoid some pointer chasing in select_idle_sibling().
*
* Also keep a unique ID per domain (we use the first cpu number in
* the cpumask of the domain), this allows us to quickly tell if
* two cpus are in the same cache domain, see cpus_share_cache().
*/
DEFINE_PER_CPU(struct sched_domain *, sd_llc);	/* highest cache-sharing domain */
DEFINE_PER_CPU(int, sd_llc_size);		/* weight of that domain's span */
DEFINE_PER_CPU(int, sd_llc_id);			/* first cpu in its span (cache id) */
DEFINE_PER_CPU(struct sched_domain *, sd_numa);	/* lowest SD_NUMA domain */
DEFINE_PER_CPU(struct sched_domain *, sd_busy);	/* parent of sd_llc */
DEFINE_PER_CPU(struct sched_domain *, sd_asym);	/* highest SD_ASYM_PACKING domain */
/*
 * Recompute the cached per-cpu domain shortcuts (sd_llc*, sd_numa,
 * sd_busy, sd_asym) for @cpu after its domain hierarchy changed.
 */
static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	struct sched_domain *busy_sd = NULL;
	/* Defaults when no cache-sharing domain exists: the cpu itself */
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		busy_sd = sd->parent; /* sd_busy */
	}
	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}
/*
* Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			/* Splice the redundant parent out of the chain */
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	/* The base domain itself may also be degenerate */
	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	/* Publish the new hierarchy before tearing down the old one (RCU) */
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp, cpu);

	update_top_cache_domain(cpu);
}
/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	/* Parse e.g. "isolcpus=1,3-5" into the isolation mask */
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);
/* Topology mask helper: all CPUs on the same NUMA node as @cpu. */
static const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}
/* Per-topology-level percpu storage for domains, groups and group power */
struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_power **__percpu sgp;
};

/* Scratch state while (re)building domains: per-cpu base domains + root */
struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain *rd;
};

/*
 * Progress marker for domain allocation — presumably used to roll back
 * exactly what was allocated on failure (caller not visible here; confirm).
 */
enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

struct sched_domain_topology_level;

/* Constructor / span callbacks supplied by each topology level */
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);

#define SDTL_OVERLAP	0x01	/* level's spans may overlap between cpus */

/* One entry in the topology table the domain builder iterates over */
struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
};
/*
* Build an iteration mask that can exclude certain CPUs from the upwards
* domain traversal.
*
* Asymmetric node setups can result in situations where the domain tree is of
* unequal depth, make sure to skip domains that already cover the entire
* range.
*
* In that case build_sched_domains() will have terminated the iteration early
* and our sibling sd spans will be empty. Domains should always include the
* cpu they're built on, so check that.
*
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *span = sched_domain_span(sd);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, span) {
		sibling = *per_cpu_ptr(sdd->sd, i);
		/* Skip cpus whose sibling domain doesn't include themselves */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}
}
/*
* Return the canonical balance cpu for this group, this is the first cpu
* of this group that's also in the iteration mask.
*/
/*
 * Return the canonical balance cpu for this group, this is the first cpu
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}
/*
 * Build the (possibly overlapping) group list for @sd, one group per
 * uncovered child-domain span. Returns 0 on success, -ENOMEM on
 * allocation failure (partially built groups are freed).
 */
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *child;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		/* Each cpu goes into exactly one group */
		if (cpumask_test_cpu(i, covered))
			continue;

		child = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask(). */
		if (!cpumask_test_cpu(i, sched_domain_span(child)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				  GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (child->child) {
			child = child->child;
			cpumask_copy(sg_span, sched_domain_span(child));
		} else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
		/* First owner of the shared sgp builds its iteration mask */
		if (atomic_inc_return(&sg->sgp->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgp->power such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
		sg->sgp->power_orig = sg->sgp->power;

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance cpu. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		/* Keep the list circular at every step */
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
/*
 * Map @cpu to its group's representative cpu (the first cpu of the
 * child domain's span, if there is a child) and optionally hand back
 * the per-cpu group structure for it.  Used by build_sched_groups()
 * for the non-overlapping case.
 */
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
struct sched_domain *child = sd->child;
/* All cpus of one child span share the child's first cpu as group id. */
if (child)
cpu = cpumask_first(sched_domain_span(child));
if (sg) {
*sg = *per_cpu_ptr(sdd->sg, cpu);
(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
}
return cpu;
}
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
const struct cpumask *span = sched_domain_span(sd);
struct cpumask *covered;
int i;
get_group(cpu, sdd, &sd->groups);
atomic_inc(&sd->groups->ref);
/* Only the first cpu of the span builds the whole group list. */
if (cpu != cpumask_first(span))
return 0;
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
cpumask_clear(covered);
for_each_cpu(i, span) {
struct sched_group *sg;
int group, j;
if (cpumask_test_cpu(i, covered))
continue;
group = get_group(i, sdd, &sg);
cpumask_clear(sched_group_cpus(sg));
sg->sgp->power = 0;
/* Non-overlapping domains may iterate all of the group's cpus. */
cpumask_setall(sched_group_mask(sg));
/* Gather every cpu in the span that maps to this same group. */
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
continue;
cpumask_set_cpu(j, covered);
cpumask_set_cpu(j, sched_group_cpus(sg));
}
if (!first)
first = sg;
if (last)
last->next = sg;
last = sg;
}
/* Close the circular list. */
last->next = first;
return 0;
}
/*
 * Initialize sched groups cpu_power.
 *
 * cpu_power indicates the capacity of sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_power for all the groups in a sched domain will be same unless
 * there are asymmetries in the topology. If there are asymmetries, group
 * having more cpu_power will pickup more load compared to the group having
 * less cpu_power.
 */
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
struct sched_group *sg = sd->groups;
WARN_ON(!sg);
/* Cache each group's weight; the list is circular, stop at the start. */
do {
sg->group_weight = cpumask_weight(sched_group_cpus(sg));
sg = sg->next;
} while (sg != sd->groups);
/* Only the group's canonical balance cpu updates the power figures. */
if (cpu != group_balance_cpu(sg))
return;
update_group_power(sd, cpu);
atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
}
/*
 * Weak default: no asymmetric SMT packing.  Architectures that want it
 * override this to return SD_ASYM_PACKING (the 0* form documents which
 * flag is at stake).
 */
int __weak arch_sd_sibling_asym_packing(void)
{
return 0*SD_ASYM_PACKING;
}
/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type) sd->name = #type
#else
# define SD_INIT_NAME(sd, type) do { } while (0)
#endif
/* Stamp the per-cpu domain with the SD_<type>_INIT template. */
#define SD_INIT_FUNC(type) \
static noinline struct sched_domain * \
sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
{ \
struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
*sd = SD_##type##_INIT; \
SD_INIT_NAME(sd, type); \
sd->private = &tl->data; \
return sd; \
}
/* One sd_init_<type>() per configured topology level. */
SD_INIT_FUNC(CPU)
#ifdef CONFIG_SCHED_SMT
SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
SD_INIT_FUNC(MC)
#endif
#ifdef CONFIG_SCHED_BOOK
SD_INIT_FUNC(BOOK)
#endif
/* -1: use each domain's own default; >= 0: boot-time override. */
static int default_relax_domain_level = -1;
int sched_domain_level_max;
/* Parse the "relax_domain_level=" kernel command line option. */
static int __init setup_relax_domain_level(char *str)
{
if (kstrtoint(str, 0, &default_relax_domain_level))
pr_warn("Unable to set relax_domain_level\n");
return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
/*
 * Apply the requested relax_domain_level to @sd: domains at or above
 * the requested level get wake/newidle balancing turned off, domains
 * below it keep them on.  A NULL @attr (or a negative level) falls
 * back to the boot-time default; if that is unset too, leave @sd alone.
 */
static void set_domain_attribute(struct sched_domain *sd,
struct sched_domain_attr *attr)
{
int request;
if (!attr || attr->relax_domain_level < 0) {
if (default_relax_domain_level < 0)
return;
else
request = default_relax_domain_level;
} else
request = attr->relax_domain_level;
if (request < sd->level) {
/* turn off idle balance on this domain */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
} else {
/* turn on idle balance on this domain */
sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
}
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);
/*
 * Tear down partially built domain state.  @what names the deepest
 * allocation stage that succeeded; each case intentionally falls
 * through so everything allocated before it is released as well.
 */
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
const struct cpumask *cpu_map)
{
switch (what) {
case sa_rootdomain:
if (!atomic_read(&d->rd->refcount))
free_rootdomain(&d->rd->rcu); /* fall through */
case sa_sd:
free_percpu(d->sd); /* fall through */
case sa_sd_storage:
__sdt_free(cpu_map); /* fall through */
case sa_none:
break;
}
}
/*
 * Allocate everything build_sched_domains() needs.  Returns the stage
 * reached; anything short of sa_rootdomain means failure, and the
 * return value tells __free_domain_allocs() how much to unwind.
 */
static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
const struct cpumask *cpu_map)
{
memset(d, 0, sizeof(*d));
if (__sdt_alloc(cpu_map))
return sa_sd_storage;
d->sd = alloc_percpu(struct sched_domain *);
if (!d->sd)
return sa_sd_storage;
d->rd = alloc_rootdomain();
if (!d->rd)
return sa_sd;
return sa_rootdomain;
}
/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
/* Groups/powers are only claimed once referenced (see get_group()). */
if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
*per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
/* SMT level: the hardware threads sharing one core. */
static const struct cpumask *cpu_smt_mask(int cpu)
{
return topology_thread_cpumask(cpu);
}
#endif
/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
{ sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
{ sd_init_BOOK, cpu_book_mask, },
#endif
{ sd_init_CPU, cpu_cpu_mask, },
{ NULL, },
};
static struct sched_domain_topology_level *sched_domain_topology = default_topology;
/* Iterate topology levels; a NULL ->init terminates the array. */
#define for_each_sd_topology(tl) \
for (tl = sched_domain_topology; tl->init; tl++)
#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
/* Unique inter-node distances, ascending (built by sched_init_numa()). */
static int *sched_domains_numa_distance;
/* [level][node] -> mask of cpus within that distance of the node. */
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
/*
 * Beyond RECLAIM_DISTANCE, don't do affine wakeups or fork/exec
 * balancing at this NUMA level.
 */
static inline int sd_local_flags(int level)
{
if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
return 0;
return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
}
/*
 * Template initializer for one NUMA topology level.  The 0*/1* flag
 * products document every flag considered; exec/fork/wake-affine
 * balancing is only added for close nodes via sd_local_flags().
 */
static struct sched_domain *
sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
{
struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
int level = tl->numa_level;
/* Balance intervals scale with the number of cpus at this level. */
int sd_weight = cpumask_weight(
sched_domains_numa_masks[level][cpu_to_node(cpu)]);
*sd = (struct sched_domain){
.min_interval = sd_weight,
.max_interval = 2*sd_weight,
.busy_factor = 32,
.imbalance_pct = 125,
.cache_nice_tries = 2,
.busy_idx = 3,
.idle_idx = 2,
.newidle_idx = 0,
.wake_idx = 0,
.forkexec_idx = 0,
.flags = 1*SD_LOAD_BALANCE
| 1*SD_BALANCE_NEWIDLE
| 0*SD_BALANCE_EXEC
| 0*SD_BALANCE_FORK
| 0*SD_BALANCE_WAKE
| 0*SD_WAKE_AFFINE
| 0*SD_SHARE_CPUPOWER
| 0*SD_SHARE_PKG_RESOURCES
| 1*SD_SERIALIZE
| 0*SD_PREFER_SIBLING
| 1*SD_NUMA
| sd_local_flags(level)
,
.last_balance = jiffies,
.balance_interval = sd_weight,
};
SD_INIT_NAME(sd, NUMA);
sd->private = &tl->data;
/*
 * Ugly hack to pass state to sd_numa_mask()...
 */
sched_domains_curr_level = tl->numa_level;
return sd;
}
/* Mask for the NUMA level most recently set up by sd_numa_init(). */
static const struct cpumask *sd_numa_mask(int cpu)
{
return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}
/*
 * One-shot warning: print @str and dump the full node distance table
 * so suspicious NUMA topologies can be diagnosed from the log.
 */
static void sched_numa_warn(const char *str)
{
static int done = false;
int i,j;
if (done)
return;
done = true;
printk(KERN_WARNING "ERROR: %s\n\n", str);
for (i = 0; i < nr_node_ids; i++) {
printk(KERN_WARNING " ");
for (j = 0; j < nr_node_ids; j++)
printk(KERN_CONT "%02d ", node_distance(i,j));
printk(KERN_CONT "\n");
}
printk(KERN_WARNING "\n");
}
/*
 * Check whether @distance is a NUMA distance we already know about:
 * either the identity distance node_distance(0, 0) or one recorded in
 * sched_domains_numa_distance[].
 */
static bool find_numa_distance(int distance)
{
	int level = 0;

	/* The local (identity) distance always counts as known. */
	if (node_distance(0, 0) == distance)
		return true;

	while (level < sched_domains_numa_levels) {
		if (distance == sched_domains_numa_distance[level])
			return true;
		level++;
	}

	return false;
}
/*
 * Discover the set of unique inter-node distances, build a per-level
 * array of node masks, and append one NUMA topology level per distance
 * to the sched domain topology.  On any allocation failure we bail out
 * early; sched_domains_numa_levels is only raised to the final value
 * once everything has succeeded (see the comment below).
 */
static void sched_init_numa(void)
{
int next_distance, curr_distance = node_distance(0, 0);
struct sched_domain_topology_level *tl;
int level = 0;
int i, j, k;
sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
if (!sched_domains_numa_distance)
return;
/*
 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
 * unique distances in the node_distance() table.
 *
 * Assumes node_distance(0,j) includes all distances in
 * node_distance(i,j) in order to avoid cubic time.
 */
next_distance = curr_distance;
for (i = 0; i < nr_node_ids; i++) {
for (j = 0; j < nr_node_ids; j++) {
for (k = 0; k < nr_node_ids; k++) {
int distance = node_distance(i, k);
/* Track the smallest distance above curr_distance. */
if (distance > curr_distance &&
(distance < next_distance ||
next_distance == curr_distance))
next_distance = distance;
/*
 * While not a strong assumption it would be nice to know
 * about cases where if node A is connected to B, B is not
 * equally connected to A.
 */
if (sched_debug() && node_distance(k, i) != distance)
sched_numa_warn("Node-distance not symmetric");
if (sched_debug() && i && !find_numa_distance(distance))
sched_numa_warn("Node-0 not representative");
}
if (next_distance != curr_distance) {
sched_domains_numa_distance[level++] = next_distance;
sched_domains_numa_levels = level;
curr_distance = next_distance;
} else break;
}
/*
 * In case of sched_debug() we verify the above assumption.
 */
if (!sched_debug())
break;
}
/*
 * 'level' contains the number of unique distances, excluding the
 * identity distance node_distance(i,i).
 *
 * The sched_domains_numa_distance[] array includes the actual distance
 * numbers.
 */
/*
 * Here, we should temporarily reset sched_domains_numa_levels to 0.
 * If it fails to allocate memory for array sched_domains_numa_masks[][],
 * the array will contain less then 'level' members. This could be
 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
 * in other functions.
 *
 * We reset it to 'level' at the end of this function.
 */
sched_domains_numa_levels = 0;
sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
if (!sched_domains_numa_masks)
return;
/*
 * Now for each level, construct a mask per node which contains all
 * cpus of nodes that are that many hops away from us.
 */
for (i = 0; i < level; i++) {
sched_domains_numa_masks[i] =
kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
if (!sched_domains_numa_masks[i])
return;
for (j = 0; j < nr_node_ids; j++) {
struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
if (!mask)
return;
sched_domains_numa_masks[i][j] = mask;
for (k = 0; k < nr_node_ids; k++) {
if (node_distance(j, k) > sched_domains_numa_distance[i])
continue;
cpumask_or(mask, mask, cpumask_of_node(k));
}
}
}
/* New topology array: default levels followed by the NUMA levels. */
tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
sizeof(struct sched_domain_topology_level), GFP_KERNEL);
if (!tl)
return;
/*
 * Copy the default topology bits..
 */
for (i = 0; default_topology[i].init; i++)
tl[i] = default_topology[i];
/*
 * .. and append 'j' levels of NUMA goodness.
 */
for (j = 0; j < level; i++, j++) {
tl[i] = (struct sched_domain_topology_level){
.init = sd_numa_init,
.mask = sd_numa_mask,
.flags = SDTL_OVERLAP,
.numa_level = j,
};
}
sched_domain_topology = tl;
sched_domains_numa_levels = level;
}
/*
 * Add @cpu to every level/node mask whose distance from @cpu's node is
 * within that level's distance bound.  Called when a cpu comes online.
 */
static void sched_domains_numa_masks_set(int cpu)
{
	int level, node;
	int this_node = cpu_to_node(cpu);

	for (level = 0; level < sched_domains_numa_levels; level++) {
		for (node = 0; node < nr_node_ids; node++) {
			if (node_distance(node, this_node) > sched_domains_numa_distance[level])
				continue;
			cpumask_set_cpu(cpu, sched_domains_numa_masks[level][node]);
		}
	}
}
/* Remove @cpu from every level/node mask.  Called when a cpu dies. */
static void sched_domains_numa_masks_clear(int cpu)
{
	int level, node;

	for (level = 0; level < sched_domains_numa_levels; level++)
		for (node = 0; node < nr_node_ids; node++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[level][node]);
}
/*
 * Update sched_domains_numa_masks[level][node] array when new cpus
 * are onlined.
 */
static int sched_domains_numa_masks_update(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
int cpu = (long)hcpu;
/* Strip the FROZEN qualifier: suspend/resume updates masks too. */
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
sched_domains_numa_masks_set(cpu);
break;
case CPU_DEAD:
sched_domains_numa_masks_clear(cpu);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
#else
/* !CONFIG_NUMA: no NUMA levels to set up, notifier is a no-op. */
static inline void sched_init_numa(void)
{
}
static int sched_domains_numa_masks_update(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
return 0;
}
#endif /* CONFIG_NUMA */
/*
 * Allocate the per-topology-level, per-cpu sched_domain / sched_group /
 * sched_group_power storage that build_sched_domain() later stitches
 * together.  Returns 0 or -ENOMEM; on failure the caller unwinds via
 * __sdt_free(), which tolerates partial allocation.
 */
static int __sdt_alloc(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for_each_sd_topology(tl) {
struct sd_data *sdd = &tl->data;
sdd->sd = alloc_percpu(struct sched_domain *);
if (!sdd->sd)
return -ENOMEM;
sdd->sg = alloc_percpu(struct sched_group *);
if (!sdd->sg)
return -ENOMEM;
sdd->sgp = alloc_percpu(struct sched_group_power *);
if (!sdd->sgp)
return -ENOMEM;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
struct sched_group_power *sgp;
/* The trailing cpumask_size() bytes hold each entity's cpumask. */
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sd)
return -ENOMEM;
*per_cpu_ptr(sdd->sd, j) = sd;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sg)
return -ENOMEM;
sg->next = sg;
*per_cpu_ptr(sdd->sg, j) = sg;
sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sgp)
return -ENOMEM;
*per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
return 0;
}
/*
 * Free whatever __sdt_alloc() managed to allocate.  Entries claimed by
 * claim_allocations() were NULLed there and are skipped safely because
 * kfree(NULL) is a no-op.
 */
static void __sdt_free(const struct cpumask *cpu_map)
{
struct sched_domain_topology_level *tl;
int j;
for_each_sd_topology(tl) {
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
if (sdd->sd) {
sd = *per_cpu_ptr(sdd->sd, j);
/* Overlap groups were allocated privately; free them too. */
if (sd && (sd->flags & SD_OVERLAP))
free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sd, j));
}
if (sdd->sg)
kfree(*per_cpu_ptr(sdd->sg, j));
if (sdd->sgp)
kfree(*per_cpu_ptr(sdd->sgp, j));
}
free_percpu(sdd->sd);
sdd->sd = NULL;
free_percpu(sdd->sg);
sdd->sg = NULL;
free_percpu(sdd->sgp);
sdd->sgp = NULL;
}
}
/*
 * Initialize one sched_domain at topology level @tl for @cpu and hook
 * it in above @child.  Returns the new domain, or @child unchanged if
 * the level's init declines to produce one.
 */
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *child, int cpu)
{
struct sched_domain *sd = tl->init(tl, cpu);
if (!sd)
return child;
/* Limit the level's span to the cpus we're actually building for. */
cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
if (child) {
sd->level = child->level + 1;
sched_domain_level_max = max(sched_domain_level_max, sd->level);
child->parent = sd;
sd->child = child;
}
set_domain_attribute(sd, attr);
return sd;
}
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
static int build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
enum s_alloc alloc_state;
struct sched_domain *sd;
struct s_data d;
int i, ret = -ENOMEM;
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
goto error;
/* Set up domains for cpus specified by the cpu_map. */
for_each_cpu(i, cpu_map) {
struct sched_domain_topology_level *tl;
sd = NULL;
for_each_sd_topology(tl) {
sd = build_sched_domain(tl, cpu_map, attr, sd, i);
/* Remember the bottom-most domain for this cpu. */
if (tl == sched_domain_topology)
*per_cpu_ptr(d.sd, i) = sd;
if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
sd->flags |= SD_OVERLAP;
/* No point building higher levels once the map is covered. */
if (cpumask_equal(cpu_map, sched_domain_span(sd)))
break;
}
}
/* Build the groups for the domains */
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
if (sd->flags & SD_OVERLAP) {
if (build_overlap_sched_groups(sd, i))
goto error;
} else {
if (build_sched_groups(sd, i))
goto error;
}
}
}
/* Calculate CPU power for physical packages and nodes */
for (i = nr_cpumask_bits-1; i >= 0; i--) {
if (!cpumask_test_cpu(i, cpu_map))
continue;
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
claim_allocations(i, sd);
init_sched_groups_power(i, sd);
}
}
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
sd = *per_cpu_ptr(d.sd, i);
cpu_attach_domain(sd, d.rd, i);
}
rcu_read_unlock();
ret = 0;
error:
/* On success this only frees the scaffolding claim_allocations() left. */
__free_domain_allocs(&d, alloc_state, cpu_map);
return ret;
}
static cpumask_var_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
/* attributes of custom domains in 'doms_cur' */
/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;
/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
return 0;
}
/*
 * Allocate an array of @ndoms cpumasks for use as sched domain
 * partitions.  Returns NULL on failure; partially allocated masks are
 * rolled back before returning.
 */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
int i;
cpumask_var_t *doms;
doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
/* Free the i masks allocated so far, plus the array. */
free_sched_domains(doms, i);
return NULL;
}
}
return doms;
}
/* Release @ndoms cpumasks allocated by alloc_sched_domains(), then the array. */
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int idx = 0;

	while (idx < ndoms)
		free_cpumask_var(doms[idx++]);

	kfree(doms);
}
/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
int err;
arch_update_cpu_topology();
ndoms_cur = 1;
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
doms_cur = &fallback_doms;
/* One partition spanning everything but the isolated cpus. */
cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
err = build_sched_domains(doms_cur[0], NULL);
register_sched_domain_sysctl();
return err;
}
/*
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
int i;
/* Attach under the RCU read lock, as build_sched_domains() does. */
rcu_read_lock();
for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
rcu_read_unlock();
}
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
struct sched_domain_attr *new, int idx_new)
{
struct sched_domain_attr tmp;
/* fast path */
if (!new && !cur)
return 1;
tmp = SD_ATTR_INIT;
return !memcmp(cur ? (cur + idx_cur) : &tmp,
new ? (new + idx_new) : &tmp,
sizeof(struct sched_domain_attr));
}
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fallback to the single partition
 * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
int i, j, n;
int new_topology;
mutex_lock(&sched_domains_mutex);
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();
/* Let architecture update cpu core mappings. */
new_topology = arch_update_cpu_topology();
n = doms_new ? ndoms_new : 0;
/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
/* A topology change invalidates every match (!new_topology guard). */
for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_cur[i], doms_new[j])
&& dattrs_equal(dattr_cur, i, dattr_new, j))
goto match1;
}
/* no match - a current sched domain not in new doms_new[] */
detach_destroy_domains(doms_cur[i]);
match1:
;
}
n = ndoms_cur;
if (doms_new == NULL) {
/* Fall back to one domain spanning the non-isolated active cpus. */
n = 0;
doms_new = &fallback_doms;
cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
}
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j])
&& dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* no match - add a new doms_new */
build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
;
}
/* Remember the new sched domains */
if (doms_cur != &fallback_doms)
free_sched_domains(doms_cur, ndoms_cur);
kfree(dattr_cur); /* kfree(NULL) is safe */
doms_cur = doms_new;
dattr_cur = dattr_new;
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
mutex_unlock(&sched_domains_mutex);
}
static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore it back to its original state upon resume anyway.
 *
 * Registered as a hotplug notifier from sched_init_smp().
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
switch (action) {
case CPU_ONLINE_FROZEN:
case CPU_DOWN_FAILED_FROZEN:
/*
 * num_cpus_frozen tracks how many CPUs are involved in suspend
 * resume sequence. As long as this is not the last online
 * operation in the resume sequence, just build a single sched
 * domain, ignoring cpusets.
 */
num_cpus_frozen--;
if (likely(num_cpus_frozen)) {
partition_sched_domains(1, NULL, NULL);
break;
}
/*
 * This is the last CPU online operation. So fall through and
 * restore the original sched domains by considering the
 * cpuset configurations.
 */
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus(true);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
/* Hotplug-down counterpart of cpuset_cpu_active(). */
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
switch (action) {
case CPU_DOWN_PREPARE:
cpuset_update_active_cpus(false);
break;
case CPU_DOWN_PREPARE_FROZEN:
/* Entering suspend: count the cpu and collapse to one domain. */
num_cpus_frozen++;
partition_sched_domains(1, NULL, NULL);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
/*
 * SMP bootstrap: build the initial sched domains, register the hotplug
 * notifiers, and move init off any isolated cpu.
 */
void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
sched_init_numa();
/*
 * There's no userspace yet to cause hotplug operations; hence all the
 * cpu masks are stable and all blatant races in the below code cannot
 * happen.
 */
mutex_lock(&sched_domains_mutex);
init_sched_domains(cpu_active_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
/* Guarantee at least one cpu for init to run on. */
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
init_hrtick();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
BUG();
sched_init_granularity();
free_cpumask_var(non_isolated_cpus);
init_sched_rt_class();
init_sched_dl_class();
}
#else
/* !CONFIG_SMP: nothing to build beyond the granularity defaults. */
void __init sched_init_smp(void)
{
sched_init_granularity();
}
#endif /* CONFIG_SMP */
const_debug unsigned int sysctl_timer_migration = 1;
/* True if @addr lies within scheduler (or locking) kernel text. */
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
(addr >= (unsigned long)__sched_text_start
&& addr < (unsigned long)__sched_text_end);
}
#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);
#endif
/* Per-cpu scratch mask; storage is assigned in sched_init() when
 * CONFIG_CPUMASK_OFFSTACK carves it out of the boot allocation. */
DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
/*
 * Core scheduler bootstrap, run once early at boot: carves out the
 * group-scheduling arrays, initializes bandwidth defaults and every
 * cpu's runqueue, and turns the booting thread into the idle task.
 */
void __init sched_init(void)
{
int i, j;
unsigned long alloc_size = 0, ptr;
/* Size one combined allocation for all per-cpu pointer arrays. */
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
alloc_size += num_possible_cpus() * cpumask_size();
#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
/* Carve the single allocation up between the configured users. */
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.se = (struct sched_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
for_each_possible_cpu(i) {
per_cpu(load_balance_mask, i) = (void *)ptr;
ptr += cpumask_size();
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
}
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
init_dl_bandwidth(&def_dl_bandwidth,
global_rt_period(), global_rt_runtime());
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&root_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
list_add(&root_task_group.list, &task_groups);
INIT_LIST_HEAD(&root_task_group.children);
INIT_LIST_HEAD(&root_task_group.siblings);
autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */
/* Initialize every possible cpu's runqueue. */
for_each_possible_cpu(i) {
struct rq *rq;
rq = cpu_rq(i);
raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt, rq);
init_dl_rq(&rq->dl, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
/*
 * How much cpu bandwidth does root_task_group get?
 *
 * In case of task-groups formed thr' the cgroup filesystem, it
 * gets 100% of the cpu resources in the system. This overall
 * system cpu resource is divided among the tasks of
 * root_task_group and its child task-groups in a fair manner,
 * based on each entity's (task or task-group's) weight
 * (se->load.weight).
 *
 * In other words, if root_task_group has 10 tasks of weight
 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 * then A0's share of the cpu resource is:
 *
 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 *
 * We achieve this by letting root_task_group's tasks sit
 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 */
init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
rq->last_load_update_tick = jiffies;
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq->cpu_power = SCHED_POWER_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
rq->cpu = i;
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
INIT_LIST_HEAD(&rq->cfs_tasks);
rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
rq->nohz_flags = 0;
#endif
#ifdef CONFIG_NO_HZ_FULL
rq->last_sched_tick = 0;
#endif
#endif
init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
}
set_load_weight(&init_task);
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
/*
 * The boot idle thread does lazy MMU switching as well:
 */
atomic_inc(&init_mm.mm_count);
enter_lazy_tlb(&init_mm, current);
/*
 * Make us the idle thread. Technically, schedule() should not be
 * called from this thread, however somewhere below it might be,
 * but because we are the idle thread, we just pick up running again
 * when this runqueue becomes "idle".
 */
init_idle(current, smp_processor_id());
calc_load_update = jiffies + LOAD_FREQ;
/*
 * During early bootup we pretend to be a normal task:
 */
current->sched_class = &fair_sched_class;
#ifdef CONFIG_SMP
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
idle_thread_set_boot_cpu();
#endif
init_sched_fair_class();
scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Compare the current preempt depth (PREEMPT_ACTIVE stripped, RCU
 * read depth included) against the caller-expected offset. */
static inline int preempt_count_equals(int preempt_offset)
{
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
return (nested == preempt_offset);
}
/* Set once early initcalls have run; gates __might_sleep() reports
 * during early boot. */
static int __might_sleep_init_called;
int __init __might_sleep_init(void)
{
__might_sleep_init_called = 1;
return 0;
}
early_initcall(__might_sleep_init);
/*
 * Report (rate-limited to once per second) when a function that may
 * sleep is called from an invalid (atomic/irqs-off) context.
 * @preempt_offset accounts for preempt_count the caller legitimately
 * holds.
 */
void __might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
oops_in_progress)
return;
/* Stay quiet during early boot, before the early initcall has run. */
if (system_state != SYSTEM_RUNNING &&
(!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
printk(KERN_ERR
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
printk(KERN_ERR
"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
in_atomic(), irqs_disabled(),
current->pid, current->comm);
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
/*
 * Reset @p's policy to SCHED_NORMAL.  A runnable task is dequeued
 * around the change and requeued afterwards, and the current task is
 * asked to reschedule so the change takes effect.
 */
static void normalize_task(struct rq *rq, struct task_struct *p)
{
const struct sched_class *prev_class = p->sched_class;
struct sched_attr attr = {
.sched_policy = SCHED_NORMAL,
};
int old_prio = p->prio;
int on_rq;
on_rq = p->on_rq;
if (on_rq)
dequeue_task(rq, p, 0);
__setscheduler(rq, p, &attr);
if (on_rq) {
enqueue_task(rq, p, 0);
resched_task(rq->curr);
}
check_class_changed(rq, p, prev_class, old_prio);
}
/*
 * Force every user RT/deadline task back to SCHED_NORMAL and reset
 * negative nice levels.  Invoked via magic sysrq (CONFIG_MAGIC_SYSRQ).
 */
void normalize_rt_tasks(void)
{
struct task_struct *g, *p;
unsigned long flags;
struct rq *rq;
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, p) {
/*
 * Only normalize user tasks:
 */
if (!p->mm)
continue;
p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.statistics.wait_start = 0;
p->se.statistics.sleep_start = 0;
p->se.statistics.block_start = 0;
#endif
if (!dl_task(p) && !rt_task(p)) {
/*
 * Renice negative nice level userspace
 * tasks back to 0:
 */
if (TASK_NICE(p) < 0 && p->mm)
set_user_nice(p, 0);
continue;
}
/* Take pi_lock, then the task's runqueue lock, around the change. */
raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */
/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
/* A plain read is enough: nothing can be scheduling concurrently. */
return cpu_curr(cpu);
}
#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
#ifdef CONFIG_IA64
/**
* set_curr_task - set the current task for a given cpu.
* @cpu: the processor in question.
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
* are serviced on a separate stack. It allows the architecture to switch the
* notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPU's synchronized, and interrupts disabled, and
 * the caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
* re-starting the system.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
void set_curr_task(int cpu, struct task_struct *p)
{
	/* Direct assignment is safe only because every CPU is quiescent. */
	cpu_curr(cpu) = p;
}
#endif
#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
/* Release all per-class state of @tg, then the group itself. */
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kfree(tg);
}
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;
	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);
	/* Per-cpu CFS and RT runqueues for the new group. */
	if (!alloc_fair_sched_group(tg, parent))
		goto err;
	if (!alloc_rt_sched_group(tg, parent))
		goto err;
	return tg;
err:
	/* Partial allocations are torn down by the common free path. */
	free_sched_group(tg);
	return ERR_PTR(-ENOMEM);
}
/* Link @tg into the global group list and under @parent. */
void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;
	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);
	WARN_ON(!parent); /* root should already exist */
	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);
}
/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
	/* now it should be safe to free those cfs_rqs */
	free_sched_group(container_of(rhp, struct task_group, rcu));
}
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
	/* wait for possible concurrent references to cfs_rqs complete */
	call_rcu(&tg->rcu, free_sched_group_rcu);
}
/* Unlink @tg from the group hierarchy; freeing happens later via RCU. */
void sched_offline_group(struct task_group *tg)
{
	unsigned long flags;
	int i;
	/* end participation in shares distribution */
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);
	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}
/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
* reflect its new group.
*/
void sched_move_task(struct task_struct *tsk)
{
	struct task_group *tg;
	int on_rq, running;
	unsigned long flags;
	struct rq *rq;
	rq = task_rq_lock(tsk, &flags);
	running = task_current(rq, tsk);
	on_rq = tsk->on_rq;
	/* Take the task off the runqueue while its group is rewired. */
	if (on_rq)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		tsk->sched_class->put_prev_task(rq, tsk);
	/* Look up the destination group from the task's cgroup. */
	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
				lockdep_is_held(&tsk->sighand->siglock)),
			struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_move_group)
		tsk->sched_class->task_move_group(tsk, on_rq);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));
	/* Re-establish the previous running/queued state in the new group. */
	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, tsk, 0);
	task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
*/
static DEFINE_MUTEX(rt_constraints_mutex);
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;
	do_each_thread(g, p) {
		/* A task belongs to @tg if its runqueue's rt group is @tg. */
		if (rt_task(p) && task_rq(p)->rt.tg == tg)
			return 1;
	} while_each_thread(g, p);
	return 0;
}
/* A proposed RT bandwidth change, passed down the task-group tree walk. */
struct rt_schedulable_data {
	struct task_group *tg; /* group whose limits are being changed */
	u64 rt_period;         /* proposed period, in ns */
	u64 rt_runtime;        /* proposed runtime, in ns */
};
/*
 * Tree-walk callback: check that applying d's proposed period/runtime
 * (for d->tg) keeps this node of the group hierarchy schedulable.
 */
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;
	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;
	/* Substitute the proposed values for the group being changed. */
	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}
	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;
	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;
	total = to_ratio(period, runtime);
	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;
	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;
		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}
		sum += to_ratio(period, runtime);
	}
	if (sum > total)
		return -EINVAL;
	return 0;
}
/* Validate a proposed RT bandwidth change against the whole group tree. */
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;
	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};
	/* RCU protects the walk over the task-group tree. */
	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();
	return ret;
}
/*
 * Apply a new RT period/runtime (in ns) to @tg after validating it
 * against the hierarchy, then propagate the runtime to every per-cpu
 * rt_rq of the group.
 */
static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;
	mutex_lock(&rt_constraints_mutex);
	/* tasklist_lock is needed by tg_has_rt_tasks() during the check. */
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;
	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);
	return err;
}
/* Set the group's RT runtime from microseconds; negative means no limit. */
static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;
	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;
	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
/* Report the group's RT runtime in microseconds; -1 means unlimited. */
static long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;
	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;
	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}
/* Set the group's RT period from microseconds; zero is rejected. */
static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;
	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;
	if (rt_period == 0)
		return -EINVAL;
	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
/* Report the group's RT period in microseconds. */
static long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;
	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
/* Re-check the global RT limits against the whole group hierarchy. */
static int sched_rt_global_constraints(void)
{
	int ret = 0;
	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	/* No per-group change proposed; just re-validate the tree. */
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);
	return ret;
}
/*
 * May @tsk be attached to @tg?  A realtime task can only join a group
 * that actually has RT runtime to hand out; all other tasks may join.
 */
static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	if (!rt_task(tsk))
		return 1;
	return tg->rt_bandwidth.rt_runtime != 0;
}
#else /* !CONFIG_RT_GROUP_SCHED */
/* Without RT groups: push the new global runtime to every cpu's rt_rq. */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i, ret = 0;
	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
	return ret;
}
#endif /* CONFIG_RT_GROUP_SCHED */
/*
 * Check that the new global RT bandwidth would not fall below the
 * deadline bandwidth already allocated in any root domain.
 */
static int sched_dl_global_constraints(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	int cpu, ret = 0;
	unsigned long flags;
	rcu_read_lock();
	/*
	 * Here we want to check the bandwidth not being set to some
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains.
	 *
	 * FIXME: Cycling on all the CPUs is overdoing, but simpler than
	 * cycling on root_domains... Discussion on different/better
	 * solutions is welcome!
	 */
	for_each_possible_cpu(cpu) {
		struct dl_bw *dl_b = dl_bw_of(cpu);
		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
		if (ret)
			break;
	}
	rcu_read_unlock();
	return ret;
}
/* Push the new global RT limits into every cpu's deadline bandwidth pool. */
static void sched_dl_do_global(void)
{
	u64 new_bw = -1; /* all-ones sentinel, kept when runtime is RUNTIME_INF */
	int cpu;
	unsigned long flags;
	def_dl_bandwidth.dl_period = global_rt_period();
	def_dl_bandwidth.dl_runtime = global_rt_runtime();
	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
	rcu_read_lock();
	/*
	 * FIXME: As above...
	 */
	for_each_possible_cpu(cpu) {
		struct dl_bw *dl_b = dl_bw_of(cpu);
		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	}
	rcu_read_unlock();
}
/* Sanity-check the sysctl values: positive period, finite runtime <= period. */
static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;
	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
		return -EINVAL;
	return 0;
}
/* Copy the validated sysctl values into the default RT bandwidth pool. */
static void sched_rt_do_global(void)
{
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
}
/*
 * sysctl handler for the global RT period/runtime knobs.
 *
 * On write: validate the new values and check both the RT and the
 * deadline constraints before applying them; on any failure the old
 * values are restored.
 */
int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;
	mutex_lock(&mutex);
	/* Snapshot for rollback; proc_dointvec() updates them in place. */
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;
		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;
		ret = sched_dl_global_constraints();
		if (ret)
			goto undo;
		sched_rt_do_global();
		sched_dl_do_global();
	}
	/* Rollback path; "if (0)" keeps it unreachable on success. */
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);
	return ret;
}
/* sysctl handler for the round-robin timeslice knob. */
int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);
	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/* make sure that internally we keep jiffies */
	/* also, writing zero resets timeslice to default */
	if (!ret && write) {
		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
	}
	mutex_unlock(&mutex);
	return ret;
}
#ifdef CONFIG_CGROUP_SCHED
/* Map a cgroup subsystem state back to its enclosing task_group. */
static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}
/* cgroup callback: allocate the task group backing a new cpu cgroup. */
static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;
	if (!parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);
	return &tg->css;
}
/* cgroup callback: link the group into the hierarchy once it is live. */
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css_parent(css));
	if (parent)
		sched_online_group(tg, parent);
	return 0;
}
/* cgroup callback: free the group (deferred through RCU). */
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	sched_destroy_group(tg);
}
/* cgroup callback: unlink the group before it is destroyed. */
static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	sched_offline_group(tg);
}
/* cgroup callback: veto attaching tasks the target group cannot serve. */
static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
		struct cgroup_taskset *tset)
{
	struct task_struct *task;
	cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
#else
		/* We don't support RT-tasks being in separate groups */
		if (task->sched_class != &fair_sched_class)
			return -EINVAL;
#endif
	}
	return 0;
}
/* cgroup callback: move each task onto its new group's runqueues. */
static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
		struct cgroup_taskset *tset)
{
	struct task_struct *task;
	cgroup_taskset_for_each(task, css, tset)
		sched_move_task(task);
}
/* cgroup callback: detach an exiting task from its group. */
static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
		struct cgroup_subsys_state *old_css,
		struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't ran yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;
	sched_move_task(task);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/* cgroup file: write cpu.shares (scaled to the internal load unit). */
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
		struct cftype *cftype, u64 shareval)
{
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
/* cgroup file: read cpu.shares (scaled back to user units). */
static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
		struct cftype *cft)
{
	struct task_group *tg = css_tg(css);
	return (u64) scale_load_down(tg->shares);
}
#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);
const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
/*
 * Apply a new CFS bandwidth pair (period/quota, in ns) to @tg.
 *
 * Values are bounds-checked, validated against the parent/child quota
 * hierarchy, written under cfs_b->lock, and then pushed out to every
 * per-cpu cfs_rq (unthrottling any runqueue that was throttled).
 */
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	/* The root group's bandwidth is not limitable. */
	if (tg == &root_task_group)
		return -EINVAL;
	/*
	 * Ensure we have at some amount of bandwidth every period. This is
	 * to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;
	/*
	 * Likewise, bound things on the otherside by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;
	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;
	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;
	__refill_cfs_bandwidth_runtime(cfs_b);
	/* restart the period timer (if active) to handle new period expiry */
	if (runtime_enabled && cfs_b->timer_active) {
		/* force a reprogram */
		cfs_b->timer_active = 0;
		__start_cfs_bandwidth(cfs_b);
	}
	raw_spin_unlock_irq(&cfs_b->lock);
	/* Propagate the new settings to each cpu's runqueue. */
	for_each_possible_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;
		raw_spin_lock_irq(&rq->lock);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;
		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		raw_spin_unlock_irq(&rq->lock);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);
	return ret;
}
/* Set cpu.cfs_quota_us; a negative value means unlimited (RUNTIME_INF). */
int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period;
	period = ktime_to_ns(tg->cfs_bandwidth.period);
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
	return tg_set_cfs_bandwidth(tg, period, quota);
}
/* Read cpu.cfs_quota_us; -1 means unlimited. */
long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;
	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;
	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);
	return quota_us;
}
/* Set cpu.cfs_period_us, keeping the current quota unchanged. */
int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period;
	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;
	return tg_set_cfs_bandwidth(tg, period, quota);
}
/* Read cpu.cfs_period_us. */
long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;
	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);
	return cfs_period_us;
}
/* cgroup file accessor: read cpu.cfs_quota_us. */
static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
		struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}
/* cgroup file accessor: write cpu.cfs_quota_us. */
static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
		struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}
/* cgroup file accessor: read cpu.cfs_period_us. */
static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
		struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}
/* cgroup file accessor: write cpu.cfs_period_us. */
static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
		struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}
/* A proposed CFS bandwidth change, passed down the task-group tree walk. */
struct cfs_schedulable_data {
	struct task_group *tg; /* group whose limits are being changed */
	u64 period, quota;     /* proposed values (usecs during the walk) */
};
/*
* normalize group quota/period to be quota/max_period
* note: units are usecs
*/
static u64 normalize_cfs_quota(struct task_group *tg,
		struct cfs_schedulable_data *d)
{
	u64 quota, period;
	/* Use the proposed values for the group being changed. */
	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}
	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;
	return to_ratio(period, quota);
}
/*
 * Tree-walk callback: ensure each group's normalized quota fits within
 * its parent's effective quota, recording the result as we descend.
 */
static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;
	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchal_quota;
		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	/* "hierarchal" [sic] is the field's actual name in the struct. */
	cfs_b->hierarchal_quota = quota;
	return 0;
}
/* Validate a proposed period/quota against the whole group tree. */
static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};
	/* The tree walk works in usecs (see normalize_cfs_quota()). */
	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}
	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();
	return ret;
}
/* cgroup file: emit cpu.stat (bandwidth throttling statistics). */
static int cpu_stats_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
/* cgroup file accessor: write cpu.rt_runtime_us. */
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
		struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}
/* cgroup file accessor: read cpu.rt_runtime_us. */
static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
		struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}
/* cgroup file accessor: write cpu.rt_period_us. */
static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
		struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}
/* cgroup file accessor: read cpu.rt_period_us. */
static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
		struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */
/* Control files exposed by the cpu cgroup controller. */
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_stats_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
	{ } /* terminate */
};
/* The "cpu" cgroup controller: per-group CPU shares and bandwidth limits. */
struct cgroup_subsys cpu_cgroup_subsys = {
	.name = "cpu",
	.css_alloc = cpu_cgroup_css_alloc,
	.css_free = cpu_cgroup_css_free,
	.css_online = cpu_cgroup_css_online,
	.css_offline = cpu_cgroup_css_offline,
	.can_attach = cpu_cgroup_can_attach,
	.attach = cpu_cgroup_attach,
	.allow_attach = subsys_cgroup_allow_attach,
	.exit = cpu_cgroup_exit,
	.subsys_id = cpu_cgroup_subsys_id,
	.base_cftypes = cpu_files,
	.early_init = 1,
};
#endif /* CONFIG_CGROUP_SCHED */
/* Print a backtrace of whatever task is currently running on @cpu. */
void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}
| gpl-2.0 |
Evonline/ManaPlus | src/gui/ministatuswindow.cpp | 2 | 13566 | /*
* The ManaPlus Client
* Copyright (C) 2004-2009 The Mana World Development Team
* Copyright (C) 2009-2010 The Mana Developers
* Copyright (C) 2011-2012 The ManaPlus Developers
*
* This file is part of The ManaPlus Client.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "gui/ministatuswindow.h"
#include "animatedsprite.h"
#include "configuration.h"
#include "graphics.h"
#include "playerinfo.h"
#include "gui/chatwindow.h"
#include "gui/gui.h"
#include "gui/statuswindow.h"
#include "gui/statuspopup.h"
#include "gui/textpopup.h"
#include "gui/theme.h"
#include "gui/viewport.h"
#include "gui/widgets/chattab.h"
#include "gui/widgets/label.h"
#include "gui/widgets/progressbar.h"
#include "net/net.h"
#include "net/playerhandler.h"
#include "net/gamehandler.h"
#include "utils/dtor.h"
#include "utils/gettext.h"
#include "utils/stringutils.h"
#include "debug.h"
extern volatile int tick_time;
/**
 * Build the mini status popup: one progress bar per tracked resource
 * (HP, optional MP, XP, optional job XP, weight, inventory slots,
 * money, arrows, status), restore the saved bar layout and register
 * for attribute and inventory updates.
 */
MiniStatusWindow::MiniStatusWindow():
    Popup("MiniStatus", "ministatus.xml")
{
    listen(CHANNEL_ATTRIBUTES);
    mHpBar = createBar(0, 100, 20, Theme::PROG_HP, "hp bar", _("health bar"));
    StatusWindow::updateHPBar(mHpBar);
    // The MP bar only exists when the server supports a magic bar.
    if (Net::getGameHandler()->canUseMagicBar())
    {
        mMpBar = createBar(0, 100, 20, Net::getPlayerHandler()->canUseMagic()
            ? Theme::PROG_MP : Theme::PROG_NO_MP, "mp bar", _("mana bar"));
        StatusWindow::updateMPBar(mMpBar);
    }
    else
    {
        mMpBar = nullptr;
    }
    // 0/1 flag: server has a job stat AND the user enabled "showJob".
    int job = Net::getPlayerHandler()->getJobLocation()
        && serverConfig.getValueBool("showJob", false);
    mXpBar = createBar(0, 100, 20, Theme::PROG_EXP,
        "xp bar", _("experience bar"));
    StatusWindow::updateXPBar(mXpBar);
    if (job)
    {
        mJobBar = createBar(0, 100, 20, Theme::PROG_JOB, "job bar",
            _("job bar"));
        StatusWindow::updateJobBar(mJobBar);
    }
    else
    {
        mJobBar = nullptr;
    }
    mWeightBar = createBar(0, 140, 20, Theme::PROG_WEIGHT,
        "weight bar", _("weight bar"));
    mInvSlotsBar = createBar(0, 45, 20, Theme::PROG_INVY_SLOTS,
        "inventory slots bar", _("inventory slots bar"));
    mMoneyBar = createBar(0, 130, 20, Theme::PROG_INVY_SLOTS,
        "money bar", _("money bar"));
    mArrowsBar = createBar(0, 50, 20, Theme::PROG_INVY_SLOTS,
        "arrows bar", _("arrows bar"));
    loadBars();
    updateBars();
    setVisible(config.getValueBool(getPopupName() + "Visible", true));
    mStatusPopup = new StatusPopup();
    mTextPopup = new TextPopup();
    addMouseListener(this);
    Inventory *inv = PlayerInfo::getInventory();
    if (inv)
        inv->addInventoyListener(this);
    StatusWindow::updateMoneyBar(mMoneyBar);
    StatusWindow::updateArrowsBar(mArrowsBar);
    updateStatus();
}
MiniStatusWindow::~MiniStatusWindow()
{
    delete mTextPopup;
    mTextPopup = nullptr;
    delete mStatusPopup;
    mStatusPopup = nullptr;
    delete_all(mIcons);
    mIcons.clear();
    Inventory *inv = PlayerInfo::getInventory();
    if (inv)
        inv->removeInventoyListener(this);
    // Only hidden bars are deleted here; visible ones were add()ed to
    // the popup — presumably the widget hierarchy owns those.
    // NOTE(review): confirm no leak of visible bars on destruction.
    for (std::vector <ProgressBar*>::const_iterator it = mBars.begin(),
         it_end = mBars.end(); it != it_end; ++it)
    {
        ProgressBar *bar = *it;
        if (!bar)
            continue;
        if (!bar->isVisible())
            delete bar;
    }
}
/**
 * Create a progress bar, register it in mBars/mBarNames under @name
 * (also its action-event id) and use @description as its widget id.
 */
ProgressBar *MiniStatusWindow::createBar(float progress, int width, int height,
        int color, std::string name,
        std::string description)
{
    ProgressBar *bar = new ProgressBar(progress, width, height, color);
    bar->setActionEventId(name);
    bar->setId(description);
    mBars.push_back(bar);
    mBarNames[name] = bar;
    return bar;
}
/**
 * Re-layout the bars: detach them all, then re-add the visible ones
 * left-to-right and shrink the popup to fit the last one.
 */
void MiniStatusWindow::updateBars()
{
    int x = 0;
    ProgressBar* lastBar = nullptr;
    // First detach everything so hidden bars drop out of the layout.
    for (std::vector <ProgressBar*>::const_iterator it = mBars.begin(),
         it_end = mBars.end(); it != it_end; ++it)
    {
        safeRemove(*it);
    }
    for (std::vector <ProgressBar*>::const_iterator it = mBars.begin(),
         it_end = mBars.end(); it != it_end; ++it)
    {
        ProgressBar *bar = *it;
        if (!bar)
            continue;
        if (bar->isVisible())
        {
            bar->setPosition(x, 3);
            add(bar);
            x += bar->getWidth() + 3; // 3px gap between bars
            lastBar = bar;
        }
    }
    if (lastBar)
    {
        setContentSize(lastBar->getX() + lastBar->getWidth(),
            lastBar->getY() + lastBar->getHeight());
    }
}
/** Store @sprite at @index, growing the list and freeing any previous icon. */
void MiniStatusWindow::setIcon(int index, AnimatedSprite *sprite)
{
    if (index >= static_cast<int>(mIcons.size()))
        mIcons.resize(index + 1, nullptr);
    delete mIcons[index];
    mIcons[index] = sprite;
}
/** Free and remove the icon at @index; later icons shift down one slot. */
void MiniStatusWindow::eraseIcon(int index)
{
    if (index < static_cast<int>(mIcons.size()))
    {
        delete mIcons[index];
        mIcons.erase(mIcons.begin() + index);
    }
}
/** Draw the status icons in a row to the right of the status bar. */
void MiniStatusWindow::drawIcons(Graphics *graphics)
{
    // Draw icons
    int icon_x = mStatusBar->getX() + mStatusBar->getWidth() + 4;
    for (unsigned int i = 0; i < mIcons.size(); i++)
    {
        if (mIcons[i])
        {
            mIcons[i]->draw(graphics, icon_x, 3);
            icon_x += 2 + mIcons[i]->getWidth(); // 2px gap between icons
        }
    }
}
/** Refresh the bar that corresponds to a changed attribute or stat. */
void MiniStatusWindow::processEvent(Channels channel A_UNUSED,
        const DepricatedEvent &event)
{
    if (event.getName() == EVENT_UPDATEATTRIBUTE)
    {
        int id = event.getInt("id");
        if (id == HP || id == MAX_HP)
            StatusWindow::updateHPBar(mHpBar);
        else if (id == MP || id == MAX_MP)
            StatusWindow::updateMPBar(mMpBar);
        else if (id == EXP || id == EXP_NEEDED)
            StatusWindow::updateXPBar(mXpBar);
        else if (id == TOTAL_WEIGHT || id == MAX_WEIGHT)
            StatusWindow::updateWeightBar(mWeightBar);
        else if (id == MONEY)
            StatusWindow::updateMoneyBar(mMoneyBar);
    }
    else if (event.getName() == EVENT_UPDATESTAT)
    {
        // Stat updates can affect both the MP and the job bar.
        StatusWindow::updateMPBar(mMpBar);
        StatusWindow::updateJobBar(mJobBar);
    }
}
/** Refresh the status bar and, if open, the status popup. */
void MiniStatusWindow::updateStatus()
{
    StatusWindow::updateStatusBar(mStatusBar);
    if (mStatusPopup && mStatusPopup->isVisible())
        mStatusPopup->update();
}
/** Per-frame logic: advance the icon animations. */
void MiniStatusWindow::logic()
{
    Popup::logic();
    for (unsigned int i = 0; i < mIcons.size(); i++)
    {
        if (mIcons[i])
            mIcons[i]->update(tick_time * 10); // tick_time scaled for the sprite clock -- TODO confirm units
    }
}
/** Paint only the child widgets (bars); no background is drawn here. */
void MiniStatusWindow::draw(gcn::Graphics *graphics)
{
    drawChildren(graphics);
}
/**
 * Hover handler: show a tooltip popup describing the bar under the
 * pointer (current/max values, plus the remainder still needed for the
 * XP and job bars), or the status popup for the status bar.  The x/y
 * coordinates are translated from window space to screen space.
 */
void MiniStatusWindow::mouseMoved(gcn::MouseEvent &event)
{
    Popup::mouseMoved(event);
    const int x = event.getX();
    const int y = event.getY();
    if (event.getSource() == mStatusBar)
    {
        mStatusPopup->view(x + getX(), y + getY());
        mTextPopup->hide();
    }
    else if (event.getSource() == mXpBar)
    {
        // Past the threshold only the raw numbers are shown; otherwise
        // a second line reports how much XP is still needed.
        if (PlayerInfo::getAttribute(EXP)
            > PlayerInfo::getAttribute(EXP_NEEDED))
        {
            mTextPopup->show(x + getX(), y + getY(),
                event.getSource()->getId(),
                strprintf("%u/%u", PlayerInfo::getAttribute(EXP),
                PlayerInfo::getAttribute(EXP_NEEDED)));
        }
        else
        {
            mTextPopup->show(x + getX(), y + getY(),
                event.getSource()->getId(),
                strprintf("%u/%u", PlayerInfo::getAttribute(EXP),
                PlayerInfo::getAttribute(EXP_NEEDED)),
                strprintf("%s: %u", _("Need"),
                PlayerInfo::getAttribute(EXP_NEEDED)
                - PlayerInfo::getAttribute(EXP)));
        }
        mStatusPopup->hide();
    }
    else if (event.getSource() == mHpBar)
    {
        mTextPopup->show(x + getX(), y + getY(),
            event.getSource()->getId(),
            strprintf("%u/%u", PlayerInfo::getAttribute(HP),
            PlayerInfo::getAttribute(MAX_HP)));
        mStatusPopup->hide();
    }
    else if (event.getSource() == mMpBar)
    {
        mTextPopup->show(x + getX(), y + getY(),
            event.getSource()->getId(),
            strprintf("%u/%u", PlayerInfo::getAttribute(MP),
            PlayerInfo::getAttribute(MAX_MP)));
        mStatusPopup->hide();
    }
    else if (event.getSource() == mJobBar)
    {
        std::pair<int, int> exp = PlayerInfo::getStatExperience(
            Net::getPlayerHandler()->getJobLocation());
        if (exp.first > exp.second)
        {
            mTextPopup->show(x + getX(), y + getY(),
                event.getSource()->getId(),
                strprintf("%u/%u", exp.first,
                exp.second));
        }
        else
        {
            mTextPopup->show(x + getX(), y + getY(),
                event.getSource()->getId(),
                strprintf("%u/%u", exp.first,
                exp.second),
                strprintf("%s: %u", _("Need"),
                exp.second
                - exp.first));
        }
        mStatusPopup->hide();
    }
    else if (event.getSource() == mWeightBar)
    {
        mTextPopup->show(x + getX(), y + getY(),
            event.getSource()->getId(),
            strprintf("%u/%u", PlayerInfo::getAttribute(TOTAL_WEIGHT),
            PlayerInfo::getAttribute(MAX_WEIGHT)));
        mStatusPopup->hide();
    }
    else if (event.getSource() == mInvSlotsBar)
    {
        Inventory *inv = PlayerInfo::getInventory();
        if (inv)
        {
            const int usedSlots = inv->getNumberOfSlotsUsed();
            const int maxSlots = inv->getSize();
            mTextPopup->show(x + getX(), y + getY(),
                event.getSource()->getId(),
                strprintf("%u/%u", usedSlots, maxSlots));
        }
        mStatusPopup->hide();
    }
    else if (event.getSource() == mMoneyBar)
    {
        mTextPopup->show(x + getX(), y + getY(),
            event.getSource()->getId(),
            toString(PlayerInfo::getAttribute(MONEY)));
    }
    else
    {
        // Pointer is over no known bar: clear any tooltips.
        mTextPopup->hide();
        mStatusPopup->hide();
    }
}
/**
 * Right-click handler: open the viewport context menu for the
 * progress bar under the cursor (coordinates translated to screen
 * space).  Other buttons and non-bar sources are ignored.
 */
void MiniStatusWindow::mousePressed(gcn::MouseEvent &event)
{
    // Without a viewport there is nowhere to show the context menu.
    if (!viewport)
        return;
    if (event.getButton() == gcn::MouseEvent::RIGHT)
    {
        // Only progress bars have a context menu here.
        ProgressBar *const bar = dynamic_cast<ProgressBar*>(event.getSource());
        if (!bar)
            return;
        // viewport was already checked above, so no second guard is needed.
        viewport->showPopup(getX() + event.getX(),
            getY() + event.getY(), bar);
    }
}
/** Hide both hover popups when the pointer leaves the window. */
void MiniStatusWindow::mouseExited(gcn::MouseEvent &event)
{
    Popup::mouseExited(event);
    mTextPopup->hide();
    mStatusPopup->hide();
}
/** Show or hide the bar registered under @name, then re-layout and save. */
void MiniStatusWindow::showBar(std::string name, bool visible)
{
    ProgressBar *bar = mBarNames[name];
    if (!bar)
        return;
    bar->setVisible(visible);
    updateBars();
    saveBars();
}
void MiniStatusWindow::loadBars()
{
if (!config.getValue("ministatussaved", 0))
{
if (mWeightBar)
mWeightBar->setVisible(false);
if (mInvSlotsBar)
mInvSlotsBar->setVisible(false);
if (mMoneyBar)
mMoneyBar->setVisible(false);
if (mArrowsBar)
mArrowsBar->setVisible(false);
return;
}
for (int f = 0; f < 10; f ++)
{
std::string str = config.getValue("ministatus" + toString(f), "");
if (str == "" || str == "status bar")
continue;
ProgressBar *bar = mBarNames[str];
if (!bar)
continue;
bar->setVisible(false);
}
}
void MiniStatusWindow::saveBars()
{
int i = 0;
for (std::vector <ProgressBar*>::const_iterator it = mBars.begin(),
it_end = mBars.end(); it != it_end; ++it)
{
ProgressBar *bar = *it;
if (!bar->isVisible())
{
config.setValue("ministatus" + toString(i),
bar->getActionEventId());
i ++;
}
}
for (int f = i; f < 10; f ++)
config.deleteKey("ministatus" + toString(f));
config.setValue("ministatussaved", true);
}
void MiniStatusWindow::slotsChanged(Inventory* inventory)
{
if (!inventory)
return;
if (inventory->getType() == Inventory::INVENTORY)
StatusWindow::updateInvSlotsBar(mInvSlotsBar);
}
// Refresh the arrows/ammo progress bar via the shared StatusWindow helper.
void MiniStatusWindow::updateArrows()
{
    StatusWindow::updateArrowsBar(mArrowsBar);
}
| gpl-2.0 |
teamfx/openjfx-10-dev-rt | modules/javafx.web/src/main/native/Source/WebCore/css/FontFace.cpp | 2 | 14717 | /*
* Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "FontFace.h"
#include "CSSFontFaceSource.h"
#include "CSSFontFeatureValue.h"
#include "CSSParser.h"
#include "CSSUnicodeRangeValue.h"
#include "CSSValueList.h"
#include "CSSValuePool.h"
#include "Document.h"
#include "FontVariantBuilder.h"
#include "JSFontFace.h"
#include "StyleProperties.h"
#include <runtime/ArrayBuffer.h>
#include <runtime/ArrayBufferView.h>
#include <runtime/JSCInlines.h>
namespace WebCore {
// Attaches raw font bytes (an ArrayBufferView) to the given CSSFontFace as
// an in-memory source. Returns false because binary data never requires
// asynchronous loading.
static bool populateFontFaceWithArrayBuffer(CSSFontFace& fontFace, Ref<JSC::ArrayBufferView>&& arrayBufferView)
{
    // Build the source and immediately transfer ownership to the font face.
    fontFace.adoptSource(std::make_unique<CSSFontFaceSource>(fontFace, String(), nullptr, nullptr, WTFMove(arrayBufferView)));
    return false;
}
// Implements the FontFace(family, source, descriptors) constructor from the
// CSS Font Loading API. Parses the family, converts the source (CSS src
// string, ArrayBufferView, or ArrayBuffer), applies each descriptor with its
// IDL default, and for binary sources loads synchronously.
ExceptionOr<Ref<FontFace>> FontFace::create(Document& document, const String& family, Source&& source, const Descriptors& descriptors)
{
    auto result = adoptRef(*new FontFace(document.fontSelector()));

    // Binary (ArrayBuffer) sources flip this to false below; URL sources load lazily.
    bool dataRequiresAsynchronousLoading = true;

    auto setFamilyResult = result->setFamily(family);
    if (setFamilyResult.hasException())
        return setFamilyResult.releaseException();

    // Dispatch on the variant alternative actually passed by the caller.
    auto sourceConversionResult = WTF::switchOn(source,
        [&] (String& string) -> ExceptionOr<void> {
            // A string source must parse as a CSS "src" descriptor value list.
            auto value = FontFace::parseString(string, CSSPropertySrc);
            if (!is<CSSValueList>(value.get()))
                return Exception { SYNTAX_ERR };
            CSSFontFace::appendSources(result->backing(), downcast<CSSValueList>(*value), &document, false);
            return { };
        },
        [&] (RefPtr<ArrayBufferView>& arrayBufferView) -> ExceptionOr<void> {
            dataRequiresAsynchronousLoading = populateFontFaceWithArrayBuffer(result->backing(), arrayBufferView.releaseNonNull());
            return { };
        },
        [&] (RefPtr<ArrayBuffer>& arrayBuffer) -> ExceptionOr<void> {
            // Wrap the whole buffer in a Uint8Array view and reuse the view path.
            unsigned byteLength = arrayBuffer->byteLength();
            auto arrayBufferView = JSC::Uint8Array::create(WTFMove(arrayBuffer), 0, byteLength);
            dataRequiresAsynchronousLoading = populateFontFaceWithArrayBuffer(result->backing(), arrayBufferView.releaseNonNull());
            return { };
        }
    );

    if (sourceConversionResult.hasException())
        return sourceConversionResult.releaseException();

    // These ternaries match the default strings inside the FontFaceDescriptors dictionary inside FontFace.idl.
    auto setStyleResult = result->setStyle(descriptors.style.isEmpty() ? ASCIILiteral("normal") : descriptors.style);
    if (setStyleResult.hasException())
        return setStyleResult.releaseException();
    auto setWeightResult = result->setWeight(descriptors.weight.isEmpty() ? ASCIILiteral("normal") : descriptors.weight);
    if (setWeightResult.hasException())
        return setWeightResult.releaseException();
    auto setStretchResult = result->setStretch(descriptors.stretch.isEmpty() ? ASCIILiteral("normal") : descriptors.stretch);
    if (setStretchResult.hasException())
        return setStretchResult.releaseException();
    auto setUnicodeRangeResult = result->setUnicodeRange(descriptors.unicodeRange.isEmpty() ? ASCIILiteral("U+0-10FFFF") : descriptors.unicodeRange);
    if (setUnicodeRangeResult.hasException())
        return setUnicodeRangeResult.releaseException();
    auto setVariantResult = result->setVariant(descriptors.variant.isEmpty() ? ASCIILiteral("normal") : descriptors.variant);
    if (setVariantResult.hasException())
        return setVariantResult.releaseException();
    auto setFeatureSettingsResult = result->setFeatureSettings(descriptors.featureSettings.isEmpty() ? ASCIILiteral("normal") : descriptors.featureSettings);
    if (setFeatureSettingsResult.hasException())
        return setFeatureSettingsResult.releaseException();

    // In-memory data can be loaded immediately and must end up in Success state.
    if (!dataRequiresAsynchronousLoading) {
        result->backing().load();
        ASSERT(result->backing().status() == CSSFontFace::Status::Success);
    }

    return WTFMove(result);
}
// Wraps an existing CSSFontFace (e.g. one created from a stylesheet rule)
// in a JavaScript-visible FontFace object.
Ref<FontFace> FontFace::create(CSSFontFace& face)
{
    return adoptRef(*new FontFace(face));
}
// Constructs a FontFace with a brand-new backing CSSFontFace tied to the
// given selector, and registers this wrapper as a client of the backing.
FontFace::FontFace(CSSFontSelector& fontSelector)
    : m_weakPtrFactory(this)
    , m_backing(CSSFontFace::create(&fontSelector, nullptr, this))
{
    m_backing->addClient(*this);
}
// Constructs a FontFace wrapper around an already-existing backing face.
FontFace::FontFace(CSSFontFace& face)
    : m_weakPtrFactory(this)
    , m_backing(face)
{
    m_backing->addClient(*this);
}
// Unregister from the backing face so it stops notifying a dead wrapper.
FontFace::~FontFace()
{
    m_backing->removeClient(*this);
}
// Hands out a weak reference to this wrapper (safe to hold across its death).
WeakPtr<FontFace> FontFace::createWeakPtr() const
{
    return m_weakPtrFactory.createWeakPtr();
}
// Parses one @font-face descriptor value; returns null on parse failure.
RefPtr<CSSValue> FontFace::parseString(const String& string, CSSPropertyID propertyID)
{
    // FIXME: Should use the Document to get the right parsing mode.
    return CSSParser::parseFontFaceDescriptor(propertyID, string, HTMLStandardMode);
}
// Sets the "family" attribute. An empty or unparseable family name is a
// SYNTAX_ERR, per the CSS Font Loading spec.
ExceptionOr<void> FontFace::setFamily(const String& family)
{
    if (family.isEmpty())
        return Exception { SYNTAX_ERR };

    auto parsedFamilies = parseString(family, CSSPropertyFontFamily);
    if (parsedFamilies && m_backing->setFamilies(*parsedFamilies))
        return { };
    return Exception { SYNTAX_ERR };
}
// Sets the "style" descriptor; empty or unparseable input is a SYNTAX_ERR.
ExceptionOr<void> FontFace::setStyle(const String& style)
{
    if (style.isEmpty())
        return Exception { SYNTAX_ERR };

    auto parsedStyle = parseString(style, CSSPropertyFontStyle);
    if (parsedStyle && m_backing->setStyle(*parsedStyle))
        return { };
    return Exception { SYNTAX_ERR };
}
// Sets the "weight" descriptor; empty or unparseable input is a SYNTAX_ERR.
ExceptionOr<void> FontFace::setWeight(const String& weight)
{
    if (weight.isEmpty())
        return Exception { SYNTAX_ERR };

    auto parsedWeight = parseString(weight, CSSPropertyFontWeight);
    if (parsedWeight && m_backing->setWeight(*parsedWeight))
        return { };
    return Exception { SYNTAX_ERR };
}
// The "stretch" descriptor is intentionally ignored: this port does not
// support font-stretch, so any value is accepted and discarded.
ExceptionOr<void> FontFace::setStretch(const String&)
{
    // We don't support font-stretch. Swallow the call.
    return { };
}
// Sets the "unicodeRange" descriptor; empty or unparseable input is a
// SYNTAX_ERR.
ExceptionOr<void> FontFace::setUnicodeRange(const String& unicodeRange)
{
    if (unicodeRange.isEmpty())
        return Exception { SYNTAX_ERR };

    auto parsedRange = parseString(unicodeRange, CSSPropertyUnicodeRange);
    if (parsedRange && m_backing->setUnicodeRange(*parsedRange))
        return { };
    return Exception { SYNTAX_ERR };
}
/**
 * Sets the "variant" descriptor by parsing the font-variant shorthand and
 * applying each longhand to the backing face; longhands absent from the
 * parsed shorthand are reset to "normal". On any failure the previous
 * settings are restored and SYNTAX_ERR is returned.
 */
ExceptionOr<void> FontFace::setVariant(const String& variant)
{
    if (variant.isEmpty())
        return Exception { SYNTAX_ERR };

    auto style = MutableStyleProperties::create();
    auto result = CSSParser::parseValue(style, CSSPropertyFontVariant, variant, true, HTMLStandardMode);
    if (result == CSSParser::ParseResult::Error)
        return Exception { SYNTAX_ERR };

    // FIXME: Would be much better to stage the new settings and set them all at once
    // instead of this dance where we make a backup and revert to it if something fails.
    FontVariantSettings backup = m_backing->variantSettings();

    auto normal = CSSValuePool::singleton().createIdentifierValue(CSSValueNormal);
    bool success = true;

    // Applies one font-variant longhand: use the parsed value when present,
    // otherwise reset that setting to "normal". This replaces six identical
    // copy-pasted if/else blocks from the original.
    auto applyLonghand = [&](CSSPropertyID propertyID, bool (CSSFontFace::*setter)(CSSValue&)) {
        if (auto value = style->getPropertyCSSValue(propertyID))
            success &= (m_backing.get().*setter)(*value);
        else
            (m_backing.get().*setter)(normal.get());
    };

    applyLonghand(CSSPropertyFontVariantLigatures, &CSSFontFace::setVariantLigatures);
    applyLonghand(CSSPropertyFontVariantPosition, &CSSFontFace::setVariantPosition);
    applyLonghand(CSSPropertyFontVariantCaps, &CSSFontFace::setVariantCaps);
    applyLonghand(CSSPropertyFontVariantNumeric, &CSSFontFace::setVariantNumeric);
    applyLonghand(CSSPropertyFontVariantAlternates, &CSSFontFace::setVariantAlternates);
    applyLonghand(CSSPropertyFontVariantEastAsian, &CSSFontFace::setVariantEastAsian);

    if (!success) {
        // Roll back any partially-applied settings.
        m_backing->setVariantSettings(backup);
        return Exception { SYNTAX_ERR };
    }

    return { };
}
// Sets the "featureSettings" descriptor; empty or unparseable input is a
// SYNTAX_ERR.
ExceptionOr<void> FontFace::setFeatureSettings(const String& featureSettings)
{
    if (featureSettings.isEmpty())
        return Exception { SYNTAX_ERR };

    auto parsedSettings = parseString(featureSettings, CSSPropertyFontFeatureSettings);
    if (!parsedSettings)
        return Exception { SYNTAX_ERR };
    m_backing->setFeatureSettings(*parsedSettings);
    return { };
}
// Returns the "family" attribute as CSS text, after syncing with any
// pending style changes on the backing face.
String FontFace::family() const
{
    m_backing->updateStyleIfNeeded();
    return m_backing->families()->cssText();
}
// Returns the "style" attribute ("normal" or "italic") derived from the
// backing face's traits mask.
String FontFace::style() const
{
    m_backing->updateStyleIfNeeded();

    auto styleBits = m_backing->traitsMask() & FontStyleMask;
    if (styleBits == FontStyleItalicMask)
        return String("italic", String::ConstructFromLiteral);
    // Only normal and italic are representable; anything else is a bug, but
    // fall back to "normal" in release builds just like the original.
    if (styleBits != FontStyleNormalMask)
        ASSERT_NOT_REACHED();
    return String("normal", String::ConstructFromLiteral);
}
// Returns the "weight" attribute as a CSS keyword/number string. Weights
// 400 and 700 map to their keywords "normal" and "bold".
String FontFace::weight() const
{
    m_backing->updateStyleIfNeeded();
    switch (m_backing->traitsMask() & FontWeightMask) {
    case FontWeight100Mask:
        return String("100", String::ConstructFromLiteral);
    case FontWeight200Mask:
        return String("200", String::ConstructFromLiteral);
    case FontWeight300Mask:
        return String("300", String::ConstructFromLiteral);
    case FontWeight400Mask:
        return String("normal", String::ConstructFromLiteral);
    case FontWeight500Mask:
        return String("500", String::ConstructFromLiteral);
    case FontWeight600Mask:
        return String("600", String::ConstructFromLiteral);
    case FontWeight700Mask:
        return String("bold", String::ConstructFromLiteral);
    case FontWeight800Mask:
        return String("800", String::ConstructFromLiteral);
    case FontWeight900Mask:
        return String("900", String::ConstructFromLiteral);
    }
    // The mask should always match exactly one weight; default to "normal".
    ASSERT_NOT_REACHED();
    return String("normal", String::ConstructFromLiteral);
}
// font-stretch is unsupported (see setStretch), so always report "normal".
String FontFace::stretch() const
{
    return ASCIILiteral("normal");
}
// Returns the "unicodeRange" attribute as a comma-separated CSS range list,
// or the full-range default when no ranges are set.
String FontFace::unicodeRange() const
{
    m_backing->updateStyleIfNeeded();

    auto& ranges = m_backing->ranges();
    if (!ranges.size())
        return ASCIILiteral("U+0-10FFFF");

    auto values = CSSValueList::createCommaSeparated();
    for (auto& range : ranges)
        values->append(CSSUnicodeRangeValue::create(range.from, range.to));
    return values->cssText();
}
// Returns the "variant" attribute, serialized from the backing face's
// variant settings.
String FontFace::variant() const
{
    m_backing->updateStyleIfNeeded();
    return computeFontVariant(m_backing->variantSettings())->cssText();
}
// Returns the "featureSettings" attribute as a comma-separated list of
// feature tag/value pairs, or "normal" when no features are configured.
String FontFace::featureSettings() const
{
    m_backing->updateStyleIfNeeded();

    auto& features = m_backing->featureSettings();
    if (!features.size())
        return ASCIILiteral("normal");

    auto list = CSSValueList::createCommaSeparated();
    for (auto& feature : features)
        list->append(CSSFontFeatureValue::create(FontTag(feature.tag()), feature.value()));
    return list->cssText();
}
// Maps the backing face's internal status onto the IDL-visible LoadStatus.
// Both TimedOut and Failure surface as "error" to script.
auto FontFace::status() const -> LoadStatus
{
    switch (m_backing->status()) {
    case CSSFontFace::Status::Pending:
        return LoadStatus::Unloaded;
    case CSSFontFace::Status::Loading:
        return LoadStatus::Loading;
    case CSSFontFace::Status::Success:
        return LoadStatus::Loaded;
    case CSSFontFace::Status::TimedOut:
    case CSSFontFace::Status::Failure:
        return LoadStatus::Error;
    }
    ASSERT_NOT_REACHED();
    return LoadStatus::Error;
}
// Re-points this wrapper at a different backing CSSFontFace. The order is
// significant: unregister from the old face before replacing m_backing,
// then register with the new face and tell it about its wrapper.
void FontFace::adopt(CSSFontFace& newFace)
{
    m_backing->removeClient(*this);
    m_backing = newFace;
    m_backing->addClient(*this);
    newFace.setWrapper(*this);
}
// Client callback from the backing CSSFontFace when its load state changes.
// The ref() taken on entering Loading is balanced by the deref() in the
// Success/Failure terminal states, keeping this wrapper alive until its
// loaded-promise can be settled.
void FontFace::fontStateChanged(CSSFontFace& face, CSSFontFace::Status, CSSFontFace::Status newState)
{
    ASSERT_UNUSED(face, &face == m_backing.ptr());
    switch (newState) {
    case CSSFontFace::Status::Loading:
        // We still need to resolve promises when loading completes, even if all references to use have fallen out of scope.
        ref();
        break;
    case CSSFontFace::Status::TimedOut:
        break;
    case CSSFontFace::Status::Success:
        // Settle the pending loaded-promise (if any), then drop the
        // self-reference taken when loading started.
        if (m_promise)
            std::exchange(m_promise, std::nullopt)->resolve(*this);
        deref();
        return;
    case CSSFontFace::Status::Failure:
        if (m_promise)
            std::exchange(m_promise, std::nullopt)->reject(NETWORK_ERR);
        deref();
        return;
    case CSSFontFace::Status::Pending:
        // A face never transitions back to Pending.
        ASSERT_NOT_REACHED();
        return;
    }
}
// Associates the script-visible "loaded" promise with this face. If the
// load already finished, settle the promise immediately; otherwise stash it
// for fontStateChanged() to settle later.
void FontFace::registerLoaded(Promise&& promise)
{
    ASSERT(!m_promise);

    auto currentStatus = m_backing->status();
    if (currentStatus == CSSFontFace::Status::Success) {
        promise.resolve(*this);
        return;
    }
    if (currentStatus == CSSFontFace::Status::TimedOut
        || currentStatus == CSSFontFace::Status::Failure) {
        promise.reject(NETWORK_ERR);
        return;
    }

    // Pending or Loading: keep the promise until the load settles.
    m_promise = WTFMove(promise);
}
// Kicks off loading of the backing face (the FontFace.load() entry point).
void FontFace::load()
{
    m_backing->load();
}
}
| gpl-2.0 |
kvonbredow/JoystickMouse | xdotool/cmd_set_window.c | 2 | 3226 | #define _GNU_SOURCE 1
#ifndef __USE_BSD
#define __USE_BSD /* for strdup on linux/glibc */
#endif /* __USE_BSD */
#include <stdlib.h> /* for free */
#include <string.h>
#include "xdo_cmd.h"
/* Release the option strings duplicated while parsing the command line.
 * free(NULL) is a no-op, so options that were never given are safe. */
static void cmd_set_window_free_options(char *name, char *icon, char *role,
                                        char *_class, char *classname) {
  free(name);
  free(icon);
  free(role);
  free(_class);
  free(classname);
}

/* xdotool set_window: set WM_NAME/WM_ICON_NAME/WM_WINDOW_ROLE/class,
 * override-redirect and urgency hints on the selected window(s).
 * Returns EXIT_SUCCESS/EXIT_FAILURE for option handling, 0 on success. */
int cmd_set_window(context_t *context) {
  char *cmd = *context->argv;
  int c;
  char *role = NULL, *icon = NULL, *name = NULL, *_class = NULL,
       *classname = NULL;
  int override_redirect = -1;
  int urgency = -1;
  const char *window_arg = "%1";
  struct option longopts[] = {
    { "name", required_argument, NULL, 'n' },
    { "icon-name", required_argument, NULL, 'i' },
    { "role", required_argument, NULL, 'r' },
    { "class", required_argument, NULL, 'C' },
    { "classname", required_argument, NULL, 'N' },
    { "overrideredirect", required_argument, NULL, 'O' },
    { "urgency", required_argument, NULL, 'u' },
    { "help", no_argument, NULL, 'h' },
    { 0, 0, 0, 0 },
  };
  int option_index;
  static const char *usage =
    "Usage: %s [options] [window=%1]\n"
    "--name NAME - set the window name (aka title)\n"
    "--icon-name NAME - set the window name while minimized/iconified\n"
    "--role ROLE - set the window's role string\n"
    "--class CLASS - set the window's class\n"
    "--classname CLASSNAME - set the window's classname\n"
    "--overrideredirect OVERRIDE - set override_redirect.\n"
    " 1 means the window manager will not manage this window.\n"
    "--urgency URGENT - set the window's urgency hint.\n"
    " 1 sets the urgency flag, 0 removes it.\n";

  while ((c = getopt_long_only(context->argc, context->argv, "+hn:i:r:C:N:u:",
                               longopts, &option_index)) != -1) {
    switch(c) {
      case 'n':
        free(name); /* don't leak a previously given --name */
        name = strdup(optarg);
        break;
      case 'i':
        free(icon);
        icon = strdup(optarg);
        break;
      case 'r':
        free(role);
        role = strdup(optarg);
        break;
      case 'C':
        free(_class);
        _class = strdup(optarg);
        break;
      case 'N':
        free(classname);
        classname = strdup(optarg);
        break;
      case 'O':
        override_redirect = (atoi(optarg) > 0);
        break;
      case 'u':
        urgency = (atoi(optarg) > 0);
        break;
      case 'h':
        printf(usage, cmd);
        consume_args(context, context->argc);
        cmd_set_window_free_options(name, icon, role, _class, classname);
        return EXIT_SUCCESS;
      default:
        fprintf(stderr, usage, cmd);
        cmd_set_window_free_options(name, icon, role, _class, classname);
        return EXIT_FAILURE;
    }
  }

  /* adjust context->argc, argv */
  consume_args(context, optind);

  if (!window_get_arg(context, 0, 0, &window_arg)) {
    fprintf(stderr, usage, cmd);
    cmd_set_window_free_options(name, icon, role, _class, classname);
    return EXIT_FAILURE;
  }

  /* TODO(sissel): error handling needed... */
  window_each(context, window_arg, {
    if (name)
      xdo_window_setprop(context->xdo, window, "WM_NAME", name);
    if (icon)
      xdo_window_setprop(context->xdo, window, "WM_ICON_NAME", icon);
    if (role)
      xdo_window_setprop(context->xdo, window, "WM_WINDOW_ROLE", role);
    if (classname || _class)
      xdo_window_setclass(context->xdo, window, classname, _class);
    if (override_redirect != -1)
      xdo_window_set_override_redirect(context->xdo, window,
                                       override_redirect);
    if (urgency != -1)
      xdo_window_seturgency(context->xdo, window, urgency);
  }); /* window_each(...) */

  /* The strdup()ed option values were leaked by the original code. */
  cmd_set_window_free_options(name, icon, role, _class, classname);
  return 0;
}
| gpl-2.0 |
prool/ccx_prool | CalculiX/ccx_2.15/src/deformationplasticitys.f | 2 | 3416 | !
! CalculiX - A 3-dimensional finite element program
! Copyright (C) 1998-2018 Guido Dhondt
!
! This program is free software; you can redistribute it and/or
! modify it under the terms of the GNU General Public License as
! published by the Free Software Foundation(version 2);
!
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program; if not, write to the Free Software
! Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
!
subroutine deformationplasticitys(inpc,textpart,elcon,nelcon,
& nmat,ntmat_,ncmat_,irstrt,istep,istat,n,iperturb,iline,ipol,
& inl,ipoinp,inp,ipoinpc,ier)
!
! reading the input deck: *DEFORMATION PLASTICITY
!
implicit none
!
character*1 inpc(*)
character*132 textpart(16)
!
integer nelcon(2,*),nmat,ntmat,ntmat_,istep,istat,ier,
& n,key,i,iperturb(2),iend,ncmat_,irstrt(*),iline,ipol,inl,
& ipoinp(2,*),inp(3,*),ipoinpc(0:*)
!
real*8 elcon(0:ncmat_,ntmat_,*)
!
ntmat=0
iperturb(1)=3
iperturb(2)=1
write(*,*) '*INFO reading *DEFORMATION PLASTICITY: nonlinear'
write(*,*) ' geometric effects are turned on'
write(*,*)
!
if((istep.gt.0).and.(irstrt(1).ge.0)) then
write(*,*) '*ERROR reading *DEFORMATION PLASTICITY:'
write(*,*) ' *DEFORMATION PLASTICITY'
write(*,*) ' should be placed before all step definitions'
ier=1
return
endif
!
if(nmat.eq.0) then
write(*,*) '*ERROR reading *DEFORMATION PLASTICITY:'
write(*,*) ' *DEFORMATION PLASTICITY'
write(*,*) ' should bepreceded by a *MATERIAL card'
ier=1
return
endif
!
do i=2,n
write(*,*)
& '*WARNING reading *DEFORMATION PLASTICITY:'
write(*,*) ' parameter not recognized:'
write(*,*) ' ',
& textpart(i)(1:index(textpart(i),' ')-1)
call inputwarning(inpc,ipoinpc,iline,
&"DEFORMATION PLASTICITY%")
enddo
!
nelcon(1,nmat)=-50
!
iend=5
do
call getnewline(inpc,textpart,istat,n,key,iline,ipol,inl,
& ipoinp,inp,ipoinpc)
if((istat.lt.0).or.(key.eq.1)) return
ntmat=ntmat+1
nelcon(2,nmat)=ntmat
if(ntmat.gt.ntmat_) then
write(*,*) '*ERROR reading *DEFORMATION PLASTICITY:'
write(*,*) ' increase ntmat_'
ier=1
return
endif
do i=1,iend
read(textpart(i)(1:20),'(f20.0)',iostat=istat)
& elcon(i,ntmat,nmat)
if(istat.gt.0) then
call inputerror(inpc,ipoinpc,iline,
& "DEFORMATION PLASTICITY%",ier)
return
endif
enddo
read(textpart(6)(1:20),'(f20.0)',iostat=istat)
& elcon(0,ntmat,nmat)
if(istat.gt.0) then
call inputerror(inpc,ipoinpc,iline,
& "DEFORMATION PLASTICITY%",ier)
return
endif
enddo
!
return
end
| gpl-2.0 |
tobimensch/aqemu | src/VM_Wizard_Window.cpp | 2 | 24635 | /****************************************************************************
**
** Copyright (C) 2008-2010 Andrey Rijov <ANDron142@yandex.ru>
** Copyright (C) 2016 Tobias Gläßer
**
** This file is part of AQEMU.
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 51 Franklin Street, Fifth Floor,
** Boston, MA 02110-1301, USA.
**
****************************************************************************/
#include <QDir>
#include <QRegExp>
#include <QFileDialog>
#include "Utils.h"
#include "VM_Wizard_Window.h"
#include "System_Info.h"
#include <sys/utsname.h>
#include <stdio.h>
// FIXME this may be Linux only so far
// if you're porting this to something else
// this is a place where a lot of ifdefs may be needed
// Returns the host machine architecture string (e.g. "x86_64") via uname().
// On failure an empty QString is returned.
QString Get_My_System_Architecture()
{
	// The original ignored uname()'s return value and could read an
	// uninitialized struct on failure; check it explicitly.
	struct utsname name;
	if( uname(&name) != 0 )
		return QString();
	return QString( name.machine );
}
// Builds the new-VM wizard dialog: sets up the UI on its first page, loads
// all OS templates into the template combo box, and preselects the
// configured default template.
VM_Wizard_Window::VM_Wizard_Window( QWidget *parent )
	: QDialog(parent)
{
	ui.setupUi( this );
	ui.Label_Page->setBackgroundRole( QPalette::Base );
	ui.Wizard_Pages->setCurrentIndex(0);
	New_VM = new Virtual_Machine();
	// Loading all templates
	if( Load_OS_Templates() )
	{
		// Find default template
		for( int ix = 0; ix < ui.CB_OS_Type->count(); ++ix )
		{
			if( ui.CB_OS_Type->itemText(ix) == Settings.value("Default_VM_Template", "Linux 2.6").toString() )
				ui.CB_OS_Type->setCurrentIndex( ix );
		}
	}
	else
	{
		// NOTE(review): the context string below names the wrong function;
		// it is a runtime log string, so it is left untouched here.
		AQWarning( "void VM_Wizard_Window::on_Button_Next_clicked()",
				   "No VM Templates Found!" );
	}
	connect(ui.RB_Emulator_KVM, SIGNAL(toggled(bool)),this, SLOT(on_KVM_toggled(bool)));
}
// Switches the accelerator info tool box: page 1 describes KVM, page 0 the
// plain QEMU (non-KVM) case.
void VM_Wizard_Window::on_KVM_toggled(bool toggled)
{
	ui.toolBox_accelInfo->setCurrentIndex( toggled ? 1 : 0 );
}
// Stores a pointer to the application's VM list; the wizard uses it to
// reject duplicate machine names (see on_Button_Next_clicked).
void VM_Wizard_Window::Set_VM_List( QList<Virtual_Machine*> *list )
{
	VM_List = list;
}
// "Back" navigation of the wizard: dispatches on the currently shown page
// and steps to its predecessor, restoring that page's caption and the
// visibility of page-specific widgets.
void VM_Wizard_Window::on_Button_Back_clicked()
{
	// Going back always re-enables "Next" (it may have read "Finish").
	ui.Button_Next->setEnabled( true );
	if( ui.Wizard_Mode_Page == ui.Wizard_Pages->currentWidget() )
	{
		// First page: nothing to go back to (old welcome page removed).
		//ui.Wizard_Pages->setCurrentWidget( ui.Welcome_Page );
		//ui.Label_Page->setText( tr("New Virtual Machine Wizard") );
	}
	else if( ui.Template_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Wizard_Mode_Page );
		ui.Label_Page->setText( tr("Wizard Mode") );
		ui.Button_Back->setEnabled( false );
	}
	else if( ui.Accelerator_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Template_Page );
		ui.Label_Page->setText( tr("VM Hardware Template") );
	}
	else if( ui.General_Settings_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Accelerator_Page );
		ui.Label_Page->setText( tr("Accelerator") );
	}
	else if( ui.Memory_Page == ui.Wizard_Pages->currentWidget() )
	{
		// Memory page is only reached in custom mode, where the CPU type
		// widgets on the settings page must be visible.
		ui.Wizard_Pages->setCurrentWidget( ui.General_Settings_Page );
		ui.Label_Page->setText( tr("VM Name and CPU Type") );
		ui.Label_Caption_CPU_Type->setVisible( true );
		ui.Line_CPU_Type->setVisible( true );
		ui.Label_CPU_Type->setVisible( true );
		ui.CB_CPU_Type->setVisible( true );
	}
	else if( ui.Typical_HDD_Page == ui.Wizard_Pages->currentWidget() )
	{
		// Typical mode hides the CPU type widgets on the settings page.
		ui.Wizard_Pages->setCurrentWidget( ui.General_Settings_Page );
		ui.Label_Page->setText( tr("Virtual Machine Name") );
		ui.Label_Caption_CPU_Type->setVisible( false );
		ui.Line_CPU_Type->setVisible( false );
		ui.Label_CPU_Type->setVisible( false );
		ui.CB_CPU_Type->setVisible( false );
	}
	else if( ui.Custom_HDD_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Memory_Page );
		ui.Label_Page->setText( tr("Memory") );
	}
	else if( ui.Network_Page == ui.Wizard_Pages->currentWidget() )
	{
		if( ui.RB_Typical->isChecked() ) // typical or custom mode
		{
			ui.Wizard_Pages->setCurrentWidget( ui.Typical_HDD_Page );
			ui.Label_Page->setText( tr("Hard Disk Size") );
		}
		else
		{
			ui.Wizard_Pages->setCurrentWidget( ui.Custom_HDD_Page );
			ui.Label_Page->setText( tr("Virtual Hard Disk") );
		}
	}
	else if( ui.Finish_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Network_Page );
		ui.Label_Page->setText( tr("Network") );
		ui.Button_Next->setText( tr("&Next") );
	}
	else
	{
		// Default
		AQError( "void VM_Wizard_Window::on_Button_Back_clicked()",
				 "Default Section!" );
	}
}
// "Next"/"Finish" navigation of the wizard: dispatches on the current page,
// validates its input, prepares data for the following page, and finally
// creates the VM when the finish page is confirmed.
void VM_Wizard_Window::on_Button_Next_clicked()
{
	if( ui.Wizard_Mode_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Template_Page );
		ui.Label_Page->setText( tr("Template For VM") );
		on_RB_VM_Template_toggled( ui.RB_VM_Template->isChecked() );
		Current_Emulator = Get_Default_Emulator();
		// All Find Systems FIXME ^^^
		All_Systems = Current_Emulator.Get_Devices();
		if( All_Systems.isEmpty() )
		{
			AQError( "void VM_Wizard_Window::on_Button_Next_clicked()",
					 "Cannot get devices!" );
			return;
		}
		// Fill the computer type combo with all emulated systems.
		ui.CB_Computer_Type->clear();
		ui.CB_Computer_Type->addItem( tr("None Selected") );
		for( QMap<QString, Available_Devices>::const_iterator it = All_Systems.constBegin(); it != All_Systems.constEnd(); it++ )
		{
			ui.CB_Computer_Type->addItem( it.value().System.Caption );
		}
		ui.Button_Next->setEnabled( true );
		ui.Button_Back->setEnabled( true );
	}
	else if( ui.Accelerator_Page == ui.Wizard_Pages->currentWidget() )
	{
		// Apply the chosen template/accelerator before general settings.
		applyTemplate();
		ui.Wizard_Pages->setCurrentWidget( ui.General_Settings_Page );
	}
	else if( ui.Template_Page == ui.Wizard_Pages->currentWidget() )
	{
		Use_Accelerator_Page = true;
		ui.Wizard_Pages->setCurrentWidget( ui.Accelerator_Page );
		//FIXME: arch shouldn't be hardcoded
		if ( ui.RB_Generate_VM->isChecked() && ui.CB_Computer_Type->currentText() != "IBM PC 64Bit")
			ui.RB_Emulator_QEMU->setChecked( true );
		else
			ui.RB_Emulator_KVM->setChecked( true );
		ui.Label_Page->setText( tr("Accelerator") );
	}
	else if( ui.General_Settings_Page == ui.Wizard_Pages->currentWidget() )
	{
		// Reject machine names that already exist in the VM list.
		for( int vx = 0; vx < VM_List->count(); ++vx )
		{
			if( VM_List->at(vx)->Get_Machine_Name() == ui.Edit_VM_Name->text() )
			{
				AQGraphic_Warning( tr("Warning"), tr("This VM Name Does Already Exist!") );
				return;
			}
		}
		if( ui.RB_Typical->isChecked() )
		{
			ui.Wizard_Pages->setCurrentWidget( ui.Typical_HDD_Page );
			ui.Label_Page->setText( tr("Hard Disk Size") );
		}
		else
		{
			ui.Wizard_Pages->setCurrentWidget( ui.Memory_Page );
			ui.Label_Page->setText( tr("Memory") );
		}
	}
	else if( ui.Memory_Page == ui.Wizard_Pages->currentWidget() )
	{
		on_CH_Remove_RAM_Size_Limitation_stateChanged( Qt::Unchecked ); // update max available RAM size
		ui.Wizard_Pages->setCurrentWidget( ui.Custom_HDD_Page );
		ui.Label_Page->setText( tr("Virtual Hard Disk") );
	}
	else if( ui.Typical_HDD_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Network_Page );
		ui.Label_Page->setText( tr("Network") );
	}
	else if( ui.Custom_HDD_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Network_Page );
		ui.Label_Page->setText( tr("Network") );
	}
	else if( ui.Network_Page == ui.Wizard_Pages->currentWidget() )
	{
		ui.Wizard_Pages->setCurrentWidget( ui.Finish_Page );
		ui.Button_Next->setText( tr("&Finish") );
		ui.Label_Page->setText( tr("Finish!") );
		// Dry run: build the VM in memory only, to render the summary.
		Create_New_VM(true);
		ui.VM_Information_Text->setHtml(New_VM->GenerateHTMLInfoText(3));
	}
	else if( ui.Finish_Page == ui.Wizard_Pages->currentWidget() )
	{
		// Real run: create the VM (and its disk images), then close.
		if( Create_New_VM() ) accept();
	}
	else
	{
		AQError( "void VM_Wizard_Window::on_Button_Next_clicked()",
				 "Default Section!" );
	}
}
// Applies the user's template/accelerator choices to New_VM: loads the
// selected template (if any), sets the emulator and accelerator, seeds the
// UI fields from the template or from the release-date presets, and fills
// the CPU type combo from the matching device list.
void VM_Wizard_Window::applyTemplate()
{
	// Use Selected Template
	if( ui.RB_VM_Template->isChecked() )
	{
		// Index 0 of CB_OS_Type is the placeholder, hence the -1 offset.
		if( ! New_VM->Load_VM(OS_Templates_List[ui.CB_OS_Type->currentIndex()-1].filePath()) )
		{
			AQGraphic_Error( "void VM_Wizard_Window::Create_New_VM()", tr("Error!"),
							 tr("Cannot Create New VM from Template!") );
			return;
		}
	}
	// Emulator
	New_VM->Set_Emulator( Current_Emulator );
	// Find CPU List For This Template
	bool devices_found = false;
	if( ui.RB_Emulator_KVM->isChecked() )
	{
		New_VM->Set_Machine_Accelerator(VM::KVM);
		New_VM->Set_Computer_Type( "qemu-system-x86_64" );
		// KVM: swap the es1370 sound card for AC97 if the template used it.
		if( New_VM->Get_Audio_Cards().Audio_es1370 )
		{
			VM::Sound_Cards tmp_audio = New_VM->Get_Audio_Cards();
			tmp_audio.Audio_es1370 = false;
			tmp_audio.Audio_AC97 = true;
			New_VM->Set_Audio_Cards( tmp_audio );
		}
		Current_Devices = &All_Systems[ New_VM->Get_Computer_Type() ];
		devices_found = true;
	}
	else
	{
		New_VM->Set_Machine_Accelerator(VM::TCG);
		New_VM->Set_Computer_Type( "qemu-system-x86_64" );
		Current_Devices = &All_Systems[ New_VM->Get_Computer_Type() ];
		/*if( ! Current_Devices->System.QEMU_Name.isEmpty() )*/ devices_found = true;
	}
	// Use Selected Template
	if( ui.RB_VM_Template->isChecked() )
	{
		// Name
		ui.Edit_VM_Name->setText( New_VM->Get_Machine_Name() );
		// Memory
		ui.Memory_Size->setValue( New_VM->Get_Memory_Size() );
		// HDA: fall back to 10 GB if the template has no disk size.
		double hda_size = New_VM->Get_HDA().Get_Virtual_Size_in_GB();
		if( hda_size != 0.0 )
			ui.SB_HDD_Size->setValue( hda_size );
		else
			ui.SB_HDD_Size->setValue( 10.0 );
		// Network
		ui.RB_User_Mode_Network->setChecked( New_VM->Get_Use_Network() );
		// Find CPU List For This Template
		Current_Devices = &All_Systems[ New_VM->Get_Computer_Type() ];
		if( ! Current_Devices->System.QEMU_Name.isEmpty() ) devices_found = true;
	}
	else // Create New VM in Date Mode
	{
		By_Year();
		// Match the selected computer type caption to its device entry.
		QString compCaption = ui.CB_Computer_Type->currentText();
		for( QMap<QString, Available_Devices>::const_iterator it = All_Systems.constBegin(); it != All_Systems.constEnd(); it++ )
		{
			if( it.value().System.Caption == compCaption )
			{
				Current_Devices = &it.value();
				/*if( ! Current_Devices->System.QEMU_Name.isEmpty() )*/ devices_found = true;
			}
		}
	}
	if( ! devices_found )
	{
		AQGraphic_Error( "void VM_Wizard_Window::applyTemplate()", tr("Error!"),
						 tr("Cannot Find Emulator System ID!") );
	}
	else
	{
		// Add CPU's
		ui.CB_CPU_Type->clear();
		for( int cx = 0; cx < Current_Devices->CPU_List.count(); ++cx )
			ui.CB_CPU_Type->addItem( Current_Devices->CPU_List[cx].Caption );
	}
	// Typical or custom mode
	Typical_Or_Custom();
}
// Adjusts the general settings page for typical vs. custom wizard mode:
// custom mode shows the CPU type widgets and uses the extended caption.
void VM_Wizard_Window::Typical_Or_Custom()
{
	const bool customMode = ! ui.RB_Typical->isChecked();

	ui.Label_Page->setText( customMode ? tr("VM Name and CPU Type")
	                                   : tr("Virtual Machine Name") );
	// Re-run the name validation for the current edit text.
	on_Edit_VM_Name_textEdited( ui.Edit_VM_Name->text() );

	ui.Label_Caption_CPU_Type->setVisible( customMode );
	ui.Line_CPU_Type->setVisible( customMode );
	ui.Label_CPU_Type->setVisible( customMode );
	ui.CB_CPU_Type->setVisible( customMode );
}
// Seeds RAM and HDD size from the selected release-date period.
void VM_Wizard_Window::By_Year()
{
	// RAM (MB) / HDD (GB) presets for combo indexes 1..5:
	// 1985-1990, 1990-1995, 1995-2000, 2000-2005, 2005-2010.
	static const struct { int ram; double hdd; } presets[] =
		{ {16, 1.0}, {64, 2.0}, {256, 10.0}, {512, 20.0}, {1024, 40.0} };

	const int dateIndex = ui.CB_Relese_Date->currentIndex();

	if( dateIndex >= 1 && dateIndex <= 5 )
	{
		ui.Memory_Size->setValue( presets[dateIndex - 1].ram );
		ui.SB_HDD_Size->setValue( presets[dateIndex - 1].hdd );
	}
	else if( dateIndex == 0 )
	{
		// No period chosen: log and fall back to 512 MB, leave HDD alone.
		AQError( "void VM_Wizard_Window::Create_New_VM()",
				 "Relese Date Not Selected!" );
		ui.Memory_Size->setValue( 512 );
	}
	else
	{
		AQError( "void VM_Wizard_Window::Create_New_VM()",
				 "Relese Date Default Section!" );
		ui.Memory_Size->setValue( 512 );
	}
}
bool VM_Wizard_Window::Load_OS_Templates()
{
QList<QString> tmp_list = Get_Templates_List();
for( int ax = 0; ax < tmp_list.count(); ++ax )
{
OS_Templates_List.append( QFileInfo(tmp_list[ax]) );
}
for( int ix = 0; ix < OS_Templates_List.count(); ++ix )
{
ui.CB_OS_Type->addItem( OS_Templates_List[ix].completeBaseName() );
}
// no items found
if( ui.CB_OS_Type->count() < 2 ) return false;
else return true;
}
// Populate the New_VM object from the wizard UI and, unless simulate is
// true, create its HDD image files and its XML description file on disk.
//
// Returns false only when the custom (non-typical) path cannot resolve a
// QEMU system ID in All_Systems; true otherwise.
//
// NOTE(review): with simulate == true, New_VM is still fully populated and
// Set_HD*() are still called — only the disk writes are skipped.
bool VM_Wizard_Window::Create_New_VM(bool simulate)
{
// Icon: look one up by VM name; fall back to the bundled generic icon
QString icon_path = Find_OS_Icon( ui.Edit_VM_Name->text() );
if( icon_path.isEmpty() )
{
AQWarning( "void VM_Wizard_Window::Create_New_VM()", "Icon for new VM not Found!" );
New_VM->Set_Icon_Path( ":/other.png" );
}
else
{
New_VM->Set_Icon_Path( icon_path );
}
// Name
New_VM->Set_Machine_Name( ui.Edit_VM_Name->text() );
// Create path valid string (filesystem-safe version of the VM name)
QString VM_File_Name = Get_FS_Compatible_VM_Name( ui.Edit_VM_Name->text() );
// Set Computer Type? Only the "generate VM" mode takes it from Current_Devices
if( ui.RB_Generate_VM->isChecked() )
{
New_VM->Set_Computer_Type( Current_Devices->System.QEMU_Name );
}
// RAM
New_VM->Set_Memory_Size( ui.Memory_Size->value() );
// Wizard Mode
if( ui.RB_Typical->isChecked() )
{
// Typical mode: create the HDA image next to the VM file, plus any
// additional HDB/HDC/HDD images that are already enabled on New_VM
VM::Device_Size hd_size;
hd_size.Size = ui.SB_HDD_Size->value();
hd_size.Suffix = VM::Size_Suf_Gb;
// NOTE(review): VM_Directory and VM_File_Name are concatenated directly —
// presumably the stored VM_Directory value ends with a path separator; verify
QString hd_path = Settings.value( "VM_Directory", "~" ).toString() + VM_File_Name;
if ( ! simulate )
Create_New_HDD_Image( hd_path + "_HDA.img", hd_size );
New_VM->Set_HDA( VM_HDD(true, hd_path + "_HDA.img") );
// Other HDD's: re-create each enabled disk at the new path, keeping its size
if( New_VM->Get_HDB().Get_Enabled() )
{
if ( ! simulate )
Create_New_HDD_Image( hd_path + "_HDB.img", New_VM->Get_HDB().Get_Virtual_Size() );
New_VM->Set_HDB( VM_HDD(true, hd_path + "_HDB.img") );
}
if( New_VM->Get_HDC().Get_Enabled() )
{
if ( ! simulate )
Create_New_HDD_Image( hd_path + "_HDC.img", New_VM->Get_HDC().Get_Virtual_Size() );
New_VM->Set_HDC( VM_HDD(true, hd_path + "_HDC.img") );
}
if( New_VM->Get_HDD().Get_Enabled() )
{
if ( ! simulate )
Create_New_HDD_Image( hd_path + "_HDD.img", New_VM->Get_HDD().Get_Virtual_Size() );
New_VM->Set_HDD( VM_HDD(true, hd_path + "_HDD.img") );
}
}
else
{
// Custom mode: resolve Current_Devices (the emulator/device set) from the
// user's choice, then take CPU and HDA settings directly from the UI
bool devices_found = false;
// CPU Type
if( ui.RB_VM_Template->isChecked() )
{
Current_Devices = &All_Systems[ New_VM->Get_Computer_Type() ];
if( ! Current_Devices->System.QEMU_Name.isEmpty() ) devices_found = true;
}
else
{
// Find QEMU System Name in CB_Computer_Type
if( ui.RB_Emulator_KVM->isChecked() )
{
// KVM is hard-wired to the x86_64 system emulator
Current_Devices = &All_Systems[ "qemu-system-x86_64" ];
if( ! Current_Devices->System.QEMU_Name.isEmpty() ) devices_found = true;
}
else // QEMU
{
// Match the combo box caption against every known system
for( QMap<QString, Available_Devices>::const_iterator it = All_Systems.constBegin(); it != All_Systems.constEnd(); it++ )
{
if( it.value().System.Caption == ui.CB_Computer_Type->currentText() )
{
Current_Devices = &it.value();
devices_found = true;
break;
}
}
}
}
if( ! devices_found )
{
AQGraphic_Error( "bool VM_Wizard_Window::Create_New_VM()", tr("Error!"),
tr("Cannot Find QEMU System ID!") );
return false;
}
// CPU model is picked by index from the resolved device set
New_VM->Set_CPU_Type( Current_Devices->CPU_List[ui.CB_CPU_Type->currentIndex()].QEMU_Name );
// Hard Disk: use the user-supplied image path, or no HDA at all
if( ! ui.Edit_HDA_File_Name->text().isEmpty() )
New_VM->Set_HDA( VM_HDD(true, ui.Edit_HDA_File_Name->text()) );
else
New_VM->Set_HDA( VM_HDD(false, "") );
}
// Network: usermode adds one card only if none exist yet; "no network"
// disables networking and removes every existing card
if( ui.RB_User_Mode_Network->isChecked() )
{
if( New_VM->Get_Network_Cards_Count() == 0 )
{
New_VM->Set_Use_Network( true );
VM_Net_Card net_card;
net_card.Set_Net_Mode( VM::Net_Mode_Usermode );
New_VM->Add_Network_Card( net_card );
}
}
else if( ui.RB_No_Network->isChecked() )
{
New_VM->Set_Use_Network( false );
for( int rx = 0; rx < New_VM->Get_Network_Cards_Count(); ++rx )
{
// Always delete index 0: the list shrinks as cards are removed
New_VM->Delete_Network_Card( 0 );
}
}
// Set Emulator Name (version) to Default ("")
Emulator tmp_emul = New_VM->Get_Emulator();
tmp_emul.Set_Name( "" );
New_VM->Set_Emulator( tmp_emul );
if ( ! simulate )
{
// Create New VM XML File
New_VM->Create_VM_File( Settings.value("VM_Directory", "~").toString() + VM_File_Name + ".aqemu", false );
}
return true;
}
// Locate an icon for the given OS name. First scans the user os_icons data
// folder for a PNG whose base name occurs inside os_name (case-insensitive,
// wildcard match); then falls back to the bundled Linux/Windows family
// defaults, and finally to the generic ":/other.png".
// Returns an empty string only when os_name itself is empty.
QString VM_Wizard_Window::Find_OS_Icon( const QString os_name )
{
    if( os_name.isEmpty() )
    {
        AQError( "QString VM_Wizard_Window::Find_OS_Icon( const QString os_name )",
                 "os_name is Empty!" );
        return "";
    }

    // Gather every PNG icon shipped in the data folder
    QDir iconsDir( QDir::toNativeSeparators(Settings.value("AQEMU_Data_Folder","").toString() + "/os_icons/") );
    QFileInfoList iconFiles = iconsDir.entryInfoList( QStringList("*.png"), QDir::Files, QDir::Unsorted );

    QRegExp matcher;
    matcher.setPatternSyntax( QRegExp::Wildcard );
    matcher.setCaseSensitivity( Qt::CaseInsensitive );

    for( int ix = 0; ix < iconFiles.count(); ix++ )
    {
        matcher.setPattern( "*" + iconFiles[ix].baseName() + "*" );
        if( matcher.exactMatch(os_name) )
            return iconFiles[ix].absoluteFilePath();
    }

    // No per-OS icon found — try the OS family defaults
    matcher.setPattern( "*linux*" );
    if( matcher.exactMatch(os_name) )
        return ":/default_linux.png";

    matcher.setPattern( "*windows*" );
    if( matcher.exactMatch(os_name) )
        return ":/default_windows.png";

    return ":/other.png";
}
// When the template radio button becomes active, enable "Next" only if a
// real OS type (not the placeholder at index 0) is selected.
void VM_Wizard_Window::on_RB_VM_Template_toggled( bool on )
{
    if( ! on )
        return;

    ui.Button_Next->setEnabled( ui.CB_OS_Type->currentIndex() != 0 );
}
// When the "generate VM" radio button becomes active, "Next" requires both
// a computer type and a release date to be chosen (index 0 is the placeholder).
void VM_Wizard_Window::on_RB_Generate_VM_toggled( bool on )
{
    if( ! on )
        return;

    const bool selectionsComplete = ui.CB_Computer_Type->currentIndex() != 0 &&
                                    ui.CB_Relese_Date->currentIndex() != 0;
    ui.Button_Next->setEnabled( selectionsComplete );
}
// "Next" is available whenever a real OS type (anything past the placeholder
// at index 0) is selected.
void VM_Wizard_Window::on_CB_OS_Type_currentIndexChanged( int index )
{
    ui.Button_Next->setEnabled( index != 0 );
}
// Placeholder selection disables "Next"; a real selection enables it only
// when a release date is also chosen. Note the asymmetry (kept from the
// original): choosing a real type with no release date leaves the button
// state untouched rather than disabling it.
void VM_Wizard_Window::on_CB_Computer_Type_currentIndexChanged( int index )
{
    if( index == 0 )
        ui.Button_Next->setEnabled( false );
    else if( ui.CB_Relese_Date->currentIndex() != 0 )
        ui.Button_Next->setEnabled( true );
}
// Mirror of the computer-type handler: placeholder disables "Next"; a real
// date enables it only when a computer type is also chosen, and otherwise
// leaves the button state untouched (asymmetry kept from the original).
void VM_Wizard_Window::on_CB_Relese_Date_currentIndexChanged( int index )
{
    if( index == 0 )
        ui.Button_Next->setEnabled( false );
    else if( ui.CB_Computer_Type->currentIndex() != 0 )
        ui.Button_Next->setEnabled( true );
}
void VM_Wizard_Window::on_Memory_Size_valueChanged( int value )
{
int cursorPos = ui.CB_RAM_Size->lineEdit()->cursorPosition();
if( value % 1024 == 0 )
ui.CB_RAM_Size->setEditText( QString("%1 GB").arg(value / 1024) );
else
ui.CB_RAM_Size->setEditText( QString("%1 MB").arg(value) );
ui.CB_RAM_Size->lineEdit()->setCursorPosition( cursorPos );
}
// Parse the free-form RAM size text (e.g. "512", "512MB", "2 GB") and apply
// it to the Memory_Size spin box. Shows a graphic warning and leaves the
// spin box unchanged on invalid input, on zero, or when the value exceeds
// the host RAM limit (unless the limitation checkbox is checked).
void VM_Wizard_Window::on_CB_RAM_Size_editTextChanged( const QString &text )
{
    if( text.isEmpty() )
        return;

    // Integer with optional MB/GB suffix, case-insensitive; the trailing
    // empty alternative allows a bare number like "512" (meaning MB)
    QRegExp rx( "\\s*([\\d]+)\\s*(MB|GB|M|G|)\\s*" ); // like: 512MB or 512
    if( ! rx.exactMatch(text.toUpper()) )
    {
        AQGraphic_Warning( tr("Error"),
                           tr("Cannot convert \"%1\" to memory size!").arg(text) );
        return;
    }

    QStringList ramStrings = rx.capturedTexts();
    if( ramStrings.count() != 3 )
    {
        AQGraphic_Warning( tr("Error"),
                           tr("Cannot convert \"%1\" to memory size!").arg(text) );
        return;
    }

    bool ok = false;
    int value = ramStrings[1].toInt( &ok, 10 );
    if( ! ok )
    {
        AQGraphic_Warning( tr("Error"),
                           tr("Cannot convert \"%1\" to integer!").arg(ramStrings[1]) );
        return;
    }

    // FIX: an empty suffix previously fell through to the error branch, so a
    // plain number like "512" (explicitly documented as valid) was rejected.
    // It now means megabytes, like "MB"/"M".
    if( ramStrings[2].isEmpty() || ramStrings[2] == "MB" || ramStrings[2] == "M" )
    {
        // Value is already in megabytes
    }
    else if( ramStrings[2] == "GB" || ramStrings[2] == "G" )
    {
        value *= 1024;
    }
    else
    {
        AQGraphic_Warning( tr("Error"),
                           tr("Cannot convert \"%1\" to size suffix! Valid suffixes: MB, GB").arg(ramStrings[2]) );
        return;
    }

    if( value <= 0 )
    {
        // FIX: message said "< 0", but the digits-only regex can only yield 0 here
        AQGraphic_Warning( tr("Error"), tr("Memory size < 1! Valid size is 1 or more") );
        return;
    }

    // Refresh the spin box maximum from the current host memory state
    on_TB_Update_Available_RAM_Size_clicked();

    if( (value > ui.Memory_Size->maximum()) &&
        (ui.CH_Remove_RAM_Size_Limitation->isChecked() == false) )
    {
        AQGraphic_Warning( tr("Error"),
                           tr("Your memory size %1 MB > %2 MB - all free RAM on this system!\n"
                              "To setup this value, check \"Remove limitation on maximum amount of memory\".")
                           .arg(value).arg(ui.Memory_Size->maximum()) );
        on_Memory_Size_valueChanged( ui.Memory_Size->value() ); // Set valid size
        return;
    }

    // All OK. Set memory size
    ui.Memory_Size->setValue( value );
}
// Toggle between the fixed 32 GB ceiling (limitation removed) and the real
// host RAM limit, updating the spin-box maximum, the free-memory label and
// the RAM size combo box accordingly.
void VM_Wizard_Window::on_CH_Remove_RAM_Size_Limitation_stateChanged( int state )
{
    if( state == Qt::Checked )
    {
        // No-limit mode: allow up to a fixed 32 GB
        ui.Memory_Size->setMaximum( 32768 );
        ui.Label_Available_Free_Memory->setText( "32 GB" );
        Update_RAM_Size_ComboBox( 32768 );
        return;
    }

    int allRAM = 0, freeRAM = 0;
    System_Info::Get_Free_Memory_Size( allRAM, freeRAM );

    // Warn when the currently selected size no longer fits the host
    if( allRAM < ui.Memory_Size->value() )
        AQGraphic_Warning( tr("Error"), tr("Current memory size bigger than all existing host memory!\nUsing maximum available size.") );

    ui.Memory_Size->setMaximum( allRAM );
    ui.Label_Available_Free_Memory->setText( QString("%1 MB").arg(allRAM) );
    Update_RAM_Size_ComboBox( allRAM );
}
// Refresh the free-memory button label from the host; when the size
// limitation is active, also clamp the spin-box maximum and rebuild the
// RAM size combo box to the total host RAM.
void VM_Wizard_Window::on_TB_Update_Available_RAM_Size_clicked()
{
    int allRAM = 0, freeRAM = 0;
    System_Info::Get_Free_Memory_Size( allRAM, freeRAM );

    ui.TB_Update_Available_RAM_Size->setText( tr("Free memory: %1 MB").arg(freeRAM) );

    if( ui.CH_Remove_RAM_Size_Limitation->isChecked() )
        return;

    ui.Memory_Size->setMaximum( allRAM );
    Update_RAM_Size_ComboBox( allRAM );
}
// Rebuild the RAM size combo box so it offers only presets that fit into
// freeRAM (in MB), preserving whatever text the user had typed. Uses a
// static cache so repeated calls with the same freeRAM are no-ops.
void VM_Wizard_Window::Update_RAM_Size_ComboBox( int freeRAM )
{
    static int oldRamSize = 0;
    if( freeRAM == oldRamSize ) return;
    oldRamSize = freeRAM;

    QStringList ramSizes;
    ramSizes << "32 MB" << "64 MB" << "128 MB" << "256 MB" << "512 MB"
             << "1 GB" << "2 GB" << "3 GB" << "4 GB" << "8 GB" << "16 GB" << "32 GB";

    // MB thresholds paired with ramSizes, scanned largest-first; matching
    // thresholds[tx] exposes the first (12 - tx) preset entries
    static const int thresholds[12] = { 32768, 16384, 8192, 4096, 3072, 2048,
                                        1024, 512, 256, 128, 64, 32 };

    int maxRamIndex = 0;
    for( int tx = 0; tx < 12; ++tx )
    {
        if( freeRAM >= thresholds[tx] )
        {
            maxRamIndex = 12 - tx;
            break;
        }
    }

    if( maxRamIndex == 0 )
    {
        AQGraphic_Warning( tr("Error"), tr("Free memory on this system is lower than 32 MB!") );
        return;
    }

    // Defensive bound check kept from the original implementation
    if( maxRamIndex > ramSizes.count() )
    {
        AQError( "void VM_Wizard_Window::Update_RAM_Size_ComboBox( int freeRAM )",
                 "maxRamIndex > ramSizes.count()" );
        return;
    }

    // Repopulate while keeping the user's current text in the edit field
    QString oldText = ui.CB_RAM_Size->currentText();
    ui.CB_RAM_Size->clear();
    for( int ix = 0; ix < maxRamIndex; ix++ )
        ui.CB_RAM_Size->addItem( ramSizes[ix] );
    ui.CB_RAM_Size->setEditText( oldText );
}
// "Next" is available only while the VM name field is non-empty.
void VM_Wizard_Window::on_Edit_VM_Name_textEdited( const QString &text )
{
    ui.Button_Next->setEnabled( ! ui.Edit_VM_Name->text().isEmpty() );
}
void VM_Wizard_Window::on_Button_New_HDD_clicked()
{
Create_HDD_Image_Window Create_HDD_Win( this );
Create_HDD_Win.Set_Image_Size( ui.SB_HDD_Size->value() ); // Set Initial HDA Size
if( Create_HDD_Win.exec() == QDialog::Accepted )
ui.Edit_HDA_File_Name->setText( Create_HDD_Win.Get_Image_File_Name() );
}
void VM_Wizard_Window::on_Button_Existing_clicked()
{
QString hddPath = QFileDialog::getOpenFileName( this, tr("Select HDD Image"),
Get_Last_Dir_Path(ui.Edit_HDA_File_Name->text()),
tr("All Files (*)") );
if( ! hddPath.isEmpty() )
ui.Edit_HDA_File_Name->setText( QDir::toNativeSeparators(hddPath) );
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.